def __init__(self, verbose=0):
    """Assigned Internet Protocol Numbers search tool.

    Port list downloaded from
    http://www.iana.org/assignments/protocol-numbers/protocol-numbers-1.csv

    CSV file format:
        Decimal,Keyword,Protocol,Reference
        0,HOPOPT,IPv6 Hop-by-Hop Option,[RFC2460]
        1,ICMP,Internet Control Message,[RFC792]

    Resulting dictionary format:
        [Protocol_num]['name'] = name
        [Protocol_num]['description'] = description

    Tested with Python ver 2.7.2 on Win7 & Win XP
    (c) 2012 - 2014 Intelligent Planet Ltd
    """
    # BUG FIX: the docstring originally appeared AFTER Tools.__init__(self),
    # so it was a no-op expression instead of the method's __doc__.
    Tools.__init__(self)
    self.verbose = verbose
    self.protocol_dict = {}
    # NOTE(review): backslash separator assumes Windows — TODO confirm
    self.path = os.getcwd() + '\\'
    self.load_file = self.path + 'protocols.csv'
    self.load()
def __init__(self, verbose=0):
    """X.25 Routing parser for cisco routers.

    Router output format:
        r01#sh x25 rou | inc dest|0/0
        1 dest ^(0000111111).* xot 10.11.12.13
        2 dest ^(0000111112).* xot 10.11.12.14

    Dictionary format (key = auto incrementing integer):
        [key]['router'] = hostname
        [key]['route_id'] = route_num
        [key]['x25_route'] = destination
        [key]['ip_address'] = ip_add
        [key]['match'] = match

    Written by Peter Rogers (C) Intelligent Planet 2013
    """
    # BUG FIX: the docstring originally appeared AFTER Tools.__init__(self),
    # so it was a no-op expression instead of the method's __doc__.
    Tools.__init__(self)
    self.verbose = verbose
    self.x25_dict = {}
    self.route_list = []
    self.config = []
    self.config_list = []
    self.load_file = 'c:/x25'
    self.out_file = 'c:/x25_out'
    self.display_heading = '\n # X.X25 route IPaddress match router \n' if False else '\n # X.25 route IPaddress match router \n'
    self.load()
def setup_class(self):
    """Test-class setup for WeChat Work (企业微信) corp-tag tests.

    Obtains an access_token from the gettoken endpoint, then deletes every
    corp tag after the first two so tests start from a known state.
    """
    # fetch the access token
    corpid = ""
    corpsecret = ""
    url = "https://qyapi.weixin.qq.com/cgi-bin/gettoken"
    params = {"corpid": corpid, "corpsecret": corpsecret}
    res = requests.get(url=url, params=params)
    print("url:%s;params:%s" % (url, params))
    print(json.dumps(res.json(), indent=2, ensure_ascii=False))
    self.token = res.json()["access_token"]
    self.t = Tools()
    # clean up data: list all tag groups and collect every tag id
    url = "https://qyapi.weixin.qq.com/cgi-bin/externalcontact/get_corp_tag_list"
    params = {"access_token": self.token}
    res = requests.post(url=url, params=params)
    print("url:%s;params:%s" % (url, params))
    print(json.dumps(res.json(), indent=2, ensure_ascii=False))
    rsp = res.json()
    tag_list = [
        tag["id"] for group in rsp["tag_group"] for tag in group["tag"]
    ]
    # keep the first two tags, delete the rest in one bulk call
    if len(tag_list) > 2:
        url = "https://qyapi.weixin.qq.com/cgi-bin/externalcontact/del_corp_tag"
        params = {"access_token": self.token}
        json_datas = {"tag_id": tag_list[2:]}
        res = requests.post(url=url, params=params, json=json_datas)
        print("url:%s;params:%s;datas:%s" % (url, params, json_datas))
        print(json.dumps(res.json(), indent=2, ensure_ascii=False))
def __init__(self, name=None):
    """Issue holder: loads the stored issue fields for *name* if given.

    name: user/issue identifier; when None an empty 6-field issue is kept.
    """
    self.name = name
    self.filename = str(name) + ".json"
    self.tools = Tools()
    self.issue = ["", "", "", "", "", ""]
    # IDIOM FIX: `!= None` replaced with the canonical identity test
    if self.name is not None:
        self.issue = self.tools.get_issue(name)
def __init__(self):
    """Configure the vehicle-detection pipeline and load (or train) the SVM."""
    self.svc = None
    self.X_scaler = None
    # --- feature-extraction defaults ---
    self.color_space = 'YUV'        # one of: YCrCb, RGB, HSV, HLS, YUV, BGR2YCrCb, LUV
    self.hog_channel = 'ALL'        # HOG channels to extract: 0, 1, 2 or ALL
    self.orient = 11                # HOG orientation bins
    self.pixel_per_cell = 16        # HOG pixels per cell
    self.cell_per_block = 2         # HOG cells per block
    self.spatial_size = (16, 16)    # spatial-binning dimensions
    self.hist_bins = 32             # colour-histogram bins
    self.heat_threshold = 3         # plus half the detection-buffer size from prior frames
    self.image_scaling_checked = False
    # collaborators
    self.tools = Tools()
    self.imageProcessing = ImageProcessingUtilities()
    self.detectionTracker = DetectionTracker()
    # restore a previously trained classifier if the pickle file exists
    classifier_params_count = 2
    self.svc, self.X_scaler = self.tools.load_params(
        classifier_params_count, 'svm_params.pkl')
    # nothing stored yet -> train from scratch
    if self.svc is None:
        print("Training SVM")
        self.train_classifier()
def showcase(cls):
    """Interactively pick an algorithm and execution mode, run it, print results."""
    def prompt_until_valid(message, accepted):
        # keep re-asking until the reply is one of the accepted strings
        reply = input(message)
        while reply not in accepted:
            reply = input(message)
        return reply

    answer_of_simulation = None
    answer_of_real = None
    classical_answer = None
    algorithm = prompt_until_valid(constants.input_message_1,
                                   constants.acceptable_algorithm_inputs)
    execution = prompt_until_valid(constants.input_message_2,
                                   constants.acceptable_execution_inputs)
    if execution == "0":
        classical_answer = Tools.execute_classically(algorithm)
    elif execution == "1":
        answer_of_simulation = Tools.execute_in_simulator(algorithm)
    elif execution == "2":
        answer_of_real = Tools.execute_in_real_device(algorithm)
    elif execution == "3":
        combined = Tools.execute_both(algorithm)
        classical_answer = combined[0]
        answer_of_real = combined[1]
    print_answers(answer_of_simulation, answer_of_real, classical_answer,
                  algorithm)
def __extract_hashtag_data(self, config_file):
    """Extract Instagram data for every hashtag listed in the config.

    config_file: dict with keys 'hashtag_list' (input file path) and
    'hashtag_output' (JSON output path).
    """
    try:
        insta_hashtag = InstagramHashtag()
        # read the hashtag list; strip BOM markers left by Windows editors
        with open(config_file["hashtag_list"],
                  'r', encoding="utf-8") as file:
            self.content = file.readlines()
        self.content = [x.strip().replace('\ufeff', '')
                        for x in self.content]
        for hashtag in self.content:
            print('Scraping links with #{}, please wait just for moments'
                  .format(hashtag))
            self.arr_meta_data_info.append(
                insta_hashtag.extract_data(hashtag, config_file))
        Tools.write_json(config_file['hashtag_output'],
                         self.arr_meta_data_info)
    except Exception as err:
        # BUG FIX: was a bare `except:` that hid every failure; at least
        # report the cause alongside the original message
        print('Occurred some errors: {}'.format(err))
def cut(self):
    """Split this video into frames stored under a per-video temp directory."""
    destination = temp_pic_dir + self.temp_save_path
    # ensure the output folder exists before extracting frames
    helper = Tools()
    helper.mkdir(destination)
    self._video_split(self.file_path, destination)
def __init__(self, path):
    """Load a sudoku photo, isolate and sharpen the grid, then split into cells."""
    self.tools = Tools()
    self.image = self.loadImage(path)
    self.preprocess()
    grid = self.cropSudoku()
    grid = self.strengthen(grid)
    self.cells = Cells(grid).cells
def convert_to_standard_form(restrictions, vars, costs, a, b):
    """Build the initial simplex tableau in standard form.

    Parameters:
        restrictions: number of constraint rows.
        vars: number of original variables.
            NOTE(review): unused in this body (and shadows the builtin
            `vars`) — confirm whether it should affect the reshape width.
        costs: objective coefficients; negated into the tableau's top row.
        a: list of constraint coefficient rows.
        b: right-hand-side values, one per constraint.

    Returns a (restrictions + 1)-row object-dtype matrix laid out as
    [certificate | -costs | objective] on top and
    [operation-identity | A-row | slack-identity | b] per constraint.
    """
    logging.debug(
        '\n =================================== \n = CONVERTING TO STANDARD FORM = \n ==================================='
    )
    objective = 0
    certificate = [0] * restrictions
    # identity blocks: operation-history columns and slack variables
    operations = Tools.identity(restrictions)
    gap_vars = Tools.identity(restrictions)
    # one zero cost per slack variable; negate for the maximization tableau
    costs = costs + [0] * restrictions
    costs = [x * -1 for x in costs]
    array_tableu = np.concatenate((certificate, costs), axis=0)
    array_tableu = np.append(array_tableu, objective)
    # append each constraint row: operations | A | slack | b
    for x in range(0, restrictions):
        array_tableu = np.concatenate(
            (array_tableu, np.array(operations[x])), axis=0)
        array_tableu = np.concatenate((array_tableu, np.array(a[x])), axis=0)
        array_tableu = np.concatenate(
            (array_tableu, np.array(gap_vars[x])), axis=0)
        array_tableu = np.append(array_tableu, b[x])
    # flatten -> 2-D; object dtype so exact fractions can be stored later
    matrix_tableu = array_tableu.reshape(
        restrictions + 1,
        len(certificate) + len(costs) + 1).astype('object')
    return matrix_tableu
def generate_stats(self):
    """Aggregate per-speaker speech statistics, with a word-count 'pause'.

    Tracks a running word count across speeches; every 500 words a pause
    window opens, during which speeches are excluded from the per-speaker
    stats. NOTE(review): the pause is cleared when word_count reaches 10
    after the reset — i.e. roughly a 10-word window; confirm this is the
    intended sampling behaviour.
    """
    word_count = 0
    pause = False
    # the "whole" pseudo-speaker accumulates every speech
    spkwhole = SpeakerStatistics("whole")
    self.statistics["whole"] = spkwhole
    for spc in self.get_speech_list():
        self.statistics["whole"].add_speech(spc.get_length())
        # five hundred span and 10 pause
        for words in spc.get_words_array():
            word_count = word_count + 1
            if (word_count % 500 == 0):
                word_count = 0
                pause = True
            if (word_count == 10 and pause == True):
                pause = False
                word_count = 0
        # normalise speaker keys so "SMITH" and "SMITH." are not doubled
        if "." in spc.get_speaker():
            speaker = spc.get_speaker()
        else:
            speaker = spc.get_speaker() + "."
        # speeches inside a pause window are skipped for per-speaker stats
        if (pause != True):
            if (speaker not in self.statistics):
                spkstat = SpeakerStatistics(speaker)
                spkstat.add_speech(spc.get_length())
                self.statistics[speaker] = spkstat
            else:
                spkstat = self.statistics[speaker]
                spkstat.add_speech(spc.get_length())
    # the median and mean count for the whole text
    for key, value in self.statistics.items():
        if (key != "whole"):
            self.counts.append(value.get_count())
    tools = Tools()
    self.median_count = tools.calc_median(self.counts)
    self.average_count = tools.calc_average(tools.calc_sum(self.counts),
                                            len(self.counts))
def __init__(self, ip='192.168.1.10', name='test', port=23):
    """Network tool kit.

    Tested on Win XP with Python 2.7
    (c) 2012, 2013 Intelligent Planet Ltd
    """
    # BUG FIX: the docstring originally appeared AFTER Tools.__init__(self),
    # so it was a no-op expression instead of the method's __doc__.
    Tools.__init__(self)
    self.init_net(ip, name, port)
    self.timeout = 0.2
    self.sleep = 0.1
    self.verbose = 1
    self.buffers = 1024
    self.error = 'fail'
    self.web_proxy = '172.19.193.122'
    # FTP(20, 21), SSH(22), Telnet(23), SMTP(25), DNS(53), DHCP(67, 68),
    # TFTP(69), HTTP(80), SNMP(161, 162), BGP(179), HTTPS(443), RIP(520),
    # H.323(1719, 1720), HSRP(1985), XOT(1998), SCCP(2000), MGCP(2427),
    # RDP(3389), SIP(5060), VNC(5900)
    self.port_list = [20, 21, 22, 23, 25, 53, 67, 68, 69, 80, 161, 162,
                      179, 443, 520, 1719, 1720, 1985, 1998, 2000, 2427,
                      3389, 5060, 5900, 8080]
    self.http_get = 'GET /index.html HTTP/1.1 \r\n'
    self.http_host = 'Host: %s \r\n\r\n'
def cleanup_motion_region(motion_region):
    """
    Performs a series of drawings and image processing to a mask of the
    motion regions to minimize noise. Returns the cleaner mask and a list
    of the largest non-overlapping bounding boxes of the mask.
    (binary image) --> (binary image, list of boxes)
    """
    # convert to BGR so filled bounding boxes can be drawn on the mask
    image = cv2.cvtColor(motion_region, cv2.COLOR_GRAY2BGR)
    motion_contours = Motion.get_all_contours(image)
    motion_region = Motion.draw_all_bounding_boxes(image, motion_contours, filled=True)
    # second pass re-boxes the filled regions, merging overlaps.
    # NOTE(review): this pass draws onto `image` (already mutated by the
    # first pass) rather than onto the new `motion_region` — confirm intended.
    bbox_contours = Motion.get_all_contours(motion_region)
    motion_region = Motion.draw_all_bounding_boxes(image, bbox_contours, filled=True)
    # final touches: erode-then-dilate to smooth edges and drop specks
    motion_region = Tools.erode(motion_region, Tools.BIG_RECT_KERNEL)
    motion_region = Tools.dilate(motion_region, Tools.BIG_RECT_KERNEL)
    # get largest box here
    largest_boxes = []
    bbox_contours = Motion.get_all_contours(motion_region)
    for box in bbox_contours:
        largest_boxes += [cv2.boundingRect(box)]
    # back to single-channel for the caller
    motion_region = cv2.cvtColor(motion_region, cv2.COLOR_BGR2GRAY)
    return motion_region, largest_boxes
def exp2(model, individual, data, threshold, fifths_or_tenths, export_file, title):
    """Run experiment 2: simulate a model with an individual's weights and
    save its abilities matrix.

    model: 'dipm' or 'scpm' — selects both the config and base generator.
    individual: GA individual whose genome updates the model weights.
    threshold, fifths_or_tenths: passed through to the simulator/analyzer;
        fifths_or_tenths selects the 6-point or 11-point coordinate grid.
    export_file, title: where and how to save the resulting matrix plot.
    """
    conf = {}
    model_conf = {}
    if model == 'dipm':
        conf = ConfigExp2.config_dipm_exp2()
        model_conf = Models.get_dipm_base_generator()
    elif model == 'scpm':
        conf = ConfigExp2.config_scpm_exp2()
        model_conf = Models.get_scpm_base_generator()
    # we update the model's weights' configuration
    model_conf = Tools.update_conf(model_conf, data, individual)
    # we update the sim's configuration (same weights for both players)
    conf.update({'model_conf': {0: model_conf, 1: model_conf}})
    results = GaSimulator.run_sims(model, conf, fifths_or_tenths)
    matrix = GaSimulator.analyze_results(results, conf, threshold,
                                         fifths_or_tenths)
    # NOTE(review): `is Misc.FIFTHS` here vs `== Misc.FIFTHS` below —
    # identity vs equality; confirm Misc.FIFTHS is a singleton.
    goal = GoalMatrix.matrix_fifth(
    ) if fifths_or_tenths is Misc.FIFTHS else GoalMatrix.matrix_tenth()
    print('Fitness value: ' + str(Tools.value_for_fitness(matrix, goal)))
    coordinates_fifths = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]
    coordinates_tenths = [
        0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0
    ]
    coordinates = [coordinates_fifths, coordinates_fifths
                   ] if fifths_or_tenths == Misc.FIFTHS else [
                       coordinates_tenths, coordinates_tenths
                   ]
    Display.save_simple_abilities_matrix(matrix, title, export_file,
                                         coordinates)
def run():
    """Executes task 6 of the homework for lesson 1.

    Asks for a starting and a target distance, then counts how many days a
    10%-per-day improvement needs to reach the target.
    """
    print("\r\nЗадание 6\r\n")
    current_distance = input("Введите начальную дистанцию: ").replace(',', '.')
    if not tools.is_float(current_distance):
        print("Значение дистанции должно быть числовым")
        return None
    current_distance = float(current_distance)
    if current_distance <= 0:
        print("Значение дистанции должно быть больше нуля")
        # BUG FIX: previously fell through and kept running with an
        # invalid (non-positive) distance, causing an infinite loop risk
        return None
    aim_distance = input("Введите целевую дистанцию: ").replace(',', '.')
    if not tools.is_float(aim_distance):
        print("Значение дистанции должно быть числовым")
        return None
    aim_distance = float(aim_distance)
    if aim_distance < current_distance:
        print("Целевая дистанция не должны быть меньше начальной дистанции")
        return None
    current_day = 0
    # 10% compounded improvement per day
    while current_distance < aim_distance:
        current_day += 1
        current_distance += current_distance * 0.1
    print(
        f"Ответ: на {current_day}-й день спортсмен достиг результата — не менее {aim_distance} км."
    )
    print("Задание завершено.\r\n")
def __init__(self, addr, x, y, depth, energy, clock, verbose=False):
    """Initialise a UOAR sensor node.

    addr: node address; the node is a sink iff addr is in sinkNodesAddr.
    x, y, depth: 3-D position of the node.
    energy: initial and maximum energy budget.
    clock: shared Clock instance (used for TDMA rounds).
    verbose: enable debug output.
    """
    # BUG FIX: `is 'Clock'` compared string IDENTITY, which relies on
    # interning (implementation-defined, SyntaxWarning on 3.8+); use ==.
    assert clock.__class__.__name__ == 'Clock', 'Need a clock object'
    self.verbose = verbose
    self.inbox = []
    self.outbox = []  # pair [msg, number of transmissions]
    self.waitingACK = False
    self.isSink = addr in self.sinkNodesAddr
    self.clock = clock
    # for TDMA
    self.round = 0
    #
    self.addr = addr
    self.position = [x, y, depth]
    # Energy related
    self.energy = energy
    self.maxEnergy = energy
    self.criticalEnergy = False
    self.energyThresholds = [0.05, 0.2, 0.5]
    self.energyThreshold = energy * self.energyThresholds.pop()
    # for UOAR
    self.state = UOARState.INITIAL
    self.status = UOARStatus.IDLE
    self.oneighbors = {}
    self.numReachableNodes = 0
    self.highestScore = [0, INFINITY]  # pair [score, addr]
    self.nextHop = None
    self.nextHopDist = INFINITY
    self.hopsToSink = INFINITY
    self.stopWaiting = False
    # 0: not updating / 1: update in progress / 2: update done
    self.updateStatus = 0
    self.cheadList = {}  # to route phase [addr, is in route]
    self.cmemberList = []
    #
    self.score = 0
    self.greaterDistance = 0
    self.avgDistance = 0
    # for possible connection head-member
    self.minHopsToSink = INFINITY
    self.memberAlternative = None
    # for retransmissions
    self.numRetries = 0
    # for recovery (next hop is dead)
    self.msgsLostCount = 0
    self.msgsLostLimit = 2
    self.deadNode = None
    # for statistics
    self.recvdMsgsCounter = 0
    self.sentMsgsCounter = 0
    self.avgNumHops = 0
    self.maxNumHops = 0
    self.avgTimeSpent = 0
    self.maxTimeSpent = 0
    # precompute round-trip ACK times once (best for memory)
    self.acouticAck = MG.create_acoustic_ack(addr, 0)
    time, _ = Tools.estimate_transmission(self.acouticAck)
    self.acousticAckTime = 2 * time
    self.opticalAck = MG.create_optical_ack(addr, 0)
    time, _ = Tools.estimate_transmission(self.opticalAck)
    self.opticalAckTime = 2 * time
def cal(self, node_data=None):
    """Evaluate this node according to its pending mutation level.

    node_data: optional override for self.node_data (falsy -> use own).
    lv.3 mutation recomputes the primitive result from child nodes;
    lv.2 only re-applies the weight to the cached primitive result.
    All mutation tags are reset to False afterwards.
    Returns self.node_result.
    """
    if not node_data:
        node_data = self.node_data
    if self.lv_mut_tag[3]:  # lv.3: structural change -> full recompute
        func = node_data['inter_method']
        instance_list = node_data['method_ins']
        args = []
        for instance in instance_list:
            # copy so this node cannot mutate the child's cached result
            sig_series = instance.node_result.copy()
            args.append(sig_series)
        self.primitive_result = func(*args)
        self.node_result = Tools.cal_weight(self.primitive_result,
                                            node_data['weight'])
    elif self.lv_mut_tag[2]:  # lv.2: only the weight changed -> reweight
        self.node_result = Tools.cal_weight(self.primitive_result,
                                            node_data['weight'])
    for key in self.lv_mut_tag.keys():
        self.lv_mut_tag[key] = False  # reset mutation_tag
    return self.node_result
def spide_base_message(self, url):
    """Scrape a weibo.cn profile page for name and follower counters.

    Python 2 code (print statements, `except Exception, e`).
    Uses a Selenium browser already attached as self.browser; extracts the
    display name plus weibo/watch/fans counts via fixed XPaths, builds a
    Person and appends it to the output file via Tools.write_aperson.
    NOTE(review): the absolute XPaths are brittle against site redesigns.
    """
    # url_fanbb = 'https://weibo.cn/fbb0916'
    url_fanbb = url
    # counters appear bracketed on the page, e.g. "[123]"
    pattern = re.compile('\[(.*?)\]')
    try:
        self.browser.get(url_fanbb)
        selector = etree.HTML(self.browser.page_source)
        name = selector.xpath(
            '/html/body/div[4]/table/tbody/tr/td[2]/div/span[1]/text()'
        )[0].encode('utf-8')
        num_weibo = selector.xpath(
            '/html/body/div[4]/div/span/text()')[0].encode('utf-8')
        num_watch = selector.xpath(
            '/html/body/div[4]/div/a[1]/text()')[0].encode('utf-8')
        num_fans = selector.xpath(
            '/html/body/div[4]/div/a[2]/text()')[0].encode('utf-8')
        # strip the surrounding brackets from each counter
        num_weibo = re.findall(pattern, num_weibo)[0]
        num_watch = re.findall(pattern, num_watch)[0]
        num_fans = re.findall(pattern, num_fans)[0]
        aperson = Person(name, num_weibo, num_watch, num_fans)
        # call the method to write the message of person into file
        print '-------------------aperson-------------'
        print aperson
        Tools.write_aperson(aperson)
    except Exception, e:
        print e
def __init__(self, verbose=0):
    """Well known port search tool.

    Port list downloaded from
    http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.csv

    CSV file format:
        Service Name,Port Number,Transport Protocol,Description,...
        ftp-data,20,tcp,File Transfer [Default Data],[Jon_Postel],...
        ftp-data,20,udp,File Transfer [Default Data],[Jon_Postel],...

    Resulting dictionary format:
        [portnum_protocol]['name'] = name
        [portnum_protocol]['description'] = description

    Tested with Python ver 2.7.2 on Win7 & Win XP
    (c) 2012 - 2014 Intelligent Planet Ltd
    """
    # BUG FIX: the docstring originally appeared AFTER Tools.__init__(self),
    # so it was a no-op expression instead of the method's __doc__.
    Tools.__init__(self)
    self.verbose = verbose
    self.port_dict = {}
    # NOTE(review): backslash separator assumes Windows — TODO confirm
    self.path = os.getcwd() + '\\'
    self.load_file = self.path + 'ports.csv'
    self.load()
def __init__(self):
    """MAC lookup to identify the vendor from the OUI of the mac address.

    Usage example:
        >>> x = mac.Mac()
        >>> x.id_mac('0000.0c07.ac03')
        ('Cisco', 'CISCO SYSTEMS')

    Tested with Python ver 2.7.2 on Win7 & Win XP
    (c) 2012 - 2014 Intelligent Planet Ltd
    """
    # BUG FIX: the docstring originally appeared AFTER Tools.__init__(self),
    # so it was a no-op expression instead of the method's __doc__.
    Tools.__init__(self)
    # pick the separator style matching the current platform's cwd
    if "\\" in os.getcwd():
        self.path = os.getcwd() + "\\"
    else:
        self.path = os.getcwd() + "/"
    self.mac_file = self.path + "mac"
    self.mac_file_url = "http://anonsvn.wireshark.org/wireshark/trunk/manuf"
    self.oui_list = []
    try:
        self.load_mac()
    except Exception:
        # ROBUSTNESS FIX: was a bare `except:` (also caught KeyboardInterrupt).
        # Cache file missing/unreadable -> download the OUI list, then retry.
        # download mac oui list and save to file as per self.mac_file
        self.create_mac_file()
        self.load_mac()
def __init__(self):
    """Cisco show command parse tool and Db viewer.

    sh ip int brief format is:
        ['Interface', 'IP-Address', 'OK?', 'Method', 'Status', 'Protocol']

    Dict format example is:
        self.dict_db[self.hostname] = {}
        self.dict_db[self.hostname][command] = {}
        self.dict_db[self.hostname][command][interface] = {}
        self.dict_db[self.hostname][command][interface][ip-address] = value
    """
    # BUG FIX: the docstring originally appeared AFTER Tools.__init__(self),
    # so it was a no-op expression instead of the method's __doc__.
    Tools.__init__(self)
    self.verbose = 0
    ### set the path to the log and db files ###
    # BUG FIX: the original wrapped a plain assignment in try/except; an
    # assignment can never raise, so the fallback path was dead code.
    # Choose the path that actually exists instead.
    import os
    if os.path.isdir('H:/crt/sessions/'):
        self.path = 'H:/crt/sessions/'
    else:
        self.path = 'C:/Program Files/SecureCRT/Sessions/'
    self.load_file = self.path + 'log'
    self.db_file = self.path + 'show_db'
    self.load_dict()
    self.classify_dict = {'sh ip int brief': ['Interface', 'IP-Address']}
def __init__(self, verbose=0):
    """Cisco IP Cache Flow parse tool and Db viewer.

    Sample `show ip cache flow` output:
        SrcIf SrcIPaddress DstIf DstIPaddress Pr SrcP DstP Pkts
        Fa0/1 10.155.20.122 Se0/0:0.101* 10.7.10.124 01 0000 0000 219
        Fa0/1 10.155.20.123 Se0/0:0.101* 10.7.10.124 01 0000 0000 219
        Fa0/0 10.182.137.8 Se0/0:0.101* 62.239.26.81 06 C508 07D0 2

    Parsed dictionary layout:
        [flow_id] = {}
        [flow_id][SrcIPaddress] / [DstIPaddress] / [SrcIf] / [DstIf]
        [flow_id][Protocol] / [SrcPort] / [DstPort] / [Packets]

    Written by Peter Rogers (C) Intelligent Planet 2013
    """
    # BUG FIX: the docstring originally appeared AFTER Tools.__init__(self),
    # so it was a no-op expression instead of the method's __doc__.
    Tools.__init__(self)
    self.load_file = 'c:/cache_load'
    self.limit = 200
    self.verbose = verbose
    self.cache_dict = {}
    self.display_heading = '\nSrcIPaddress SrcPort DstIPaddress DstPort Protocol Packets\n'
def createNew(self):
    """Register a fresh guest account with the Hub API.

    Populates the session cookies with a fake iPad identity, performs the
    registration/key/generate/auth handshake and returns (guest_uid, did).
    """
    # reuse an existing device id when available, otherwise invent one
    device_id = str(self.did) if self.did else str(random.randint(200000000, 300000000))
    identity = {
        'advertising_id': Tools().rndDeviceId(),
        'appid': 'com.com2us.smon.normal.freefull.apple.kr.ios.universal',
        'device': 'iPad5,4',
        'did': device_id,
        'native_version': 'Hub v.2.6.4',
        'osversion': '10.2',
        'platform': 'ios',
        'vendor_id': Tools().rndDeviceId(),
    }
    self.s.cookies.update(identity)
    self.registered()
    key_response = self.key()
    gen_response = self.generate(key_response['public_key'],
                                 key_response['signature'])
    self.auth(gen_response['guest_uid'])
    return gen_response['guest_uid'], gen_response['did']
def __init__(self, verbose=0):
    """Well known port search tool.

    Port list downloaded from
    http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.csv

    CSV file format:
        Service Name,Port Number,Transport Protocol,Description,...
        ftp-data,20,tcp,File Transfer [Default Data],[Jon_Postel],...
        ftp-data,20,udp,File Transfer [Default Data],[Jon_Postel],...

    Resulting dictionary format:
        [portnum_protocol]['name'] = name
        [portnum_protocol]['description'] = description

    Written by Peter Rogers (C) Intelligent Planet 2013
    """
    # BUG FIX: the docstring originally appeared AFTER Tools.__init__(self),
    # so it was a no-op expression instead of the method's __doc__.
    Tools.__init__(self)
    self.verbose = verbose
    self.port_dict = {}
    self.load_file = 'c:/ports.csv'
    self.load()
def __init__(self, ip="", hostname="", out_dict="", auth_con=""):
    """Interface to the telnet library."""
    Tools.__init__(self)
    Vty.__init__(self, hostname, out_dict)
    self.ip = ip
    self.hostname = hostname.lower()
    self.port = 23
    self.auth_con = auth_con
    self.user = ""
    self.password = ""
    self.path = os.getcwd() + "\\"
    self.newline = "\r\n"
    self.space = " "
    self.verbose = 1
    self.read_until_timeout = 2
    self.timeout = 5
    self.sleep = 0.1
    # set the values for Cisco as default
    # NOTE(review): this section of the original source was redacted
    # ('"Username: "******"Password: "******"q"' is not valid Python).
    # The attribute names and values below are a best-effort
    # reconstruction — TODO confirm against the callers.
    self.login_text = "Username: "
    self.password_text = "Password: "
    self.quit = "q"
    self.banner_list = ["\d+>", "--More--", "\d+#"]
    self.more = "--More--"
def dump_rttm(self, scores):
    """Write one RTTM file per non-empty i-vector set under self.out_dir.

    scores: dict keyed by ivecset name; scores[name].T[i] holds the
    per-speaker scores for window i (argmax selects the speaker index).
    Window start/end are in milliseconds; RTTM wants seconds, hence /1000.
    """
    for ivecset in self.ivecs:
        if ivecset.size() > 0:
            name = ivecset.name
            # dirty trick, will be removed, watch out
            if 'beamformed' in ivecset.name:
                ivecset.name = re.sub('beamformed/', '', ivecset.name)
            # # # # # # # # # # # # # # # # # # # # #
            # recording name = everything before the first '/'
            reg_name = re.sub('/.*', '', ivecset.name)
            Tools.mkdir_p(os.path.join(self.out_dir, os.path.dirname(name)))
            with open(os.path.join(self.out_dir, name + '.rttm'), 'w') as f:
                for i, ivec in enumerate(ivecset.ivecs):
                    start, end = ivec.window_start, ivec.window_end
                    idx = np.argmax(scores[name].T[i])
                    # RTTM: SPEAKER <file> 1 <onset> <duration> ... <spkr>
                    f.write(
                        'SPEAKER {} 1 {} {} <NA> <NA> {}_spkr_{} <NA>\n'.
                        format(reg_name, float(start / 1000.0),
                               float((end - start) / 1000.0), reg_name,
                               idx))
        else:
            logwarning(
                '[Diarization.dump_rttm] No i-vectors to dump in {}.'.
                format(ivecset.name))
class Tomato:
    """Pomodoro session holder: user login state plus timer defaults."""

    def __init__(self):
        self.username = None
        self.password = None
        self.login_succ = False  # True once log_in() validates credentials
        self.mins = 25           # default pomodoro length (minutes)
        self.secs = 0
        self.tools = Tools()
        self.user = User()
        self.now_issue = None
        self.now_time = None

    def add_user(self, uname, upassword):
        """Register a new user; returns whatever Tools.add_user reports."""
        return self.tools.add_user(uname, upassword)

    def log_in(self, uname, upassword):
        """Validate credentials; on success bind a User and return True."""
        self.username = uname
        self.password = upassword
        # IDIOM FIX: test the result directly instead of `== True`
        if self.tools.check(self.username, self.password):
            self.login_succ = True
            self.user = User(self.username)
            return True
        else:
            self.login_succ = False
            return False
def __init__(self, user, password):
    """Store credentials and a shared HTTP session for Coursera scraping."""
    self.user = user
    self.password = password
    self.tools = Tools()
    self.urlUtil = UrlUtil()
    # one session so login cookies persist across requests
    self.session = requests.Session()
    self.courseUrl = 'https://www.coursera.org/learn/hipython'
def generate_stats(self):
    """Aggregate speech statistics per speaker plus a 'whole' total,
    then compute the median and average per-speaker counts."""
    self.statistics["whole"] = SpeakerStatistics("whole")
    for speech in self.get_speech_list():
        self.statistics["whole"].add_speech(speech.get_length())
        # normalise the key so "SMITH" and "SMITH." count as one speaker
        speaker = speech.get_speaker()
        if "." not in speaker:
            speaker = speaker + "."
        stats = self.statistics.get(speaker)
        if stats is None:
            stats = SpeakerStatistics(speaker)
            self.statistics[speaker] = stats
        stats.add_speech(speech.get_length())
    # median and mean speech count across all real speakers
    for key, value in self.statistics.items():
        if key != "whole":
            self.counts.append(value.get_count())
    helper = Tools()
    self.median_count = helper.calc_median(self.counts)
    self.average_count = helper.calc_average(helper.calc_sum(self.counts),
                                             len(self.counts))
def __init__(self):
    """Wire up helpers for the HanSight threat-intelligence bulk API."""
    self.tool = Tools()
    # HanSight_TI_API bulk-query endpoint
    self.ti_url = 'http://172.16.100.32:80/ti/simplebulkapi'
def __init__(self):
    """Dictionary storage method.

    Use # followed by csv fields to define headers,
    e.g. #host, ip, model, serial number

    dictionary format example:
        dict_db[unique numeric key] = {}
        dict_db[unique numeric key]['host'] = 'router-01'
        dict_db[unique numeric key]['ip'] = '1.1.1.1'
        dict_db[unique numeric key]['model'] = 'cisco 2811'

    dictionary search index:
        search_db['router-01'] = {'tag': 'host', 'key': unique numeric key}
        search_db['1.1.1.1'] = {'tag': 'ip', 'key': unique numeric key}

    Host entries are appended to the self.index list to provide a way to
    check for unique entries when adding data. Data entries are appended
    to the register if unique; if not, the key number is added to the
    entry, i.e. 'Cisco #123'.

    Written by Peter Rogers (C) Intelligent Planet 2013
    """
    # BUG FIX: the docstring originally appeared AFTER Tools.__init__(self),
    # so it was a no-op expression instead of the method's __doc__.
    Tools.__init__(self)
    self.verbose = 1
    self.space_size = 18
    self.index = []
    self.index_db = {}
    self.register = {}
    self.dict_db = {}
    self.search_db = {}
def compute_motion_region(self):
    """
    Computes a mask of the regions of the scene that are in motion.
    Returns the mask and a list of the largest non-overlapping bounding
    boxes of the motion regions
    (none) --> (binary image, list of boxes)
    """
    motion_region = None
    largest_boxes = None
    # NOTE(review): the NONE and SIMPLE branches are currently identical
    # (apart from the commented-out tighter threshold) — confirm whether
    # NONE was meant to behave differently.
    if Motion.BGS_MODE == Motion.NONE:
        motion_region = self.motion_detector.get_motion_region(self.cur_frame)
        # keep only sufficiently strong motion responses
        motion_region = cv2.inRange(motion_region, 20, 255)
        motion_region = Tools.dilate(motion_region, Tools.MID_RECT_KERNEL)
        motion_region, largest_boxes = Motion.cleanup_motion_region(motion_region)
    elif Motion.BGS_MODE == Motion.SIMPLE:
        motion_region = self.motion_detector.get_motion_region(self.cur_frame)
        motion_region = cv2.inRange(motion_region, 20, 255)
        # to get less of the face
        # motion_region = cv2.inRange(motion_region, 25, 255)
        motion_region = Tools.dilate(motion_region, Tools.MID_RECT_KERNEL)
        motion_region, largest_boxes = Motion.cleanup_motion_region(motion_region)
    elif Motion.BGS_MODE == Motion.MOG:
        # MOG background subtractor maintains its own model; slow learning rate
        motion_region = self.motion_detector.apply(self.cur_frame, learningRate=1./200)
        motion_region = Tools.dilate(motion_region, Tools.MID_RECT_KERNEL)
        motion_region, largest_boxes = Motion.cleanup_motion_region(motion_region)
    return motion_region, largest_boxes
def __extract_user_info(self, url_list, output):
    """Extract Instagram profile information for each user in a list file.

    url_list: path of a file with one username per line.
    output: path of the JSON file the collected profiles are written to.
    """
    try:
        # obtain usernames from the list file
        with open(url_list, 'r', encoding="utf-8") as file:
            self.content = file.readlines()
        if self.content != '':
            self.content = [x.strip() for x in self.content]
            for user in self.content:
                # NOTE: 'obtain_uer_info' [sic] is the project API's name
                profile_info = InstagramUser.obtain_uer_info(
                    None, user_name=user)
                self.info_arr.append(Tools.packer(
                    User_Name=profile_info.username,
                    User_Id=profile_info.userid,
                    Bio=profile_info.biography,
                    Profile_Image=profile_info.profile_pic_url,
                    Posts_Count=InstagramUser.obtain_user_post_count(
                        profile_info.username, self.ctx),
                    Followers=profile_info.followers,
                    Following=profile_info.followees,
                    Is_Private=profile_info.is_private,
                    Is_Verified=profile_info.is_verified))
                print("took {}'s information".format(user))
                print('-' * 50)
            Tools.write_json(output, self.info_arr)
    except Exception as err:
        # BUG FIX: was a bare `except:` that hid every failure;
        # report the cause alongside the original message
        print('Occurred some errors: {}'.format(err))
def __init__(self, config_file_path):
    """Interactive entry point: prompt for an operation and dispatch.

    config_file_path: path to the scraper's JSON/INI settings file.
    Operations: 2 = user profiles, 3 = hashtag data, 4 = download files.
    """
    try:
        Tools.clean()
        # disable TLS verification for scraping
        # NOTE(review): insecure — acceptable only for a throwaway scraper
        self.ctx = ssl.create_default_context()
        self.ctx.check_hostname = False
        self.ctx.verify_mode = ssl.CERT_NONE
        self.info_arr = []
        self.config_file_path = config_file_path
        self.arr_meta_data_info = []
        # choose operation
        self.__choose_operation()
        user_input = int(input(">> "))
        if user_input not in (2, 3, 4):
            return
        # obtain config's information
        config_file = self.__load_settings(self.config_file_path)
        if user_input == 2:
            # obtain user's info
            self.__extract_user_info(
                config_file["users_list"], config_file["user_output"])
        elif user_input == 3:
            # obtain image links from hashtags
            self.__extract_hashtag_data(config_file)
        elif user_input == 4:
            # download files
            self.__downloader(config_file['hashtag_output'], config_file)
    except Exception as err:
        # BUG FIX: was a bare `except:` that hid every failure;
        # report the cause alongside the original message
        print('Occurred some errors: {}'.format(err))
def trans_unicode_list2atom_gram_list(unicode_list) :
    """Group a list of unicode chars into WSAtom grams.

    Consecutive Latin letters (Unicode Lu/Ll) accumulate into a run; a
    non-letter char closes the current accumulation as one atom.
    NOTE(review): the terminating non-letter char is included in the same
    atom as the preceding letter run (ENG_TYPE) — confirm intended.
    """
    atom_list = []
    idx = 0
    unicode_num = len(unicode_list)
    atom_unicode_list = []
    while idx < unicode_num :
        current_unicode = unicode_list[idx]
        atom_unicode_list.append(current_unicode)
        if not Tools.is_unicode_Lu_Ll_char(current_unicode) :
            type_str = ""
            if len(atom_unicode_list) > 1 :
                # a letter run was pending -> emit as English-type atom
                type_str = ENG_TYPE
            else :
                # single non-letter char -> type from its Unicode category
                type_str = Tools.get_unichr_type(atom_unicode_list[0])
            #if type_str not in TYPES :
            #    TYPES.append(type_str)
            #    print type_str
            #    print WSAtomTranslator.trans_atom_gram_list2unicode_line(atom_unicode_list).encode("utf-8")
            atom_list.append(WSAtom(atom_unicode_list , type_str))
            atom_unicode_list = []
        idx += 1
    else :
        # while-else: runs when the loop exhausts normally (there is no
        # break, so always); flushes a trailing letter run
        if len(atom_unicode_list) > 0 :
            # here it must be Lu or Ll Type char
            atom_list.append(WSAtom(atom_unicode_list , ENG_TYPE ))
            atom_unicode_list = []
    return atom_list
def __init__(self): self.tools = Tools() # 右上角 筛选 self.shaixuan = (906, 37) # 热门 self.shoucang = (969, 363) # 姑获鸟 (选择第一个) self.guhuoniao = (812, 208) # 以津真天 (选择第二个) self.yijin = (807, 312) # 山童 (选择第三个) self.shantong = (822, 417) # 挑战 self.tiaozhan = (907, 424) # 40级按钮坐标 self.jibie_40 = (327, 236) # 1级按钮坐标 self.jibie_1 = (102, 241)
def find_start_headless_index(self):
    """Return the index (13..15) whose headless address matches the first
    known address; log and return '' when none matches."""
    wanted = self.headless_addresses[0]
    for candidate in range(13, 16):
        if self.read_headless_address("headless{0}".format(candidate)) == wanted:
            return candidate
    Tools.log("Can not find start headless!")
    return ""
def run():
    """Scrape both Douban book charts (subcat I and F) and return the
    accumulated most-watched list."""
    pages = [tools.get_html('https://book.douban.com/chart?subcat=' + subcat)
             for subcat in ('I', 'F')]
    for page in pages:
        get_book_list(page)
    return most_watched_book_list
def __init__(self, taxonomy, protein):
    """Hold a taxonomy/protein query pair plus NCBI helpers.

    dataset/fasta/summary start as None and are filled by later steps.
    """
    self.Taxonomy = taxonomy
    self.Protein = protein
    self.ncbi_api = Retrieve()
    self.tools = Tools()
    # populated later by the retrieval pipeline
    self.dataset = None
    self.fasta = None
    self.summary = None
def set_widget_actions(self):
    """Connect the file-load and sector-view buttons to their handlers."""
    # NOTE: .connect() returns a connection handle; that handle is what
    # gets stored on self here (matching the original behaviour)
    self.crystalFile = self.loadCrystalFileButton.clicked.connect(
        lambda: Tools().open_file(self))
    self.instrumentFile = self.loadInstrumentFileButton.clicked.connect(
        lambda: Tools().open_file(self))
    # bind each sector button to its view name (default arg pins the value)
    self.sectorNucButton.clicked.connect(lambda view='nuc': self.showView(view))
    self.sectorMagButton.clicked.connect(lambda view='mag': self.showView(view))
class Coursera:
    """Scraper that logs into Coursera and walks a course's weekly pages."""

    def __init__(self, user, password):
        self.tools = Tools()
        self.urlUtil = UrlUtil()
        self.user = user
        self.password = password
        # single session so the login cookies persist across requests
        self.session = requests.Session()
        self.courseUrl = 'https://www.coursera.org/learn/hipython'
        # self.courseUrl = 'https://www.coursera.org/learn/da-xue-hua-xue'

    def login(self):
        """POST the stored credentials; returns the response body text.

        NOTE(review): the CSRF token is hard-coded and will expire —
        confirm how it is supposed to be refreshed.
        """
        url = 'https://www.coursera.org/api/login/v3Ssr?csrf3-token=1510823580.eRKpKzepl351P5ij&src=undefined'
        param = {'email': self.user, 'password': self.password}
        header = {
            'Origin': 'https://www.coursera.org',
            'Referer': self.courseUrl + '/home/welcome'
        }
        cookies = {'CSRF3-Token': '1510823580.eRKpKzepl351P5ij'}
        response = self.urlUtil.post(url, param, self.session, cookies=cookies, header=header)
        response.encoding = 'utf-8'
        # write the page to a file to make offline analysis easier
        # self.tools.writeFile('index.html',response.text)
        return response.text

    # fetch the course title
    def getTitle(self, soup):
        """Extract, print and return the course title from the home page."""
        title = soup.select(
            'h1[class="course-name color-primary-text display-3-text"]')[0]
        title = self.tools.removeTag(str(title))
        print('发现课程:《' + title + '》\n')
        return title

    def getWeekUrls(self, soup):
        """Collect one <courseUrl>/home/week/<n> URL per week found."""
        weeks = soup.select('div[class="week-number body-2-text flex-1"] span')
        # NOTE(review): `pattern` is unused below (re.sub uses '\D' directly)
        pattern = re.compile(r'(\d)', re.S)
        weekUrls = []
        for week in weeks:
            week = self.tools.removeTag(str(week))
            # keep only the digits of the week label
            week = re.sub('\D', '', week)
            weekUrls.append(self.courseUrl + '/home/week/' + str(week))
        return weekUrls

    def start(self):
        """Interactive entry point: ask for a course URL, log in, scrape."""
        self.courseUrl = input(
            '请输入课程主页地址(例https://www.coursera.org/learn/hipython):\n')
        print('\n')
        content = self.login()
        soup = BeautifulSoup(content, 'lxml')
        self.getTitle(soup)
        weekUrls = self.getWeekUrls(soup)
        weekcourse = WeekCourse(weekUrls, self.session)
        weekcourse.start()
def get_fix_stage(self):
    """Collect the text of every <stage> element carrying the
    'fixed-stage' attribute from the drama-analysis namespace."""
    helper = Tools()
    fixed_attr = ("{http://www.germanistik.uni-wuerzburg.de/lehrstuehle/"
                  "computerphilologie/dramenanalyse}fixed-stage")
    collected = []
    for element in self.root.findall(".//" + self.prefix + "stage"):
        if fixed_attr in element.attrib:
            collected.append(helper.unicode_safe(element.text))
    return collected
def __init__(self):
    """Open the SQLite database, ensure the schema exists and prep helpers."""
    self.conn = sqlite3.connect(DATABASE_FILE)
    self.conn.text_factory = str      # return plain str rows
    self.conn.row_factory = sqlite3.Row  # allow column access by name
    self.cursor = self.conn.cursor()
    self.create_table()
    self.tools = Tools()
def __init__(self, file_name):
    """Record paths for a video file and ensure the sound output dir exists.

    file_name: the video file name including its extension; the internal
    self.file_name is stored WITHOUT the extension.
    """
    self.file_path = video_path + file_name
    # BUG FIX: the old code did file_name.replace('.' + suffix, ''), which
    # strips the FIRST occurrence of '.suffix' anywhere in the name
    # (e.g. 'a.mp4b.mp4' -> 'ab.mp4'). Split off only the final extension.
    self.file_name = file_name.rsplit('.', 1)[0]
    self.sound_file_path = sound_dir
    # create the output directory if it does not exist yet
    tools = Tools()
    tools.mkdir(self.sound_file_path)
def rename(self) :
    """Move self.src to its computed destination, then de-duplicate.

    NOTE(review): after a successful move, self.src is rebound to dest, so
    db_exists() inspects — and on a hit deletes — the file that was JUST
    moved. Confirm this is the intended duplicate-removal behaviour.
    """
    dest = self.make_dest()
    if dest is not None :
        tool = Tools(self.src, dest)
        tool.move(True)
        self.src = dest
        # duplicate already registered in the db -> drop the moved copy
        if self.db_exists(self.src) :
            print('{0} already exists, removing '.format(self.src))
            os.remove(self.src)
def to_work(self) :
    """Move freshly downloaded archives into the workspace '#rename' folder."""
    print('Moving to working space')
    rename_dir = os.path.join(self.work_new, '#rename')
    for entry in os.listdir(self.dl_tmp) :
        candidate = os.path.join(self.dl_tmp, entry)
        # skip control folders (leading '#') and non-archive files
        if not entry.startswith('#') and Tools.is_archive(candidate) :
            Tools(candidate, os.path.join(rename_dir, entry)).move()
    print('-'*150)
def site_getDevices():
    """Return a JSON map of every device owned by the logged-in user."""
    if not g.user:
        return jsonify({"error": "not loggued"}), 403
    store = get_db()
    user_obj = Tools.getData(store, "user_" + g.user)
    if len(user_obj) == 0:
        return jsonify({"error": "internal error, user object is empty"}), 500
    # resolve each registered device id to its stored record
    device_map = {}
    if "devices" in user_obj:
        device_map = {dev: Tools.getData(store, "device_" + dev)
                      for dev in user_obj["devices"]}
    return jsonify({"devices": device_map})
def __init__(self):
    Tools.__init__(self)
    """
    English language lookup tools
    Word list from http://www-01.sil.org/Linguistics/wordlists/english/wordlist/wordsEn.txt
    (c) 2012, 2013 Intelligent Planet Ltd
    """
    # Lower-case alphabet used by the lookup helpers.
    self.alphabet = 'abcdefghijklmnopqrstuvwxyz'
    # Word list loaded from disk; load() is presumably provided by Tools.
    # NOTE(review): hard-coded Windows Python 2.7 path — confirm on deploy.
    self.eng_dict = self.load('c:/python27/wordsEn.txt')
    # Per-letter frequency table computed over the loaded word list.
    self.freq = self.letter_frequency(self.eng_dict)
def parse(self):
    """Scan every watched folder and move each included entry into the
    destination directory, overwriting anything already there."""
    for folder in self.watchs:
        if not os.path.isdir(folder):
            print('{0} not a folder'.format(folder))
            continue
        for entry in os.listdir(folder):
            if self.is_exclude(entry):
                continue
            if not self.is_include(entry):
                continue
            mover = Tools(os.path.join(folder, entry),
                          os.path.join(self.dest, entry))
            mover.move(force=True)
def __init__(self, verbose=1, maxq=200):
    Net.__init__(self)
    Tools.__init__(self)
    """
    Multithreaded network tools
    """
    # 0 = quiet; higher values enable progress output.
    self.verbose = verbose
    # Upper bound on the work queue size — presumably consumed by the
    # threaded workers; confirm against the queue user.
    self.maxq = maxq
    # Timeout in seconds — presumably a per-socket timeout; confirm.
    self.timeout = 0.2
    # self.buffers = 256 #for check_port
def parse_dl(self):
    """Run the base-class download parsing, then flatten any directory
    left in the download tmp folder: pull its media files up into the
    tmp folder and delete the emptied directory."""
    plan.Plan.parse_dl(self)
    for entry in os.listdir(self.dl_tmp):
        # Entries starting with '#' are control folders, not downloads.
        if entry[0] == '#':
            continue
        path = os.path.join(self.dl_tmp, entry)
        if not os.path.isdir(path):
            continue
        Tools(path, self.dl_tmp).move_sub_into(match_func=Tools.is_media)
        print('Removing ... : {0}'.format(path))
        shutil.rmtree(path)
    tools.split_line()
def to_work(self):
    """Promote staged downloads: media inside '#move' goes to the new
    working folder, and a non-empty '#remove' folder is wiped and
    recreated empty."""
    print('Moving to working space')
    move_dir = os.path.join(self.dl_tmp, '#move')
    if os.path.isdir(move_dir):
        Tools(move_dir, self.work_new).move_sub_into(match_func=Tools.is_media)
    remove_dir = os.path.join(self.dl_tmp, '#remove')
    if os.path.isdir(remove_dir) and len(os.listdir(remove_dir)) > 0:
        print('Removing {0}'.format(remove_dir))
        shutil.rmtree(remove_dir)
        os.makedirs(remove_dir)
    tools.split_line()
def update(self):
    """Advance the active control rod(s) one step toward their targets
    and record the resulting reactivity change in data['delta_rho'].

    The temp and power controllers run the identical stepping logic
    (previously duplicated inline); each active controller moves its rod
    one position toward its target.  A controller already on target is
    deactivated and leaves rho untouched — in particular, if the temp
    step produced a value and the power rod is on target, the temp value
    survives, matching the original control flow.
    """
    rho = 0
    if self.data['is_temp_active']:
        step = self._step_rod('temp')
        if step is not None:
            rho = step
    if self.data['is_power_active']:
        step = self._step_rod('power')
        if step is not None:
            rho = step
    self.data['delta_rho'] = rho * self.random_item

def _step_rod(self, kind):
    """Move the rod driven by controller *kind* ('temp' or 'power') one
    position toward its target.

    Returns the reactivity delta — the average of the rod-worth table at
    the old and new positions, negated when withdrawing — or None after
    deactivating the controller when the rod is already on target.
    """
    rod = self.data['current_' + kind + '_rod'].lower()
    pos_key = rod + 'pos'  # NOTE: no underscore in this key, unlike '_value'
    table = self.data[rod + '_value']
    target = self.data[kind + '_target']
    pos = self.data[pos_key]
    if pos < target:
        self.data[pos_key] = pos + 1
        return (Tools.lookup(table, pos) + Tools.lookup(table, pos + 1)) / 2
    if pos > target:
        self.data[pos_key] = pos - 1
        return -(Tools.lookup(table, pos) + Tools.lookup(table, pos - 1)) / 2
    self.data['is_' + kind + '_active'] = False
    return None
def site_subscribe():
    """Create a new user from the posted email/password pair.

    Responds 403 when a user is already logged in, 400 on missing or
    empty credentials or when the user already exists.
    """
    if g.user:
        return jsonify({"error": "already loggued"}), 403
    # FIX: the original 'not "email" in form and len(form["email"]) > 0'
    # never rejected a present-but-empty value, and evaluated
    # form["email"] precisely when the key was missing.  A field must be
    # rejected when missing OR empty.
    if "email" not in request.form or len(request.form["email"]) == 0:
        return jsonify({"error": "email not present"}), 400
    email = request.form["email"]
    if "password" not in request.form or len(request.form["password"]) == 0:
        return jsonify({"error": "password not present"}), 400
    password = request.form["password"]
    r = get_db()
    if len(Tools.getData(r, "user_" + email)) > 0:
        return jsonify({"error": "user already exists"}), 400
    # NOTE(review): unsalted SHA-1 is weak for password storage; kept for
    # compatibility with existing records — migrate to a real KDF.
    # (hashlib.sha1(password) on a str implies Python 2 — confirm.)
    Tools.setData(r, "user_" + email,
                  {"email": email,
                   "password": hashlib.sha1(password).hexdigest()})
    return jsonify({"subscribed": "true"})
def get_all_speech(self):
    """Build a Speech object for every named speaker in the document.

    Each <speaker> element with non-empty text contributes one Speech
    per <sp> block attributed to its xml:id.
    """
    converter = Tools()
    speaker_nodes = self.root.findall(".//" + self.prefix + "speaker")
    result = []
    for node in speaker_nodes:
        # Skip speaker tags that carry no text at all.
        if node.text is None:
            continue
        name = converter.unicode_safe(node.text)
        xml_id = node.attrib["{http://www.w3.org/XML/1998/namespace}id"]
        for sp in self.get_sp_by_speaker_id(xml_id):
            utterances = self.get_all_speech_by_speaker(sp)
            result.append(Speech(name, xml_id, utterances))
    return result
def get_current_saving_data(self):
    """Assemble the serialisable model state as a dict.

    Flushes the lazily-accumulated weight sums first (so W_time can be
    reconstructed from time_now when loading), then strips zero-valued
    entries from the weight vectors to keep the snapshot small.
    """
    if not self.is_flushed:
        self._flush_time()
    return {
        'emit_feature_space': self.emit_feature_space,
        'label_space': self.label_space,
        'w': Tools.clear_zero_value(self.W),
        'w_sum': Tools.clear_zero_value(self.W_sum),
        'w_size': self.W_size,
        'time_now': self.time_now,
    }
def failingPeaks(path):
    """Count failing (None-valued) peaks per sample and per peak name.

    Args:
        path: data-set path handed to DataLoader.getData.

    Returns:
        (people, peaks) — ``people`` maps the sample index (as a string)
        to its number of failing features; ``peaks`` maps the peak name
        (second dot-separated token of the feature key) to how many
        samples it failed in.
    """
    from DataLoader import getData
    data = getData(path, None, True)
    samples = tls.getDictArray(data.Features)
    people = dict()
    peaks = dict()
    for i, sample in enumerate(samples):
        for f in sample.keys():
            if sample[f] is None:
                peak = f.split('.')[1]
                # FIX: was Py2-only backtick repr (`i`); str(i) is portable.
                key = str(i)
                people[key] = people.get(key, 0) + 1
                # FIX: original tested 'f in peaks' but indexed peaks[peak],
                # so the counts were never keyed consistently; also removed
                # the dead countedPeaks list ('+= peak' extended it with
                # individual characters and it was reset every iteration).
                peaks[peak] = peaks.get(peak, 0) + 1
    # FIX: the original computed both dicts and silently discarded them.
    return people, peaks
def update_model(self , gold_feature , predict_feature) :
    '''
    Update model
    1. W += ( gold_feature - predict_feature )
    2. W_sum += W ( fake )
    3. W_time update
    Args :
        gold_feature : dict (sparse vector)
        predict_feature : dict (sparse vector)
    '''
    # Averaged-perceptron update: advance the logical clock, then apply
    # the sparse difference (gold - predict) to W while lazily keeping
    # W_sum consistent.  Only features touched here are settled now;
    # untouched features are settled later by _flush_time().
    self._increase_time()
    diff = Tools.sparse_vector_sub(gold_feature , predict_feature)
    for f_idx in diff :
        assert(isinstance(f_idx , int) and f_idx < self.W_size)
        update_value = diff[f_idx]
        current_value = self.W[f_idx] if f_idx in self.W else 0
        #! update W
        self.W[f_idx] = current_value + update_value
        #! update W_sum
        # Lazy-sum trick: the old weight has been in effect for
        # (time_now - last update time) steps, so credit all of it at
        # once before adding this step's fresh contribution.
        # NOTE(review): assumes W_time already has an entry for every
        # f_idx — presumably pre-initialised; confirm.
        previous_update_time = self.W_time[f_idx]
        keeping_time = self.time_now - previous_update_time
        current_sum_value = self.W_sum[f_idx] if f_idx in self.W_sum else 0
        self.W_sum[f_idx] = current_sum_value + keeping_time * current_value + update_value
        #! update W_time
        self.W_time[f_idx] = self.time_now
    # The accumulated sums are now stale relative to any later flush point.
    self.is_flushed = False
def __init__(self, ip, name, port, auth_con=''):
    """Store the connection parameters for one network device.

    Args:
        ip: management IP address of the device
        name: device name
        port: TCP port to connect on
        auth_con: optional authentication/connection string — purpose not
                  visible here; confirm against the telnet/ssh helpers.
    """
    Tools.__init__(self)
    self.ip = ip
    self.name = name
    self.port = port
    self.auth_con = auth_con
    # Non-zero enables progress output.
    self.verbose = 1
    # Timeouts in seconds for opening a telnet session and for individual
    # commands; telnet_sleep is presumably the poll interval between reads.
    self.telnet_timeout = 2
    self.telnet_cmd_timeout = 5
    self.telnet_sleep = 0.1
    # Windows-style working directory; SSH host keys are cached beside it.
    self.path = os.getcwd() + '\\'
    self.ssh_key_file = self.path + 'ssh_host_keys'
def phone_fetchcode():
    """Register a device waiting for pairing and hand back a short code.

    Expects 'id' and 'key' in the POSTed form; stores the device secret
    plus a unique code in Redis and returns the code as JSON.
    """
    if "id" not in request.form:
        return jsonify({"error": "id not present"}), 400
    if "key" not in request.form:
        return jsonify({"error": "key not present"}), 400
    _id = request.form['id']
    _key = request.form['key']
    # + key pour signer WS
    r = get_db()
    key = Tools.generateSimpleHash()
    # FIX: the retry call was a bare generateSimpleHash() — a NameError at
    # runtime; it must go through Tools like the initial call.
    while r.hexists("waiting_devices", key):
        key = Tools.generateSimpleHash()
    r.hset("waiting_devices", key, _id)
    print("fetchcode: device_" + _id + ", secret: " + _key + ", 6digits" + key)
    # rajouter un expire
    Tools.setData(r, "device_" + _id, {"secret": _key, "6digits": key})
    return jsonify({"hash": key})