def shutdown():
    ''' Shut down VMs from the current lab '''
    log.info("Shutting down VMs for lab %s" % LAB)
    vms = local("virsh list --name | grep '%s'" % LAB_ID, capture=True)
    for vm in vms.stdout.splitlines():
        local("virsh shutdown {vm_name}".format(vm_name=vm))
        log.info('VM %s was shut down' % vm)
def prepare(topo, distro="centos7"):
    ''' Prepare VMs for installing Openstack with Packstack: aio, 2role, 3role '''
    log.info("Preparing virtual machines for lab=%s" % LAB)
    disk = REDHAT_DISK
    if not disk:
        disk = {
            'centos7': CENTOS7_DISK,
            'centos65': CENTOS65_DISK,
            'fedora20': FEDORA20_DISK,
            'rhel7': RHEL7_DISK
        }.get(distro, None)
    if not disk:
        raise NameError("Please choose distro from 'centos7', 'centos65',"
                        " 'fedora20', 'rhel7'")
    topo_file = {
        "aio": "aio_rh_topology.yaml",
        "2role": "rh_2role_topology.yaml",
        "3role": "rh_3role_topology.yaml"
    }.get(topo, None)
    if not topo_file:
        raise NameError("Please choose topology from 'aio', '2role', '3role'")
    log.info("Running {topo} Openstack with distro {disk}".format(topo=topo, disk=disk))
    url = IMAGES_REPO + disk
    local("test -e %s || wget -nv %s" % (disk, url))
    local("python ./tools/cloud/create.py -l {lab} -s /opt/imgs "
          "-z ./{disk} -r redhat -c "
          "./tools/cloud/cloud-configs/{topo} > "
          "config_file".format(lab=LAB, disk=disk, topo=topo_file))
def clear():
    """ Make space for progress bars """
    with common.term.location():
        logger.info('Please wait while QARK tries to decompile the code back '
                    'to source using multiple decompilers. This may take a while.')
        print("\n" * 11)
def generate_tool_conf(parsed_ctds, tool_conf_destination, galaxy_tool_path, default_category):
    # for each category, we keep a list of models corresponding to it
    categories_to_tools = dict()
    for parsed_ctd in parsed_ctds:
        category = strip(parsed_ctd.ctd_model.opt_attribs.get("category", ""))
        if not category.strip():
            category = default_category
        if category not in categories_to_tools:
            categories_to_tools[category] = []
        categories_to_tools[category].append(utils.get_filename(parsed_ctd.suggested_output_file))

    # at this point, we should have a map for all categories->tools
    toolbox_node = Element("toolbox")

    if galaxy_tool_path is not None and not galaxy_tool_path.strip().endswith("/"):
        galaxy_tool_path = galaxy_tool_path.strip() + "/"
    if galaxy_tool_path is None:
        galaxy_tool_path = ""

    for category, file_names in categories_to_tools.iteritems():
        section_node = add_child_node(toolbox_node, "section")
        section_node.attrib["id"] = "section-id-" + "".join(category.split())
        section_node.attrib["name"] = category
        for filename in file_names:
            tool_node = add_child_node(section_node, "tool")
            tool_node.attrib["file"] = galaxy_tool_path + filename

    toolconf_tree = ElementTree(toolbox_node)
    toolconf_tree.write(open(tool_conf_destination, 'w'), encoding="UTF-8",
                        xml_declaration=True, pretty_print=True)
    logger.info("Generated Galaxy tool_conf.xml in %s" % tool_conf_destination, 0)
def urlopen_read(self, req):
    tries = 10
    for i in range(tries):
        try:
            self.n_connections += 1
            page = None
            with contextlib.closing(urllib2.urlopen(req)) as resp:
                if resp.info().get('Content-Encoding') == 'gzip':
                    page = self.gzip_data(resp.read())
                else:
                    page = resp.read()
            return page
        except Exception, e:
            # only urllib2.HTTPError carries a 'code' attribute
            if getattr(e, 'code', None) == 404:
                msg = 'Error in urlopen_read: %s.' % str(e)
                write_message(msg, self.window)
                return None
            if i < tries - 1:
                sec = (i + 1) * 5
                msg = ('Error in urlopen_read: %s\nTake a rest: %s seconds, and retry.'
                       % (str(e), sec))
                write_message(msg, self.window)
                time.sleep(sec)
            else:
                msg = 'Exit incorrect. %s' % str(e)
                logger.info(msg)
                write_message(msg, self.window)
                return None
def fullha(cloud=False, cobbler=False):
    ''' Prepare and install Full HA Openstack '''
    log.info("Full install of Full HA Openstack" +
             (" with cobbler" if cobbler else ''))
    prepare("fullha", cloud=cloud, cobbler=cobbler)
    time.sleep(GLOBAL_TIMEOUT)
    install("fullha", cobbler=cobbler)
def save_verify_code(self, url):
    try:
        cookie_str = ''
        for cookie in self.cj.as_lwp_str(True, True).split('\n'):
            cookie = cookie.split(';')[0]
            cookie = cookie.replace('\"', '').replace('Set-Cookie3: ', ' ').strip() + ';'
            cookie_str += cookie
        headers = self.get_headers(url)
        headers['Accept'] = 'image/png,image/*;q=0.8,*/*;q=0.5'
        headers['Referer'] = 'http://weibo.com/'
        headers['Cookie'] = cookie_str
        del headers['Accept-encoding']
        req = self.pack_request(url, headers)
        content = self.urlopen_read(req)
        f = open(os.path.join(self.soft_path, 'pin.png'), 'wb')
        f.write(content)
        f.flush()
        f.close()
    except Exception, e:
        msg = 'Save verify code error. %s' % str(e)
        logger.info(msg)
        write_message(msg, self.window)
        return
def crawl_follows(self):
    def _crawl(parser, uid, page, num_pages=''):
        msg = 'Crawl user(%s)\'s follows-page: %s:%s' % (self.uid, num_pages, page)
        write_message(msg, self.window)
        url = 'http://weibo.com/%s/follow?page=%s' % (uid, page)
        html = self._fetch(url, query=settings.QUERY_FOLLOWS)
        try:
            pq_doc = pq(html)
            return parser.parse(pq_doc)
        except:
            return 0

    msg = 'Checking: whether user(%s) exists or not...' % self.uid
    write_message(msg, self.window)
    is_exist = self.fetcher.check_user(self.uid)
    if is_exist is None:
        return
    if not is_exist:
        msg = 'Not exist: %s.' % (self.uid)
        logger.info(msg)
        write_message(msg, self.window)
        return

    self.storage = FileStorage(self.uid, settings.MASK_FOLLOW, self.store_path)
    start_time = time.time()
    parser = ComFollowsParser(self.storage, uids_storage=self.uids_storage)
    num_pages = _crawl(parser, self.uid, page=1)

    if settings.PAGE_LIMIT != 0:
        if num_pages > settings.PAGE_LIMIT:
            msg = 'For sina policy, reduce page count from %s to %s' % (num_pages, settings.PAGE_LIMIT)
            write_message(msg, self.window)
            num_pages = settings.PAGE_LIMIT

    pages = [i for i in xrange(2, num_pages + 1)]
    if len(pages) > 0:
        n_threads = 5
        worker_manager = WorkerManager(n_threads)
        for pg in pages:
            worker_manager.add_job(_crawl, parser, self.uid, pg, num_pages)
        worker_manager.wait_all_complete()

    cost_time = int(time.time() - start_time)
    msg = ('Crawl user(%s)\'s follows: total page=%s,'
           ' cost time=%s sec, connections=%s'
           % (self.uid, num_pages, cost_time, self.fetcher.n_connections))
    logger.info(msg)
    write_message(msg, self.window)
def only_test(topology):
    ''' Prepare and run Tempest tests, provide topology: aio, 2role, fullha '''
    log.info("Configuring Openstack for tempest")
    if not topology or topology not in ("aio", "2role", "fullha"):
        raise NameError("Topology should be one of: 'aio', '2role', 'fullha'")
    prepare_coi(topology)
    run_tests(force=False)
def get_servertime(self):
    url = ('http://login.sina.com.cn/sso/prelogin.php?entry=account'
           '&callback=sinaSSOController.preloginCallBack&su=&rsakt=mod'
           '&client=ssologin.js(v1.4.5)&_=%s' % self.get_milli_time())
    headers = self.get_headers(url)
    headers['Accept'] = '*/*'
    headers['Referer'] = 'http://weibo.com/'
    del headers['Accept-encoding']

    result = {}
    req = self.pack_request(url, headers)
    for _ in range(3):
        data = None
        try:
            with contextlib.closing(urllib2.urlopen(req)) as resp:
                data = resp.read()
            p = re.compile(r'\((.*)\)')
            json_data = p.search(data).group(1)
            data = json.loads(json_data)
            result['servertime'] = str(data['servertime'])
            result['nonce'] = data['nonce']
            result['rsakv'] = str(data['rsakv'])
            result['pubkey'] = str(data['pubkey'])
            self.pcid = str(data['pcid'])
            break
        except Exception, e:
            msg = 'Get servertime error. %s' % str(e)
            logger.info(msg)
            write_message(msg, self.window)
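# For reference: the prelogin endpoint above answers with JSONP of the form
#   sinaSSOController.preloginCallBack({"servertime": ..., "nonce": "...",
#   "rsakv": "...", "pubkey": "...", "pcid": "..."})
# and the r'\((.*)\)' regex extracts the JSON object between the parentheses.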
def main_loop(self):
    rlist = []
    rlist.append(self.__pipe.inform)
    timeout = 10
    print "Total threads: {0}".format(threading.activeCount())
    try:
        while self.__running:
            readable, _, _ = select.select(rlist, [], [], timeout)
            if not readable:
                continue
            if self.__pipe.inform in readable:
                try:
                    message = self.__pipe.read(256)
                except OSError, exc:
                    logger.warn("[Error %d] appeared at reading pipe" % exc.errno)
                    continue
                if len(message) == 0:
                    continue
                pdu_id = message.split()[0].split('.')[-2]
                pdu_index = self.to_index(int(pdu_id))
                logger.info("Assign message to pdu {0}".format(pdu_id))
                self.__pdus[pdu_index].handle_message(message)
    except KeyboardInterrupt:
        logger.error("Break by user.")
    except Exception, ex:
        logger.error("{0}: {1}".format(sys._getframe().f_code.co_name, ex))
def venv(private=True):
    log.info("Installing virtualenv for tempest")
    install = os.path.join(TEMPEST_DIR, "tools", "install_venv.py")
    wraps = ''
    if not private:
        wraps = "export venv=%s; " % TCVENV
    local("%spython " % wraps + install)
def update_snmprec_file(self, oid, val):
    old_file = os.path.join(self.config_instance.snmp_data_dir, "public.snmprec")
    new_file = os.path.join(self.config_instance.snmp_data_dir, "new.snmprec")
    logger.info("update oid %s, val %s" % (oid, str(val)))
    # rewrite the file line by line, swapping in the new value for the oid
    try:
        old_fdh = open(old_file, 'r')
        new_fdh = open(new_file, 'w')
        while True:
            line = old_fdh.readline()
            if not line:
                break
            record_list = line.strip(os.linesep).split('|')
            if record_list[0] == oid:
                record_list[2] = val
                new_line = '|'.join(["%s" % x for x in record_list])
                new_fdh.write(new_line + os.linesep)
            else:
                new_fdh.write(line)
    except IOError as e:
        logger.error("Exception in updating snmprec file, exception: {}".format(e))
        return
    new_fdh.close()
    old_fdh.close()
    os.rename(new_file, old_file)
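# For context: each line of an snmpsim ".snmprec" file is assumed to hold
# three pipe-separated fields -- OID, type tag, value -- which is the layout
# the rewrite above relies on. A minimal sketch of the same record rewrite
# under that assumption (the record content here is made up):
def rewrite_record(line, oid, val):
    # 'line' looks like "1.3.6.1.2.1.1.3.0|67|233425120"
    fields = line.rstrip('\n').split('|')
    if fields[0] == oid:
        fields[2] = str(val)
    return '|'.join(fields)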
def new_station(self):
    """
    Update the current station from ident and display new main screen
    """
    try:
        avwx.core.valid_station(self.station)
    except avwx.exceptions.BadStation:
        return self.error_station()
    self.draw_loading_screen()
    new_metar = avwx.Metar(self.station)
    try:
        new_metar.update()
    except (TimeoutError, ConnectionError, avwx.exceptions.SourceError):
        self.error_connection()
    except avwx.exceptions.InvalidRequest:
        self.error_station()
    except Exception as exc:
        logger.error(f'An unknown error has occurred: {exc}')
        self.error_unknown()
    else:
        logger.info(new_metar.raw)
        self.metar = new_metar
        self.old_ident = copy(self.ident)
        self.reset_update_time()
        self.export_session()
        self.draw_main()
def get_login_form(self):
    url = ('http://3g.sina.com.cn/prog/wapsite/sso/login.php?ns=1&revalid=2'
           '&backURL=http%3A%2F%2Fweibo.cn%2F&backTitle=%D0%C2%C0%CB%CE%A2%B2%A9&vt=')
    headers = self.get_headers(url)
    headers['Accept'] = '*/*'
    headers['Referer'] = 'http://weibo.cn'
    del headers['Accept-encoding']
    req = self.pack_request(url, headers)
    rand = None
    passwd_s = None
    vk = None
    for _ in range(3):
        try:
            data = None
            with contextlib.closing(urllib2.urlopen(req)) as resp:
                data = resp.read()
            rand = HTML.fromstring(data).xpath('//form/@action')[0]
            passwd_s = HTML.fromstring(data).xpath("//input[@type='password']/@name")[0]
            vk = HTML.fromstring(data).xpath("//input[@name='vk']/@value")[0]
            return rand, passwd_s, vk
        except Exception, e:
            msg = 'get login form error: %s' % str(e)
            logger.info(msg)
            write_message(msg, self.window)
def find_hashes(*links):
    """
    Finds unique tripcodes. If no URLs are given it will attempt
    to scrape all of 4chan where tripcodes are allowed.
    """
    hashes = set()
    pool = Pool(num_threads=parameters.num_threads)

    def work(unit):
        if isinstance(unit, Post):
            if unit.public:
                return unit.public.cipher
            return
        logger.info('working %r', unit)
        for e in unit.process():
            pool.push(work, e)

    if not links:
        links = boards
    for link in map(classify, links):
        pool.push(work, link)
    pool.join()
    logger.info('Join complete, updating with results.')
    for e in pool.get_results():
        hashes.add(e)
    pool.close()
    return hashes
def role2(cloud=False, cobbler=False):
    ''' Prepare and install 2role Openstack '''
    log.info("Full install of 2role Openstack" +
             (" with cobbler" if cobbler else ''))
    prepare("2role", cloud=cloud, cobbler=cobbler)
    time.sleep(GLOBAL_TIMEOUT)
    install("2role", cobbler=cobbler)
def build_cache(*links):
    """
    Builds up the internal WebEntity.webcache with a snapshot of the
    provided URLs. If no URLs are given, it will attempt to update the
    cache with a snapshot of the entirety of 4chan.
    """
    pool = Pool(num_threads=parameters.num_threads)

    def work(unit):
        logger.info('working %r', unit)
        if isinstance(unit, Thread):
            unit.download()
        else:
            for e in unit.process():
                pool.push(work, e)

    if not links:
        links = all_boards
    for link in map(classify, links):
        pool.push(work, link)
    pool.join()
    logger.info('Join complete.')
def valid_cookie(self, html=''):
    html = str(html)
    if not html:
        url = 'http://weibo.cn/kaifulee'
        headers = self.get_headers(url)
        html = self.get_content_head(url, headers=headers)
        if not html:
            msg = 'Error in cookie: need relogin.'
            logger.info(msg)
            write_message(msg, self.window)
            self.clear_cookie(self.cookie_file)
            return False
    if u'登录' in html:
        if not self.login():
            msg = 'In valid_cookie: relogin failed.'
            logger.info(msg)
            write_message(msg, self.window)
            self.clear_cookie(self.cookie_file)
            return False
    gsid = None
    for c in self.cj:
        if c.name.startswith('gsid') and c.domain == '.weibo.cn':
            gsid = c.value
    self.login_params = {'gsid': gsid, 'vt': '4', 'lret': '1'}
    return True
def fetch_msg_comments(self, msg_id, page=1):
    url = 'http://weibo.com/aj/comment/big?_wv=5'
    headers = self.get_headers(url)
    headers['Accept'] = '*/*'
    headers['Referer'] = 'http://weibo.com/'
    del headers['Accept-encoding']
    body = {
        '__rnd': str(int(time.time() * 1000)),
        '_t': '0',
        'id': msg_id,
        'page': page
    }
    # join the encoded body onto the existing '?_wv=5' query string
    url = url + '&' + urllib.urlencode(body)
    req = self.pack_request(url, headers)
    page = self.urlopen_read(req)
    try:
        if json.loads(page)['code'] == '100000':
            data = json.loads(page)['data']
            html = data['html']
            pg = int(data['page']['totalpage'])
            return html, pg
        else:
            msg = json.loads(page)['msg']
            logger.info(msg)
            write_message(msg, self.window)
            return None, None
    except ValueError:
        return page, None
def __update_control_list(self, datastore, vmname, pdu, port):
    has_datastore = False
    for node_list in self.__nodes_control_list:
        if node_list.has_key(datastore):
            node_found = False
            for node_info in node_list[datastore]:
                if node_info['node_name'] == vmname:
                    node_info['control_pdu'] = pdu
                    node_info['control_port'] = port
                    node_found = True
            if node_found is False:
                node_info = {}
                node_info['node_name'] = vmname
                node_info['control_pdu'] = pdu
                node_info['control_port'] = port
                node_list[datastore].append(node_info)
            has_datastore = True
    if has_datastore is False:
        vm_list = {}
        vm_list[datastore] = []
        ni = {}
        ni['node_name'] = vmname
        ni['control_pdu'] = pdu
        ni['control_port'] = port
        vm_list[datastore].append(ni)
        self.__nodes_control_list.append(vm_list)
    logger.info("Updated. nodes control list: {0}".format(self.__nodes_control_list))
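# Illustrative shape of self.__nodes_control_list that the update above
# maintains (datastore, VM and PDU names are made up):
# [
#     {'datastore-1': [
#         {'node_name': 'vm-01', 'control_pdu': 'pdu-0', 'control_port': 3},
#         {'node_name': 'vm-02', 'control_pdu': 'pdu-0', 'control_port': 4},
#     ]},
#     {'datastore-2': [...]},
# ]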
def run(self):
    while True:
        msg = '---Connect to the task manager for task allocation---'
        logger.info(msg)
        wx.CallAfter(self.window.write_logs, msg)
        task_id = self.request_task()
        if task_id is None:
            msg = '---No task exists---'
            logger.info(msg)
            wx.CallAfter(self.window.write_logs, msg)
            break
        msg = '---Start task: %s---' % task_id
        logger.info(msg)
        wx.CallAfter(self.window.write_logs, msg)
        tar_file = self.do_task()
        msg = '---Finish task: %s---' % task_id
        logger.info(msg)
        wx.CallAfter(self.window.write_logs, msg)
        msg = '---Upload: %s-%s---' % (task_id, tar_file)
        logger.info(msg)
        wx.CallAfter(self.window.write_logs, msg)
        self.upload_task(tar_file)
    # finished
    wx.CallAfter(self.window.finished)
def preprocessing(self, path, event):
    if not (os.path.exists(path) and os.path.isfile(path)):
        logger.warn('path does not exist ' + str(path))
        return
    logger.info("file: %s, %s" % (path, event))
    if event != 'removed':
        time.sleep(15)  # sleep to make sure the file is complete
    new_path = path + "_"
    logger.info('Move to ' + new_path)
    os.rename(path, new_path)

    # processing
    param = ''
    lines = tuple(open(new_path, 'r'))
    result = {}
    ad_result = []
    for line in lines:
        worker = 'w?'
        if 'worker' in line:
            pos = line.find('worker:') + len('worker:')
            endpos = line.find(';', pos)
            worker = line[pos:endpos]
        wpos = -1
        if result.has_key('worker') and worker in result['worker']:
            wpos = result['worker'].index(worker)
        l = line.split('|', 2)
        if len(l) > 0:  # result
            for item in l[0].split(';'):
                i = item.split(':')
                if not i[0] in result:
                    result[i[0]] = []
                t = self.getCorrectValue(i[1])
                if wpos >= 0:
                    result[i[0]][wpos] = t
                else:
                    result[i[0]].append(t)
        resa = []
        if len(l) > 1:  # additional result
            for k, v in simplejson.loads(l[1]).iteritems():
                val = str(k) + ":" + str(v)
                self.addAdditionalData(val.strip(), resa)
            ad_result.append(resa)

    param = str(result).strip('{').rstrip('}').replace(' ', '').replace("'", '').replace('"', '').replace('],', '];')
    return_value = param + ';additionalResults:' + str(ad_result).replace("'", "").replace("\"", "")
    return return_value
def determine_configuration(ip='localhost'):
    with settings(host_string=ip):
        folder = os.path.dirname(run('find ~ -name stack.sh'))
        dest = run('grep -E "^DEST=(.+)$" {0}/local.conf {0}/stackrc | cut -d = -f2'.format(folder))
        d = {'folder': folder,
             'DEST': dest.split()[0],
             'screen': folder + '/stack-screenrc'}
        log.info(d)
        return d
def heart_song(self):
    song = self.doubanfm.current_song
    if int(song.get('like')) != 0:
        l.info(u'unhearted song: {sid} - {artist} - {title}'.format(**song))
        self.doubanfm.unheart_song()
    else:
        l.info(u'hearted song: {sid} - {artist} - {title}'.format(**song))
        self.doubanfm.heart_song()
def replaceInFile(self, replacedFile, searchRegExp, replaceBy):
    if replacedFile and searchRegExp and replaceBy:
        logger.info("Replace '" + str(constants.MONITOR_ID) + "' by '" + str(replaceBy) +
                    "' in file '" + str(replacedFile) + "'")
        for line in fileinput.input(replacedFile, inplace=True):
            if re.match(searchRegExp, line):
                line = re.sub(searchRegExp, replaceBy, line)
            sys.stdout.write(line)
def prepare_with_ip(private=True):
    ''' Prepare tempest with IP of Horizon: for RedHat, etc '''
    log.info("Preparing tempest configuration by downloading openrc from Horizon")
    init(private=private)
    ip = get_lab_vm_ip()
    prepare(ip=ip)
    conf_dir = os.path.join(TEMPEST_DIR, "etc")
    local("mv ./tempest.conf.jenkins %s/tempest.conf" % conf_dir)
def _play_song(self):
    '''should be called by self.next_song() ONLY'''
    song = self.doubanfm.current_song
    self.player.set_property('uri', song.get('url'))
    self.player.set_state(gst.STATE_PLAYING)
    l.info(u'playing song: {sid} - {artist} - {title}'.format(**song))
    if int(song.get('like')) != 0:
        pass
def full(topology):
    ''' Prepare, install and test RedHat Openstack with Tempest '''
    log.info("Full install and test of %s Openstack" % topology)
    if not topology or topology not in ("aio", "2role", "3role"):
        raise NameError("Topology should be one of: 'aio', '2role', '3role'")
    prepare(topology, distro='centos7')
    time.sleep(GLOBAL_TIMEOUT)
    install(topology)
    only_test(topology)
def analyze_products_by_user(data_set):
    repository = Repository(data_set=data_set)
    users = repository.get_users()
    count = 0
    total = len(users)
    for user_ids in batch(users, 100):
        users_products = []
        for user_id in user_ids:
            user_products = repository.get_products_bought_by_user(user_id)
            user_products = dict(user_id=user_id, products=[
                dict(product_id=p['_id'], count=p['count'])
                for p in user_products
            ])
            users_products.append(user_products)
            count += 1
            logger.info("{}/{}".format(count, total))
        repository.add_user_products(users_products)
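# 'batch' is used above but not defined here; it is presumably a chunking
# helper that yields fixed-size slices of a sequence. A minimal sketch of
# what it likely looks like:
def batch(iterable, size):
    # yield successive 'size'-sized slices of 'iterable'
    for start in range(0, len(iterable), size):
        yield iterable[start:start + size]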
def parse_macros_files(macros_file_names):
    macros_to_expand = set()
    for macros_file_name in macros_file_names:
        try:
            macros_file = open(macros_file_name)
            logger.info("Loading macros from %s" % macros_file_name, 0)
            root = parse(macros_file).getroot()
            for xml_element in root.findall("xml"):
                name = xml_element.attrib["name"]
                if name in macros_to_expand:
                    logger.warning("Macro %s has already been found. Duplicate found in file %s." %
                                   (name, macros_file_name), 0)
                else:
                    logger.info("Macro %s found" % name, 1)
                    macros_to_expand.add(name)
        except ParseError, e:
            raise ApplicationException("The macros file " + macros_file_name +
                                       " could not be parsed. Cause: " + str(e))
        except IOError, e:
            raise ApplicationException("The macros file " + macros_file_name +
                                       " could not be opened. Cause: " + str(e))
def assertequals(self, jsonpathkey, value):
    """
    Check whether the value extracted from the JSON result by a JSONPath
    key equals the expected value.
    :param jsonpathkey: JSONPath expression for the key
    :param value: expected value
    :return: whether the two are equal
    """
    res = None
    try:
        res = str(jsonpath.jsonpath(self.jsonres, jsonpathkey)[0])
    except Exception as e:
        pass
    # resolve correlated values
    value = self.__get_relations(value)
    if str(res) == str(value):
        logger.info('PASS')
        self.__write_excel_res('PASS', res)
        return True
    else:
        logger.info('FAIL')
        self.__write_excel_res('FAIL', res)
        return False
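# A small, self-contained illustration of the jsonpath lookup used by the
# assertion above (the 'jsonpath' package; document and values are made up):
import jsonpath

doc = {'data': {'token': 'abc123'}}
matches = jsonpath.jsonpath(doc, '$.data.token')
# jsonpath.jsonpath() returns a list of matches, or False when nothing
# matches -- which is why the [0] indexing above sits in a try/except.
print(matches[0] if matches else None)  # -> abc123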
def check_docker(self):
    logger.info(f"testing docker API on {DOCKER_SOCKET}…")
    if (
        not DOCKER_SOCKET.exists()
        or not DOCKER_SOCKET.is_socket()
        or not os.access(DOCKER_SOCKET, os.R_OK)
    ):
        logger.critical(f"\tsocket ({DOCKER_SOCKET}) not available.")
        sys.exit(1)
    self.docker = docker.DockerClient(
        base_url=f"unix://{DOCKER_SOCKET}", timeout=DOCKER_CLIENT_TIMEOUT
    )
    try:
        if len(self.docker.containers.list(all=False)) < 1:
            logger.warning("\tno running container, am I out-of-docker?")
    except Exception as exc:
        logger.critical("\tdocker API access failed: exiting.")
        logger.exception(exc)
        sys.exit(1)
    else:
        logger.info("\tdocker API access successful")
def main(args):
    config = get_config_from_args(args, mode='infer')
    max_seq_length = args.max_seq_length or config.max_seq_length
    config.max_seq_length = max_seq_length
    logger.info("exporting {} model...".format(config.model))
    checkpoint_path = tf.train.latest_checkpoint(config.checkpoint_dir)
    with tf.Session() as sess:
        model = get_task_model_class(config.model, config.task)(config)
        input_nodes, logits_ph = model.export_graph(config, training=False, logits=True)
        saver = tf.train.Saver(var_list=tf.global_variables())
        logger.info('begin restoring model from checkpoints...')
        saver.restore(sess, checkpoint_path)
        inference_graph_file = config.inference_graph
        saved_model_path = os.path.join(os.path.dirname(inference_graph_file), 'saved_model')
        if not os.path.exists(saved_model_path):
            logger.info("exporting saved_model...")
            tf.saved_model.simple_save(sess, saved_model_path,
                                       inputs=input_nodes,
                                       outputs={'logits': logits_ph})
        if args.quantize:
            save_name = "{}.quant.tflite".format(model.name)
        else:
            save_name = "{}.tflite".format(model.name)
        tflite_file = os.path.join(os.path.dirname(inference_graph_file), save_name)
        if not os.path.exists(tflite_file):
            logger.info("exporting tflite model...")
            converter = tf.lite.TFLiteConverter.from_session(
                sess, list(input_nodes.values()), [logits_ph])
            if args.quantize:
                converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
                converter.target_ops = [
                    tf.lite.OpsSet.TFLITE_BUILTINS,
                    tf.lite.OpsSet.SELECT_TF_OPS
                ]
            tflite_model = converter.convert()
            with open(tflite_file, "wb") as f:
                f.write(tflite_model)
    # freeze_graph --input_saved_model_dir=data/ckpt/bert/saved_model \
def install(software_list, ignore_exist=False):
    logger.info("install software[%s]..", software_list)
    software_home = SOFTWARE_HOME_FMT.format(get_admin_user())
    if software_list:
        run_shell("mkdir -p {}".format(software_home))
    exist_info = {}
    for s in software_list:
        # software already exists
        if os.path.exists("{}/{}".format(software_home, s["name"])):
            exist_info[s["name"]] = True
            logger.error("The software[%s] already exist!", s["name"])
            if not ignore_exist:
                return 55
    for s in software_list:
        if not exist_info.get(s["name"], False):
            ret = _install(s["name"], s["source"], software_home, s.get("installer"))
            if ret != 0:
                return ret
    return 0
def assertequals(self, key, value):
    """
    powered by Jhx at 2020/1/24
    Assertion.
    :param key: key of the response field to assert on
    :param value: expected value of the response field
    :return:
    """
    value = self.__get_value(value)
    try:
        if str(self.result_json[key]) == str(value):
            logger.info('PASS')
            self.writer.write(self.writer.row, 7, 'PASS')
            self.writer.write(self.writer.row, 8, str(self.result_json[key]))
        else:
            logger.info('FAIL')
            self.writer.write(self.writer.row, 7, 'FAIL')
            self.writer.write(self.writer.row, 8, str(self.result_json[key]))
    except Exception as e:
        logger.exception(e)
def resetUser():
    users = api.getAllUsers()
    if len(users) != 0:
        logger.info("users: " + json.dumps(users))
        for k, v in users.items():
            port = int(str(k).split(":")[1])
            password = v['p']
            userDict[port] = password
        logger.info("start add user tables")
        addUserTables()
        logger.info("start write to ssfile")
        writeDict2ss()
        logger.info("start ss server, config is " + json.dumps(userDict))
        os.system(shell['restart'])
    return "ok"
def get(self, url, params=None):
    """
    Send a GET request.
    :param url:
    :param params:
    :return:
    """
    # substitute correlated parameters
    if params is None or params == '':
        pass
    else:
        params = self.__get_param(params)
    # join the url and the parameters
    if not url.startswith('http'):
        url = self.url + '/' + url + "?" + params
    else:
        url = url + "?" + params
    # send the GET request
    res = self.session.get(url, verify=False)
    self.result = res.content.decode('utf8')
    logger.info("Response: " + self.result)
    print("Response: " + self.result)
    try:
        # parse the response string as JSON
        jsons = self.result
        jsons = jsons[jsons.find('{'):jsons.rfind('}') + 1]
        self.jsonres = json.loads(jsons)
        self.writer.write(self.writer.row, 7, 'PASS')
        self.writer.write(self.writer.row, 8, str(jsons))
        return True
    except Exception as e:
        # if parsing failed, clear jsonres so it does not keep the previous result
        self.jsonres = {}
        self.writer.write(self.writer.row, 7, 'FAIL')
        self.writer.write(self.writer.row, 8, str(self.result))
        return False
def parse_quic_client(in_dir: str, out_dir: str, scenarios: Dict[str, Dict], config_cols: List[str],
                      multi_process: bool = False) -> pd.DataFrame:
    """
    Parse all quic client results.

    :param in_dir: The directory containing the measurement results.
    :param out_dir: The directory to save the parsed results to.
    :param scenarios: The scenarios to parse within the in_dir.
    :param config_cols: The column names for columns taken from the scenario configuration.
    :param multi_process: Whether to allow multiprocessing.
    :return: A dataframe containing the combined results from all scenarios.
    """
    logger.info("Parsing quic client results")
    df_cols = [*config_cols, 'run', 'second', 'bps', 'bytes', 'packets_received']
    if multi_process:
        df_quic_client = __mp_parse_slices(2, __parse_quic_client_from_scenario,
                                           in_dir, scenarios, df_cols, 'quic', 'client')
    else:
        df_quic_client = __parse_slice(__parse_quic_client_from_scenario,
                                       in_dir, [*scenarios.items()], df_cols, 'quic', 'client')

    logger.debug("Fixing quic client data types")
    df_quic_client = fix_dtypes(df_quic_client)

    logger.info("Saving quic client data")
    df_quic_client.to_pickle(os.path.join(out_dir, 'quic_client.pkl'))
    with open(os.path.join(out_dir, 'quic_client.csv'), 'w+') as out_file:
        df_quic_client.to_csv(out_file)

    return df_quic_client
def external_local_test(query):
    """Fetch initialization information for a random selection of
    locally stored subreddits.
    """
    # Now begin to test the collation by running the
    # function, making sure there are no errors.
    if query == 'random':
        # Fetch all the subreddits we monitor and ask for
        # the number to test.
        number_to_test = int(input("\nEnter the number of tests to conduct: "))
        random_subs = sample(database.monitored_subreddits_retrieve(), number_to_test)
        random_subs.sort()
        print("\n\n### Now testing: r/{}.\n".format(', r/'.join(random_subs)))

        init_times = []
        for test_sub in random_subs:
            time_initialize_start = time.time()
            print("\n---\n\n> Testing r/{}...\n".format(test_sub))

            # If the length of the generated text is longer than a
            # certain amount, then it's passed.
            tested_data = artemis_stats.wikipage_collater(test_sub)
            if len(tested_data) > 1000:
                total_time = time.time() - time_initialize_start
                artemis_stats.wikipage_editor_local(test_sub, tested_data)
                print("> Test complete for r/{} in {:.2f} seconds.\n".format(
                    test_sub, total_time))
                init_times.append(total_time)
        print('\n\n# All {} wikipage collater tests complete. '
              'Average initialization time: {:.2f} secs'.format(
                  number_to_test, sum(init_times) / len(init_times)))
    else:
        logger.info('Testing data for r/{}.'.format(query))
        print(artemis_stats.wikipage_collater(query))
    return
def urlopen_read(self, req):
    tries = 10
    for i in range(tries):
        try:
            self.n_connections += 1
            page = None
            with contextlib.closing(urllib2.urlopen(req)) as resp:
                if resp.info().get('Content-Encoding') == 'gzip':
                    page = self.gzip_data(resp.read())
                else:
                    page = resp.read()
            if '$CONFIG' in page and not ("$CONFIG['islogin'] = '******'" in page
                                          or "$CONFIG['islogin']='******'" in page):
                print 'Not login, try to login'
                if not self.check_cookie():
                    msg = 'Error in urlopen_read. Login failed.'
                    write_message(msg, self.window)
                    return None
            else:
                return page
        except Exception, e:
            if i < tries - 1:
                sec = (i + 1) * 5
                msg = ('Error in urlopen_read: %s\nTake a rest: %s seconds, and retry.'
                       % (str(e), sec))
                write_message(msg, self.window)
                time.sleep(sec)
            else:
                msg = 'Exit incorrect. %s' % str(e)
                logger.info(msg)
                write_message(msg, self.window)
                return None
def visible_element(self, loc, loc_msg="", timeout=30, frequency=0.5):
    """
    Wrapper around waiting for an element to become visible, so that we
    get log output and a page screenshot when the wait fails.
    :param loc: element locator expression
    :param timeout: timeout in seconds
    :param frequency: polling frequency in seconds
    :param loc_msg: description of the page/feature
    :return:
    """
    logger.info("Waiting for element {} to become visible".format(loc_msg))
    try:
        WebDriverWait(self.driver, timeout, frequency).until(
            EC.visibility_of_element_located(loc))
    except:
        logger.exception("Failed waiting for element {} to become visible".format(loc_msg))
        self.save_web_screenshot(loc_msg)
        raise
def switch_to_alert(self, loc, loc_msg):
    """
    Switch to and dismiss an alert box.
    :param loc: element locator expression
    :param loc_msg: description of the page/feature
    """
    # 1. click the button that triggers the alert
    self.button_click(loc, loc_msg)
    try:
        # 2. wait for the alert box to be present
        WebDriverWait(self.driver, 30, 0.1).until(EC.alert_is_present())
        # 3. switch to the alert box (switch_to.alert is a property, not a method)
        alert = self.driver.switch_to.alert
        logger.info("Alert text is {}".format(alert.text))
        # 4. accept the alert so that it disappears
        alert.accept()
        logger.info("Alert in {} handled successfully".format(loc_msg))
    except:
        logger.exception("Failed to handle alert in {}".format(loc_msg))
        self.save_web_screenshot(loc_msg)
        raise
def runapp(self, caps, t=''):
    """
    Connect to the Appium server and launch the app under test
    according to the given capabilities.
    :param caps: app launch capabilities; preferably a JSON-format string
                 (prefer string values over booleans and other types)
    :return:
    """
    try:
        # note: eval() trusts the caps string coming from the test data
        caps = eval(caps)
        if t == '':
            t = 20
        else:
            t = int(t)
        self.driver = webdriver.Remote(
            "http://localhost:" + self.port + "/wd/hub", caps)
        self.driver.implicitly_wait(t)
        logger.info("App launched successfully %s" % caps)
        self.writer.write(self.writer.row, 7, "PASS")
        self.writer.write(self.writer.row, 8, "")
    except Exception as e:
        logger.error("Failed to launch app %s" % caps)
        logger.error(traceback.format_exc())
        self.writer.write(self.writer.row, 7, "FAIL")
        self.writer.write(self.writer.row, 8, str(traceback.format_exc()))
def svc_register(self, cmdObj):
    assert ('outerip' in cmdObj)
    assert ('outerport' in cmdObj)
    assert ('svctype' in cmdObj)
    assert ('gpid' in cmdObj)
    outerip = cmdObj['outerip']
    outerport = cmdObj['outerport']
    svctype = cmdObj['svctype']
    gpid = cmdObj['gpid']
    self.gpid = gpid
    cur_time = datetime.datetime.now()
    svcroute = Service(svctype=svctype,
                       gpid=gpid,
                       outerip=outerip,
                       outerport=outerport,
                       connection=weakref.ref(self),
                       regtime=cur_time)
    logger.info('Service registered: [%s], [%s], [%s:%d]', svctype, gpid, outerip, outerport)
    gl_routetable.add_svc(svcroute)
    svcreg_rsp = {'code': 0}
    rspbuf = self.makePacket('svc_register', 'rsp', svcreg_rsp)
    self.transport.write(rspbuf)
def install(user='******', password='******', devstack_config="devstack_single_node"):
    """
    Install devstack Openstack on prepared environment
    """
    log.info("Installing devstack Openstack")
    tempest_repo = os.environ.get("TEMPEST_REPO", "")
    tempest_br = os.environ.get("TEMPEST_BRANCH", "")
    devstack_repo = os.environ.get("DEVSTACK_REPO", "")
    devstack_br = os.environ.get("DEVSTACK_BRANCH", "")
    devstack_patch = os.environ.get("DEVSTACK_PATCH", "")
    local("python ./tools/deployers/install_devstack.py "
          "-c config_file -u {user} -p {password} -r {repo} -b {br} "
          "-e {devstack_repo} -l {devstack_br} -m {patch} "
          "--devstack_config {devstack_config}".format(
              user=user, password=password, repo=tempest_repo, br=tempest_br,
              devstack_repo=devstack_repo, devstack_br=devstack_br,
              devstack_config=os.path.join(DEVSTACK_CONF, devstack_config + ".yaml"),
              patch=devstack_patch))
def modify(queue_list, ignore_exist=False):
    logger.info("Do modify queue[%s] info..", queue_list)
    try:
        queues_info = get_queues_details()
        # queue exists?
        for queue in queue_list:
            if not queues_info.get(queue["name"]):
                logger.error("The queue[%s] doesn't exist!", queue["name"])
                if not ignore_exist:
                    return 55
            else:
                queues_info[queue["name"]].update(queue)
        update_queues_conf(queues_info)
        logger.info("the queue detail: [%s] after modify [%s]!", queues_info, queue_list)
        return 0
    except Exception:
        logger.error("Failed to get queue [%s]: \n%s", queue_list, traceback.format_exc())
        return 1
def plot_model(self, model, dpi=300, to_file=None):
    """
    Plot a model to an image file
    """
    # Set the default file location and name
    if to_file is None:
        to_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               "Models", "%s.png" % self.NAME)

    # Make sure the output directory exists
    output_dir = os.path.dirname(to_file)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger.info("Creating plot of model %s: %s" % (self.NAME, to_file))
    tf.keras.utils.plot_model(
        model,
        to_file=to_file,
        show_shapes=True,
        show_layer_names=True,
        rankdir="TB",  # "TB" creates a vertical plot; "LR" creates a horizontal plot
        expand_nested=True,
        dpi=dpi)
def convert_models(args, parsed_ctds):
    # go through each ctd model and perform the conversion, easy as pie!
    for parsed_ctd in parsed_ctds:
        model = parsed_ctd.ctd_model
        origin_file = parsed_ctd.input_file
        output_file = parsed_ctd.suggested_output_file

        logger.info("Converting %s (source %s)" % (model.name, utils.get_filename(origin_file)))
        cwl_tool = convert_to_cwl(model, args)

        logger.info("Writing to %s" % utils.get_filename(output_file), 1)

        stream = open(output_file, 'w')
        stream.write(CWL_SHEBANG + '\n\n')
        stream.write("# This CWL file was automatically generated using CTDConverter.\n")
        stream.write("# Visit https://github.com/WorkflowConversion/CTDConverter for more information.\n\n")
        yaml.dump(cwl_tool, stream, default_flow_style=False)
        stream.close()
def test_run_cases(self, case):
    logger.info("Running test case {0}, title: {1}".format(case['id'], case['title']))
    # use the wrapped http_request method of the HttpRequest class to send the request
    res = self.request.http_request(case['method'], case['url'], case['data'])
    # assertion
    try:
        self.assertEqual(eval(case['ExpectedResult']), res.json())
        logger.info("Test result: Pass")
        TestResult = 'Pass'
    except Exception as e:
        logger.error("Expected and actual API test results differ: {0}".format(e))
        logger.error("Test result: Fail")
        TestResult = 'Fail'
    finally:
        DoExcel(contants.cases_file, 'login').write_back(case['id'] + 1, 7, str(res.json()))  # write actual result
        DoExcel(contants.cases_file, 'login').write_back(case['id'] + 1, 8, TestResult)  # write test verdict
def external_mail_alert():
    """ Function to mail moderators of subreddits that use the flair
    enforcement function to let them know about downtime or any other
    such issues. To be rarely used.
    """
    flair_enforced_subreddits = database.monitored_subreddits_retrieve(True)
    flair_enforced_subreddits.sort()

    # Retrieve the message to send.
    subject = input("\nPlease enter the subject of the message: ").strip()
    subject = '[Artemis Alert] {}'.format(subject)
    message = input("\nPlease enter the message you wish to send "
                    "to {} subreddits: ".format(len(flair_enforced_subreddits))).strip()

    # Send the message to moderators.
    for subreddit in flair_enforced_subreddits:
        reddit.subreddit(subreddit).message(subject, message)
        logger.info('External Mail: Sent a message to the moderators of r/{}.'.format(subreddit))
    return
def assertequals(self, key, value):
    """
    Check whether the value of a key in the JSON result equals the
    expected value.
    :param key: key in the JSON result
    :param value: expected value
    :return: whether the two are equal
    """
    res = None
    try:
        res = str(self.jsonres[key])
    except Exception as e:
        pass
    # resolve correlations in value
    value = self.__get_relations(value)
    if str(res) == str(value):
        logger.info('PASS')
        self.__writer_excel('PASS', res)
        return True
    else:
        logger.info('FAIL')
        self.__writer_excel('FAIL', res)
        return False
def predict(args, data, model, mode):
    # limit GPU memory
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    config = config_model(args)
    if mode == 'dev':
        data_Ids = data.dev_Ids
    elif mode == 'test':
        data_Ids = data.test_Ids
    with tf.Session(config=tf_config) as sess:
        model = create_model(sess, NERModel, args.output_dir, config, data, logger)
        metric = SeqEntityScore(data.label_alphabet, markup=args.markup)
        (test_info, class_info), _ = model.evaluate(sess, data_Ids, metric, batch_size=1)
        test_info = {f'test_{key}': value for key, value in test_info.items()}
        logger.info(test_info)
        for key, value in class_info.items():
            info = f"Subject: {key} - Acc: {value['acc']} - Recall: {value['recall']} - F1: {value['f1']}"
            logger.info(info)
def handle_adding_task(update: Update, context: CallbackContext):
    user = update.message.from_user
    task = update.message.text
    current_list = context.user_data["current_list"]
    if task == "Done":
        logger.info("User %s finished adding tasks to the list \"%s\"",
                    user.first_name, current_list["name"])
        tasks = db.get_tasks(current_list["id"])
        update.message.reply_text(format_tasks(tasks), reply_markup=ReplyKeyboardRemove())
        return ConversationHandler.END
    logger.info(
        "User %s added task \"%s\" to the list \"%s\"",
        user.first_name,
        task,
        current_list["name"],
    )
    db.add_task(task, False, current_list["id"])
    update.message.reply_text("Task added.")
    return conversations["add_tasks"]["handle_tasks"]
def calculate_mahalanobis_distances(self):
    """ Calculate all the Mahalanobis distances and save them to the file """
    with h5py.File(self.patches.filename, "r+") as hf:
        g = hf.get(self.NAME)
        if g is None:
            raise ValueError("The model needs to be saved first")

        maha = np.zeros(self.patches.shape, dtype=np.float64)
        for i in tqdm(np.ndindex(self.patches.shape),
                      desc="Calculating mahalanobis distances",
                      total=self.patches.size,
                      file=sys.stderr):
            maha[i] = self.__mahalanobis_distance__(self.patches[i])

        no_anomaly = maha[self.patches.labels == 1]
        anomaly = maha[self.patches.labels == 2]

        if g.get("mahalanobis_distances") is not None:
            del g["mahalanobis_distances"]
        m = g.create_dataset("mahalanobis_distances", data=maha)
        m.attrs["max_no_anomaly"] = np.nanmax(no_anomaly) if no_anomaly.size > 0 else np.NaN
        m.attrs["max_anomaly"] = np.nanmax(anomaly) if anomaly.size > 0 else np.NaN
        logger.info("Saved Mahalanobis distances to file")
    return True
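# __mahalanobis_distance__ is not shown here; it presumably computes the
# classic d(x) = sqrt((x - mu)^T Sigma^{-1} (x - mu)) against the model's
# fitted mean and covariance. A minimal sketch under that assumption:
import numpy as np

def mahalanobis_distance(x, mean, cov_inv):
    # distance of feature vector x from a Gaussian with the given mean
    # and precomputed inverse covariance matrix
    delta = np.asarray(x) - mean
    return float(np.sqrt(delta @ cov_inv @ delta))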
def policy_evaluation(environment, policy, gamma=0.9, epsilon=1e-4):
    # type: (Environment, Dict, float, float) -> np.ndarray
    iters = 0
    action_values = np.zeros(len(environment.states))
    while True:
        iters += 1
        new_values = np.zeros(action_values.shape)
        for s in environment.states:
            for a_idx, _ in enumerate(environment.get_valid_actions(s)):
                next_state, reward = environment.step(s, a_idx)
                # Bellman equation for value function
                new_values[s] += policy[s][a_idx] * (reward + gamma * action_values[next_state])
        # termination criteria: check for convergence
        diff = np.sum(np.abs(new_values - action_values))
        # update previously computed action values before returning,
        # so the converged sweep is the one we keep
        action_values = new_values
        if diff < epsilon:
            logger.info(f"Policy evaluation converged. Diff={diff:.6f}")
            break
    return action_values
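# A tiny, hypothetical environment to exercise policy_evaluation above; the
# real Environment interface (states, get_valid_actions, step) is assumed
# from the function's usage, and the rewards here are made up.
class ToyEnv(object):
    states = [0, 1]

    def get_valid_actions(self, s):
        return ['stay', 'switch']

    def step(self, s, a_idx):
        # 'stay' keeps the state with reward 0; 'switch' flips it with reward 1
        return (s, 0.0) if a_idx == 0 else (1 - s, 1.0)

# a uniform random policy over both actions in each state
uniform_policy = {s: [0.5, 0.5] for s in ToyEnv.states}
# values = policy_evaluation(ToyEnv(), uniform_policy, gamma=0.9)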
def _play_song(self):
    '''should be called by self.next_song() ONLY'''
    song = self.doubanfm.current_song
    l.info(u'playing song: {sid} - {artist} - {title}'.format(**song))
    self.ui.labelAlbum.setText(u'<{0}> {1}'.format(
        song.get('albumtitle'), song.get('public_time') or ''))
    self.ui.labelTitle.setText(song.get('title'))
    self.ui.labelTitle.setToolTip(song.get('title'))
    self.ui.labelArtist.setText(song.get('artist'))
    if int(song.get('like')) != 0:
        self.set_ui_state(GUIState.Hearted)
    else:
        self.set_ui_state(GUIState.Heart)
    self.mediaObject.setCurrentSource(Phonon.MediaSource(song.get('url')))
    self.mediaObject.play()
    self.__down_cover()
    from share import now_playing
    now_playing(song, channel_id=self.doubanfm.channel_id,
                channel_name=self.doubanfm.channel_name)
def send_result_service(request):
    # Log request
    logger.info('Received request: ' + str(request))

    # Declare response
    response = {
        'response_code': None,
        'response_msg': None,
        'response_body': None
    }

    # Validate request
    # {"user_name": string, "score": int, "clear_time": double}
    if ('user_name' not in request or 'score' not in request
            or 'clear_time' not in request or 'session_id' not in request):
        app.logger.error('Invalid input values.')
        logger.error('Got invalid input values. ' + str(response))
        response['response_code'] = '1101'
        response['response_msg'] = 'Invalid input values'
        return response

    # Send game result to redis or DB
    send_result = False
    db_session = dbwork.connect()
    send_result = dbwork.insert_game_result(db_session, request)

    # If it failed to send the result...
    if send_result is False:
        logger.error('Failed to send result to redis.')
        logger.error('Need to be implemented.')
        response['response_code'] = '2201'
        response['response_msg'] = 'Fail to insert result'
        return response

    dbwork.close(db_session)

    # Make response
    response['response_code'] = '0000'
    response['response_msg'] = 'Success!'
    return response
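# Illustrative only: a well-formed request and the response shape produced by
# send_result_service (field names come from the validation above; the
# concrete values are made up).
example_request = {
    'user_name': 'alice',
    'score': 1200,
    'clear_time': 42.5,
    'session_id': 'abc123',
}
# send_result_service(example_request)
# -> {'response_code': '0000', 'response_msg': 'Success!', 'response_body': None}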