def UpdateNetList(self, state=None, info=None, force_check=False, firstrun=False):
    """Refresh the wireless network list from the wicd daemon.

    Args:
        state: connection state from wicd; fetched from the daemon if falsy.
        info: optional status payload; info[3] is a network index and
              info[2] a signal strength (both stringly typed) — TODO confirm
              against the wicd D-Bus signal signature.
        force_check: regenerate the list even if the state is unchanged.
        firstrun: accepted for interface compatibility; unused here.
    """
    if self._Daemon is None:  # fixed: was `== None`
        return
    if not state:
        state, trash = self._Daemon.GetConnectionStatus()
        # debug output
        print("state")
        pp(state)
        print("Trash: ")
        pp(trash)
    # only rebuild the (expensive) network list when something changed
    if force_check or self._PrevWicdState != state:
        self.GenNetworkList()  # refresh the network list
    if info is not None:  # fixed: was `!= None`
        if len(info) > 3:
            _id = int(info[3])
            if _id < len(self._WirelessList):
                self._WirelessList[_id].UpdateStrenLabel(str(info[2]))
    self._PrevWicdState = state
def print_dup(self):
    """Pretty-print the duplicate-image name mapping.

    :return: self, so calls can be chained
    """
    duplicates = self.__dup_name_dict
    beeprint.pp(duplicates)
    return self
def feature_outcome_statistics(how_many_documents=-1, db='log_feature'):
    """Print per-collection outcome statistics for the given Mongo database.

    Scans at most `how_many_documents` newest documents per collection
    (all of them when -1) and reports the count/percentage of positive
    `outcome` values plus the first/last timestamps examined.

    Fixes vs. original:
    - percentage divided by `idx` (off by one, and ZeroDivisionError when
      only one document was examined); now divides by the documents count.
    - empty collections raised NameError on `idx`/`d`; now skipped.
    - local no longer shadows the `db` parameter.
    """
    database = Mongo.getInstance()[db]
    names = [
        collection
        for collection in database.list_collection_names()
        if not collection.startswith('system.')
    ]
    stats_dict = {}
    for collection_name in names:
        coll = database[collection_name]
        start_date = None
        count_true = 0
        last_doc = None
        examined = 0
        # newest first; _id order approximates insertion time
        for idx, d in enumerate(coll.find().sort([("_id", pymongo.DESCENDING)])):
            if idx == 0:
                start_date = d['timestamp']
            count_true += 1 if d['outcome'] == True else 0
            last_doc = d
            examined = idx + 1
            if idx == how_many_documents - 1:
                break
        if last_doc is None:
            continue  # empty collection: nothing to report
        stats_dict[collection_name] = {
            'name': collection_name,
            'number positive outcomes': count_true,
            'perc positive outcomes': count_true / examined,
            'start date': start_date.strftime("%Y-%m-%d %H:%M"),
            'end date': last_doc['timestamp'].strftime("%Y-%m-%d %H:%M"),
        }
    for key in stats_dict.keys():
        pp(stats_dict[key])
def print_info(self):
    """Pretty-print the collected image-info mapping.

    :return: self, so calls can be chained
    """
    details = self.__info_dict
    beeprint.pp(details)
    return self
def create(self, request):
    """Create an exam from `volume` randomly drawn questions.

    Expects `request.data` to carry `year` (bare year string, e.g. "2020")
    and `volume` (how many questions to draw) — TODO confirm against the
    serializer's declared fields.  Responds 201 with the created exam and
    the drawn questions.
    """
    cp = request.data.copy()
    # the model field wants a full date, the client sends only a year
    cp['year'] = request.data['year'] + '-01-01'
    volume = int(cp['volume'])
    # order_by('?') = random ordering in the Django ORM
    questions = QuestionTr.objects.order_by('?')[:volume].values()
    question_ids = list(questions.values_list('id', flat=True))
    # stored as a stringified id list — presumably parsed back elsewhere; verify
    cp['questions'] = str(question_ids)
    beeprint.pp(cp)  # debug dump of the payload about to be validated
    serializer = self.get_serializer(data=cp)
    serializer.is_valid(raise_exception=True)
    self.perform_create(serializer)
    exam_id = serializer.data["id"]
    response_data = {
        "questions": list(questions),
        "exam": serializer.data,
    }
    headers = self.get_success_headers(exam_id)
    return Response(response_data, status=status.HTTP_201_CREATED, headers=headers, content_type='application/json')
def test_complicate_data(output=True):
    """Render the complex fixtures with width-based string breaking."""
    cfg = Config()
    cfg.string_break_method = C._STRING_BREAK_BY_WIDTH
    cfg.text_autoclip_enable = False
    rendered = [
        pp(fixture, output, config=cfg)
        for fixture in (values, long_text_in_dict, long_text_in_list)
    ]
    return "".join(rendered)
def send(ctx, phone_number, country_code='86', mode='async', send_mode='sms'):
    """Request a verification-code send and pretty-print the API response."""
    response = call(
        'verification/send',
        country_code=country_code,
        phone_number=phone_number,
        mode=mode,
        send_mode=send_mode,
    )
    pp(response)
def changeArrows():
    """Turn every renderer named "arrow*" on the plot green."""
    pp(plot.layout)
    for renderer in plot.renderers:
        name = renderer.name
        if isinstance(name, str) and name.startswith("arrow"):
            renderer.end.line_color = "green"
def file_rm(cls, url):
    """Delete temporary report files under intelligence/<url>.

    Removes *.txt, tee*.csv, *.xml and *.html files; no-op when the
    directory does not exist.  Fixed: the original built a shell string
    (`cd … && rm -rf *.txt …`, shell=True), which was vulnerable to shell
    injection via `url`; glob + os.remove needs no shell at all.
    """
    path = "intelligence/{0}".format(url)
    if not os.path.isdir(path):
        return
    import glob
    for pattern in ("*.txt", "tee*.csv", "*.xml", "*.html"):
        for fname in glob.glob(os.path.join(path, pattern)):
            os.remove(fname)
    pp('清除临时文件')
def pp(var):
    """Pretty print. Thin convenience wrapper around :func:`beeprint.pp`.

    Args:
        var: any Python object; dumped to stdout by beeprint.

    Returns:
        None — beeprint's return value is deliberately discarded.
    """
    beeprint.pp(var)
def alternative_pretty_print(self, myobject, prefix_line=None):
    """Debug helper: dump an object via the ``prettyprinter`` package.

    :param myobject: object/structure to dump
    :param prefix_line: optional label echoed before the dump
    :return: self, so calls can be chained
    """
    if prefix_line is not None:
        print("PRETTY PRINT [%s]" % prefix_line)
    from prettyprinter import pprint as pretty
    pretty(myobject)
    return self
def pretty_print(myobject, prefix_line=None):
    """Debug helper: dump an object via ``beeprint``.

    :param myobject: object/structure to dump
    :param prefix_line: optional label echoed before the dump
    """
    if prefix_line is not None:
        print("PRETTY PRINT [%s]" % prefix_line)
    from beeprint import pp as beeprint_pp
    beeprint_pp(myobject)
    return
def make(only_xls=False):
    """Rebuild all Elasticsearch indices, then reindex documents from RethinkDB.

    The four exists/delete/create sequences were identical; they are now a
    single helper (DRY), invoked in the original order.
    """
    def recreate_index(index, body, log=True):
        # drop-and-create so the mapping is always fresh
        if es.indices.exists(index=index):
            es.indices.delete(index=index)
        es.indices.create(index=index, body=body)
        if log:
            logger.info("Created index %s" % index)

    cursor = query.get_table_query(RDB_TABLE1).run()

    recreate_index(EL_INDEX0, HTML_ANALYZER, log=False)  # HTML stripper
    recreate_index(EL_INDEX1, INDEX_BODY1)               # multi-index filtering
    recreate_index(EL_INDEX2, INDEX_BODY2)               # suggestions
    recreate_index(EL_INDEX3, {})                        # lexique

    # read from xls file (also fixes suggestions unless xls-only)
    read_xls(fix_suggest=(not only_xls))

    count = 0
    for doc in cursor:
        elobj = single_update(doc)
        if elobj is not None:
            count += 1
            logger.info("[Count %s]\t%s" % (count, elobj['extrait']))
    print("Completed. No images:")
    pp(noimages.keys())
def DbusDaemonStatusChangedSig(self, state=None, info=None):
    """D-Bus handler for wicd status changes: refresh list and redraw screen.

    Example payload (was a stray no-op string literal in the original):
        state: dbus.UInt32(2L)
        info:  ['192.168.31.141', 'TP-LINK4G', '88', '0', '72.2 Mb/s']
    """
    print("in DbusDaemonStatusChangedSig")  # debug trace
    pp(info)
    self.UpdateNetList(state, info)
    if info is not None:  # fixed: was `!= None`
        self._Screen.Draw()
        self._Screen.SwapAndShow()
def save_credentials(user_id, code):
    """Exchange an OAuth code for a token and persist it for `user_id`.

    Both arguments arrive as single-element sequences (query-string
    values) — presumably from a web framework's parsed query dict; verify
    against the caller.  Python 2 module.
    """
    code = code[0]
    user_id = user_id[0]
    # debug output
    print code
    print user_id
    oauth.fetch_access_token(code, get_redirect_url())
    # current = yaml.load(open(CREDS_FILE))
    user = oauth.token
    # tag the token with its owner before saving
    user['id'] = user_id
    pp(user)
    save_token(user)
def pair_print(title, v):
    """Print `v` under `title` with three pretty-printers, markdown-fenced.

    The three copy-pasted print/dump/print sequences are now a single loop
    over (label, printer) pairs; the emitted text is byte-identical.
    """
    print(title)
    print("----")
    for label, printer in (("pprint", pprint), ("beeprint", pp), ("pprintpp", ppp)):
        print("**%s:**\n```python" % label)
        printer(v)
        print("```\n")
def eval(y_true, y_hat):
    """Print the confusion matrix and micro-averaged P/R/F1 for predictions.

    NOTE: the name shadows the builtin ``eval``; kept for caller compatibility.
    """
    matrix = confusion_matrix(y_true, y_hat)
    pp(matrix)
    precision = precision_score(y_true, y_hat, average='micro')
    recall = recall_score(y_true, y_hat, average='micro')
    f_score = f1_score(y_true, y_hat, average='micro')
    print("Precision: {:.2f}, Recall: {:.2f} F1 Score: {:.2f}".format(
        precision, recall, f_score))
def main():
    """Parse the input, type-check the AST, then generate output code."""
    parser = Parser(input_str)
    nodes = parser.parse()
    for node in nodes:
        beeprint.pp(node, max_depth=10, indent=4)

    checker = Checker(nodes)
    checker.check()

    generator = Generator(nodes, "./deep/nested/shit", True)
    generator.generate()
def import_students(table, path):
    """Bulk-load student rows from the CSV at `path` into `table`.

    Refuses to run on a non-empty table.  Python 2 module; `UTF8Recoder`
    re-encodes the file for the csv module.
    """
    if len(list(table.run())) > 0:
        print "Table not empty"
        return
    with open(path) as fp:
        csvfile = UTF8Recoder(fp, 'utf-8')
        for r in csv.DictReader(csvfile):
            # r['name'] = unicode(r['name'].decode('utf-8')
            r['name'] = r['name'].strip()
            r['address'] = r['address'].strip()
            # id column must be numeric
            r['id'] = int(r['id'].strip())
            pp(r)  # debug dump of each row before insert
            table.insert(r).run()
def feature_importance(model, x_pipeline):
    """Print (importance, attribute) pairs sorted by decreasing importance."""
    importances = model.feature_importances_
    extra_attrs = ["rooms_per_hhold", "pop_per_hhold", "bedrooms_per_room"]
    encoder = x_pipeline.get_params()["union__cat_pipeline__label_binarizer"]
    one_hot_classes = list(np.array(encoder.categories_).ravel())
    all_attrs = num_attribs + extra_attrs + one_hot_classes
    pp(sorted(zip(importances, all_attrs), reverse=True))
def info(obj):
    """Print a structural summary of `obj`, dispatching on its type.

    Accepts numpy arrays, pandas DataFrames, sklearn estimators, or dicts.
    (Fixed: the original annotation claimed ``pd.DataFrame`` only, which
    contradicted every branch below.)

    Raises:
        ValueError: for any unsupported type.
    """
    print(type(obj))
    if isinstance(obj, np.ndarray):
        __np_info(obj)
        df = pd.DataFrame(obj)
        __df_info(df)
        return
    if isinstance(obj, pd.DataFrame):
        df = pd.DataFrame(obj)
        __df_info(df)
        return
    if isinstance(obj, BaseEstimator):
        # dump fitted attributes when present
        if hasattr(obj, "coef_"):
            pp(obj.coef_)
        if hasattr(obj, "feature_importances_"):
            pp(obj.feature_importances_)
        pp(obj.__dict__)
        return
    if isinstance(obj, dict):
        pp(obj)
        return
    raise ValueError(type(obj))
def test_complicate_data(self):
    """Rendered complex fixtures must match the all_in_one.txt golden file."""
    cfg = Config()
    cfg.string_break_method = C._STRING_BREAK_BY_WIDTH
    cfg.text_autoclip_enable = False
    data_path = os.path.join(CUR_SCRIPT_PATH, 'data/all_in_one.txt')
    with codecs.open(data_path, encoding="utf8") as fp:
        expected = fp.read()
    pieces = [
        pp(values, output=False, config=cfg),
        pp(df.long_text_in_dict, output=False, config=cfg),
        pp(df.long_text_in_list, output=False, config=cfg),
    ]
    self.assertEqual("".join(pieces), expected)
def test_autoclip_no_room(output=True):
    """Render the fixture with a break width too small to fit anything."""
    cfg = Config()
    cfg.max_depth = 2
    cfg.string_break_width = 1
    cfg.string_break_method = C._STRING_BREAK_BY_WIDTH
    return pp(autoclip_no_room, output, config=cfg)
def test():
    """Run every registered case and report OK or dump the bad value.

    Fixed: the bare ``except:`` (which also swallowed SystemExit and
    KeyboardInterrupt) is narrowed to ``except Exception`` — only lookup
    and comparison failures should mark a case as failed.
    """
    for name, subcases in CASES.items():
        for subname, f in subcases.items():
            value = f(make_data())
            try:
                ok = (value['user'] == DATA['user'] and
                      value['tags'] == DATA['tags'] and
                      value['style'] == DATA['style'])
            except Exception:
                ok = False
            if ok:
                print_item(name, subname, 'OK')
            else:
                print('{}:{}'.format(name, subname).center(60, '-'))
                pp(value)
def the_data(cls, url=''):
    """Post-process theHarvester's HTML report for `url`.

    Extracts every <li> text into intelligence/<url>/the_<url>.txt, then
    deletes the HTML file.  Fixes vs. original:
    - bare ``except:`` swallowed *every* error (parse errors, write errors)
      and misreported them all as "file missing"; now only the missing-file
      case is caught.
    - the HTML file was removed via a shell string (`rm %s`, shell=True),
      injectable through `url`; os.remove needs no shell.
    """
    path = "intelligence/{0}/{0}.html".format(url)
    try:
        with open(path, 'r', encoding='utf-8') as f:
            soup = BeautifulSoup(f.read(), 'lxml')
        with open("intelligence/{0}/the_{0}.txt".format(url), "a+") as r:
            for data in soup.find_all('li'):
                r.writelines(data.get_text() + '\r\n')
        import os
        os.remove(path)
    except FileNotFoundError:
        pp('文件不存在:%s' % path)
def test_3lines_clip(output=True):
    """Render the fixture with line-based autoclipping enabled."""
    cfg = Config()
    cfg.text_autoclip_enable = True
    cfg.text_autoclip_method = C._TEXT_AUTOCLIP_BY_LINE
    cfg.string_break_enable = True
    cfg.string_break_method = C._STRING_BREAK_BY_WIDTH
    return pp(clip_by_3_lines, output, config=cfg)
def test_comma_ending(self):
    """A value ending in commas must match the comma.txt golden file."""
    golden_path = os.path.join(CUR_SCRIPT_PATH, 'data/dict/comma.txt')
    with codecs.open(golden_path, encoding='utf8') as fp:
        expected = fp.read()
    actual = pp({'key': 'aaa,,,'}, output=False)
    self.assertEqual(expected, actual)
def GenList(self):
    """Rebuild self._MyList: one InfoPageListItem per entry of self._AList.

    Fixed: ``self._AList == None`` → ``is None``.  Layout, fonts and the
    small-text rules are unchanged.
    """
    if self._AList is None:
        return
    self._MyList = []
    self._PsIndex = 0
    start_x = 0
    start_y = 0
    for i, v in enumerate(self._AList):
        # e.g. (0, dbus.String(u'AddressType'))
        li = InfoPageListItem()
        li._Parent = self
        li._PosX = start_x
        li._PosY = start_y + i * InfoPageListItem._Height
        li._Width = Width
        li._Fonts["normal"] = self._ListFontObj
        # UUID values are long; use the smaller small-font for them
        if v == "UUIDs":
            li._Fonts["small"] = self._ListSm2FontObj
        else:
            li._Fonts["small"] = self._ListSmFontObj
        li.Init(str(v))
        li._Flag = v
        if v == "UUIDs":
            # NOTE(review): a single-element UUID list renders "<empty>" —
            # confirm `> 1` is intended and not an off-by-one.
            if len(self._AList[v]) > 1:
                pp(self._AList[v][0])
                sm_text = str(self._AList[v][0])
            else:
                sm_text = "<empty>"
        else:
            sm_text = str(self._AList[v])
        # dbus booleans arrive stringified as "0"/"1"
        if sm_text == "0":
            sm_text = "No"
        elif sm_text == "1":
            sm_text = "Yes"
        sm_text = sm_text[:20]  # truncate to fit the row
        li.SetSmallText(sm_text)
        li._PosX = 2
        self._MyList.append(li)
def fast_get_all(self, keyword, size=5, index=EL_INDEX3, type=EL_TYPE1):
    """Match `keyword` against the _all field and return (hits, total).

    Returns (None, False) when the search call fails.
    """
    search_args = {
        'index': index,
        'doc_type': type,
        'from_': 0,
        'size': size,
        'body': {
            'query': {"match": {"_all": {"query": keyword}}}
        },
    }
    try:
        out = self._api.search(**search_args)
    except Exception as e:
        logger.error("Failed to execute fast get query\n%s" % e)
        return None, False
    pp(out)
    return out['hits']['hits'], out['hits']['total']
def main():
    """Build the train/validation/test configurations."""
    def make_eval_config(batch_size):
        # evaluation configs disable dropout and fix the batch size
        c = Config()
        c.keep_prob = 1.0
        c.dec_keep_prob = 1.0
        c.batch_size = batch_size
        return c

    # config for training
    config = Config()
    # config for validation
    valid_config = make_eval_config(60)
    # configuration for testing
    test_config = make_eval_config(1)

    pp(config)
    # get data set
def grid_search(model, train_X, train_y):
    """Grid-search the forest hyper-parameters and print RMSE per combo."""
    from sklearn.model_selection import GridSearchCV

    param_grid = [
        {'n_estimators': [3, 10, 30, 50, 80],
         'max_features': [2, 4, 6, 8, 10]},
        {'bootstrap': [False],
         'n_estimators': [3, 10, 30, 50, 80],
         'max_features': [2, 4, 6, 8, 10]},
    ]
    searcher = GridSearchCV(model, param_grid, cv=5, n_jobs=os.cpu_count(),
                            scoring="neg_mean_squared_error")
    searcher.fit(train_X, train_y)

    pp(searcher.best_params_)
    pp(searcher.best_estimator_)
    results = searcher.cv_results_
    for mean_score, params in zip(results["mean_test_score"], results["params"]):
        # scores are negated MSE, so negate back before the square root
        print(np.sqrt(-mean_score), params)
def inspect(df):
    """Print a quick exploratory summary of the housing DataFrame.

    NOTE: the name shadows the stdlib ``inspect`` module; kept for callers.
    """
    sections = (
        ("\n\nHEAD", lambda: df.head()),
        ("\n\nINFO", lambda: df.info()),
        ("\n\nINCOME_CAT_DIST", lambda: df["income_cat"].value_counts() / len(df)),
        ("\n\nCORR median_house_value",
         lambda: df.corr()["median_house_value"].sort_values(ascending=False)),
    )
    for title, compute in sections:
        print(title)
        pp(compute())
def main_processing(args):
    """Drive a full build (or clean, with --clean) of all device components.

    Python 2 module (bare ``print`` statements).
    """
    bp = BuildProcess()
    bp.create_process()
    print("====================================")
    git_rev = bp.get_git_commit()
    print "Git Commit is %r" % git_rev
    print("====================================")
    # populate DeviceComponent.comp_dict for this revision
    build_components(git_rev)
    pp(DeviceComponent.comp_dict)
    if args.clean:
        for key, value in DeviceComponent.comp_dict.items():
            print "Cleaning Device %s" % key
            value.clean(bp)
    else:
        for key, value in DeviceComponent.comp_dict.items():
            print "Building Device %s" % key
            value.build(bp)
    bp.terminate_process()
def test_sort_of_string(output=True):
    """Dump each fixture string's code points, then render the whole list.

    Fixed: the bare ``except:`` is narrowed to ``except TypeError`` —
    that is what ``ord()`` raises when iterating a py3 bytes object
    yields ints instead of length-1 strings.
    """
    config = Config()
    config.debug_delay = False
    config.str_display_not_prefix_u = False
    config.str_display_not_prefix_b = False
    for sstr in sort_of_string:
        ints = ''
        for e in sstr:
            try:
                ints += '%d ' % ord(e)
            except TypeError:
                # py3 bytes iteration yields ints already
                ints += '%d ' % e
        print('%40s %s %s' % (ints, repr(sstr), len(sstr)))
    return pp(sort_of_string, output, config=config)
def test_out_of_range_in_dict(self):
    """Depth-limited dict rendering must match its golden file."""
    cfg = Config()
    cfg.max_depth = 1
    golden_path = os.path.join(CUR_SCRIPT_PATH, 'data/out_of_range_in_dict.txt')
    with codecs.open(golden_path, encoding='utf8') as fp:
        expected = fp.read()
    # strip volatile object ids such as
    # <definition.NormalClassOldStyle object at 0x7f2d9a61bac8>
    expected = re.sub("at 0x[\d\w]+", "", expected)
    actual = pp(df.out_of_range_in_dict, output=False, config=cfg)
    actual = re.sub("at 0x[\d\w]+", "", actual)
    self.assertEqual(actual, expected)
def test_recursion(output=True):
    """Render self-referencing containers without infinite recursion."""
    self_ref_dict = {}
    self_ref_dict['d'] = self_ref_dict

    nested_recur = {'1': 1}
    nested_recur['r'] = {'d2': nested_recur}

    self_ref_list = []
    self_ref_list.append(self_ref_list)

    recursive_values = [
        self_ref_list,
        self_ref_list,  # repeated reference — must NOT be treated as recursion
        self_ref_dict,
        nested_recur,
        inst_of_recur_normal,
        RecurTestRecurClass,
    ]
    return pp(recursive_values, output, config=Config())
def test_boundary_break(output=True):
    """Render the line-break boundary fixture with width-based breaking."""
    cfg = Config()
    cfg.string_break_method = C._STRING_BREAK_BY_WIDTH
    return pp(v_line_break_boundary, output, config=cfg)
def test_out_of_range_in_dict(output=True):
    """Render the dict fixture with depth clamped to 1."""
    cfg = Config()
    cfg.max_depth = 1
    return pp(out_of_range_in_dict, output, config=cfg)
def test_dict_ordered_keys(output=True):
    """Render a multi-key dict with key ordering enabled."""
    cfg = Config()
    cfg.dict_ordered_key_enable = True
    return pp(dict_multi_keys, output, config=cfg)
def test_inline_repr_out_of_range(output=True):
    """Render the inline-repr fixture with depth 1 and width-40 breaking."""
    cfg = Config()
    cfg.max_depth = 1
    cfg.string_break_method = C._STRING_BREAK_BY_WIDTH
    cfg.string_break_width = 40
    return pp(inline_repr, output, config=cfg)
timeit("json.loads(text)", number=100000, globals={"json": json, "text": text})) print("validr: %.3f" % timeit("f(value)", number=100000, globals={"f": f_validr, "value": value})) print("validr_simple: %.3f" % timeit("f(value)", number=100000, globals={"f": f_validr_simple, "value": value})) print("schema: %.3f" % timeit("f(value)", number=100000, globals={"f": f_schema, "value": value})) def profile(): run("for i in range(10000):\tf(value)") if __name__ == "__main__": cmd = sys.argv[1] if len(sys.argv) > 1 else None if cmd == "-p": profile() elif cmd == "-t": try: from beeprint import pp except: from pprint import pprint as pp pp(f_validr(value)) pp(f_validr_simple(value)) pp(f_schema(value)) else: benchmark()
def test_negative(self):
    """Negative int and float must render as their plain repr plus newline."""
    for value, expected in ((-1, '-1\n'), (-1.1, '-1.1\n')):
        self.assertEqual(pp(value, output=False), expected)
def test_string(self):
    """pp() string rendering across literal kinds, python-2/3 aware."""
    config = Config()
    config.str_display_not_prefix_u = False
    config.str_display_not_prefix_b = False

    # plain string literal renders identically on both major versions
    self.assertEqual(pp("plain string", output=False, config=config),
                     "'plain string'\n")

    # (value, expected on py2, expected on py3)
    cases = [
        # unicode string
        (u'unicode string', u"u'unicode string'\n", u"'unicode string'\n"),
        # utf8 string
        (u'utf8 string'.encode('utf-8'), u"'utf8 string'\n", u"b'utf8 string'\n"),
        # gb2312 string
        (u'gb2312 string'.encode('gb2312'), u"'gb2312 string'\n", u"b'gb2312 string'\n"),
        # unicode special characters string
        (u'\\', u"u'\\\\'\n", u"'\\\\'\n"),
        # utf8 special characters string
        (u'\\'.encode("utf8"), u"'\\\\'\n", u"b'\\\\'\n"),
    ]
    for value, expected_py2, expected_py3 in cases:
        expected = expected_py2 if pyv == 2 else expected_py3
        self.assertEqual(pp(value, output=False, config=config), expected)
def test_class_last_el(output=True):
    """Render a list whose last element repeats an earlier instance."""
    cfg = Config()
    cfg.instance_repr_enable = False
    with_repr = ReprMethodClassNewStyle()
    without_repr = NormalClassNewStyle()
    return pp([with_repr, without_repr, with_repr], output, config=cfg)
def test_class_inst_repr_enable(output=True):
    """Render each repr-bearing class alongside one instance of it."""
    config = Config()
    instances = [cls() for cls in class_repr]
    return pp(class_repr + instances, output, config=config)
def test_inner_class(output=True):
    """Render a class object that contains a nested class definition."""
    cfg = Config()
    return pp(OuterClass, output, config=cfg)
def test_class(output=True):
    """Render an empty new-style class object."""
    cfg = Config()
    return pp(EmptyClassNewStyle, output, config=cfg)
def test_tuple_nested(output=True):
    """Render the nested-tuple fixture with default settings."""
    cfg = Config()
    return pp(tuple_nested, output, config=cfg)
def test_positive(self):
    """Positive int and float must render as their plain repr plus newline."""
    for value, expected in ((1, '1\n'), (1.1, '1.1\n')):
        self.assertEqual(pp(value, output=False), expected)
def test_tuple(output=True):
    """Render the flat tuple fixture with default settings."""
    cfg = Config()
    return pp(tuple_testing, output, config=cfg)
def test_class_all_repr_disable(output=True):
    """Render the repr-bearing classes with instance repr disabled."""
    cfg = Config()
    cfg.instance_repr_enable = False
    return pp(class_repr, output, config=cfg)
# variables host = 'rdb' db = 'mydb' table = 'mytable' # connect and create r.connect(host=host).repl() if db not in r.db_list().run(): r.db_create(db).run() if table not in r.db(db).table_list().run(): r.db(db).table_create(table).run() # implement actions query = r.db(db).table(table) if len(sys.argv) > 1: command = sys.argv[1] if command == 'changefeed': try: for change in query.changes().run(): print("Subscription update!") pp(change) except Exception as e: print("Table removed or connection aborted") elif command == 'insert': query.insert({'key1': 'value1', 'key2': 'value2'}).run() elif command == 'update': query.filter({'key1': 'value1'}).update({'key1': 'UPDATED!'}).run() elif command == 'clear': r.db_drop(db).run()