def load_data(self): logging.info("加载数据与模型") self.train_set, self.val_set = self_sample_by_class(self.train_file, ratio = 0.85) print(self.train_set.shape) print(self.val_set.shape) transform_fasttext(self.train_set, self.fasttext_train_file, self.column) transform_fasttext(self.val_set, self.fasttext_val_file, self.column)
async def process_update_priority(message: types.Message, state: FSMContext):
    logging.info("Processing task priority update...")
    if message.text == "Зеленый":
        Session = sessionmaker(bind=engine)
        session = Session()
        id_ = await state.get_data()
        id_ = id_["id_"]
        session.query(Task).filter(Task.id_ == id_).update(
            {Task.priority: 0}, synchronize_session=False)
        session.commit()
    elif message.text == "Желтый":
        Session = sessionmaker(bind=engine)
        session = Session()
        id_ = await state.get_data()
        id_ = id_["id_"]
        session.query(Task).filter(Task.id_ == id_).update(
            {Task.priority: 1}, synchronize_session=False)
        session.commit()
    elif message.text == "Красный":
        Session = sessionmaker(bind=engine)
        session = Session()
        id_ = await state.get_data()
        id_ = id_["id_"]
        session.query(Task).filter(Task.id_ == id_).update(
            {Task.priority: 2}, synchronize_session=False)
        session.commit()
    elif message.text == "Cancel":
        await message.answer("Выхожу из интерфейса.", reply_markup=ReplyKeyboardRemove())
        await state.finish()
        return
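# The three color branches above differ only in the priority value, so they can be
# collapsed with a lookup table. This is only a sketch of a possible consolidation,
# assuming the same Task model, engine, and aiogram imports as the handler above;
# it is not the original author's implementation.
PRIORITY_BY_COLOR = {"Зеленый": 0, "Желтый": 1, "Красный": 2}

async def process_update_priority_compact(message: types.Message, state: FSMContext):
    if message.text == "Cancel":
        await message.answer("Выхожу из интерфейса.", reply_markup=ReplyKeyboardRemove())
        await state.finish()
        return
    priority = PRIORITY_BY_COLOR.get(message.text)
    if priority is None:
        return  # unknown button text; ignore
    session = sessionmaker(bind=engine)()
    id_ = (await state.get_data())["id_"]
    session.query(Task).filter(Task.id_ == id_).update(
        {Task.priority: priority}, synchronize_session=False)
    session.commit()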
def parley(self):
    if self.first_time:
        """self.agent.observe(
            {
                'id': 'World',
                'text': 'Welcome to the ParlAI Chatbot demo. '
                'You are now paired with a bot - feel free to send a message. '
                'Type [DONE] to finish the chat.',
            }
        )"""
        # Need to load persona here.
        self.first_time = False

    a = self.agent.get_new_act_message()
    if a is not None:
        if '[DONE]' in a['text']:
            self.episodeDone = True
            return None
        else:
            self.agent.observe(a)
            response = self.agent.act()
            if response is not None:
                logging.info("Responding to id: " + response[0]['id'] +
                             " \n with: " + response[0]['text'])
                self.agent.observe(response[0])
                self.agent.send_data(response[0])
def create_cluster(ctx, proxy):
    r"""Idempotently create the cluster defined in [cluster_section].

    Section of the configuration is 'cluster' by default. Use the
    cluster_section option on the profile subcommand to specify a
    different section from the config. E.g.:

        $> cumulus cluster --cluster_section 'other_section' create

    Clusters by default will create the profiles they need based on the
    profile_section passed to the cluster sub-command. E.g.:

        $> cumulus cluster --profile_section 'other_profile' \\
                           --cluster_section 'other_cluster' create

    This will create a profile from the [other_profile] section of the
    config, and a cluster from the [other_cluster] section.

    By default running:

        $> cumulus create cluster

    will idempotently create a profile from the [profile] section and a
    cluster from the [cluster] section.
    """
    logging.info('Creating cluster "%s"' % proxy.cluster_name)

    if proxy.profile is None:
        ctx.invoke(create_profile)

    proxy.cluster = proxy.get_cluster_body()

    logging.info('Finished creating cluster "%s" (%s)' %
                 (proxy.cluster_name, proxy.cluster['_id']))
def main(argv=sys.argv):
    # Deeply nested parse trees can exceed the default recursion limit.
    sys.setrecursionlimit(10000)

    (opts, args) = parse_options(argv)
    logging.setLogLevel(level=opts.loglevel)
    joosc_opts = JooscOptions(opts.stage, opts.include_stdlib == True,
                              opts.print_stdlib == True,
                              opts.directory_crawl == True,
                              opts.clean_output == True)

    if opts.test:
        logging.info("TESTING %s" % (opts.test))
        # TODO: Decide when to toggle verbose. Old method was insufficient.
        test.setup_and_run(opts.test, opts.show_errors == True, False, joosc_opts)
        return 0

    if len(args) < 1:
        logging.info("Nothing to compile.")
        return 0

    joosc(args, joosc_opts)
def load_data(self):
    '''
    Process the input corpus to build the gensim dictionary and corpus used
    later for training the LDA model.
    Loads the training set (trainSet) and prediction set (testSet).
    :return: None
    '''
    if os.path.exists(self.dict_file):
        logging.info("loading existing corpus and dictionary ...")
        dictionary = corpora.Dictionary.load(self.dict_file)
        corpus = corpora.MmCorpus(self.corpus_file)
        self.corpus = corpus
        self.dictionary = dictionary
    else:
        logging.info("creating corpus and dictionary, please wait ...")
        texts = [[word for word in document.split(' ')]
                 for document in self.raw_corpus]
        frequency = defaultdict(int)
        for text in texts:
            for token in text:
                frequency[token] += 1
        # keep only tokens that occur more than 5 times
        texts = [[token for token in text if frequency[token] > 5]
                 for text in texts]
        dictionary = corpora.Dictionary(texts)
        corpus = [dictionary.doc2bow(text) for text in texts]
        dictionary.save(self.dict_file)
        corpora.MmCorpus.serialize(self.corpus_file, corpus)
        self.corpus = corpus
        self.dictionary = dictionary
def train_feature_model(self):
    '''
    Train the feature-extraction model, or load it if it already exists.
    :return: True once self.feature_model is set
    '''
    if self.feature_mode == 'lda':
        if os.path.exists(self.feature_model_file):
            logging.info("loading LDA model...")
            lda = models.LdaModel.load(self.feature_model_file)
            self.feature_model = lda
        else:
            lda = models.LdaModel(self.corpus, id2word=self.dictionary,
                                  num_topics=self.feature_num,
                                  update_every=0, passes=20)
            lda.save(self.feature_model_file)
            self.feature_model = lda
    elif self.feature_mode == 'lsi':
        if os.path.exists(self.feature_model_file):
            logging.info("loading LSI model...")
            lsi = models.LsiModel.load(self.feature_model_file)
            self.feature_model = lsi
        else:
            lsi = models.LsiModel(self.corpus, id2word=self.dictionary,
                                  num_topics=self.feature_num)
            lsi.save(self.feature_model_file)
            self.feature_model = lsi
    return True
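# A minimal sketch of how the trained topic model above could be turned into dense
# feature vectors for a downstream classifier, assuming gensim and numpy are
# available. The helper name extract_features is an assumption for illustration and
# is not taken from the original source.
from gensim import matutils
import numpy as np

def extract_features(feature_model, corpus, feature_num):
    # feature_model[corpus] yields (topic_id, weight) pairs per document;
    # corpus2dense turns them into a (num_topics, num_docs) matrix.
    dense = matutils.corpus2dense(feature_model[corpus], num_terms=feature_num)
    return np.transpose(dense)  # one row of topic weights per document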
async def handle_action_choise(message: types.Message, state: FSMContext):
    logging.info("Handling action choice...")
    if message.text == "Delete":
        try:
            Session = sessionmaker(bind=engine)
            session = Session()
            id_ = await state.get_data()
            id_ = id_["id_"]
            task_to_delete = session.query(Task).filter(Task.id_ == id_).one()
            session.delete(task_to_delete)
            session.commit()
            if session.query(Task).filter(Task.id_ == id_).count() == 0:
                await message.answer("Задача успешно удалена!",
                                     reply_markup=ReplyKeyboardRemove())
        except Exception as e:
            await message.answer(str(e))
        return
    elif message.text == "Update":
        keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
        keyboard.row("Date", "Text", "Priority")
        await message.answer("Выберите, что вы хотите обновить:", reply_markup=keyboard)
        await ManageTasks.on_update.set()
    elif message.text == "Cancel":
        await message.answer("Выхожу из интерфейса.", reply_markup=ReplyKeyboardRemove())
        await state.finish()
        return
    else:
        await message.answer("Пожалуйста, выберите действие из списка ниже")
def predict(self):
    '''
    Run the classifier on the test set and obtain the predicted classes
    and their probabilities.
    :return: True on success
    '''
    y_pred = self.classify_model.predict(self.feature_test_X)
    y_prob = self.classify_model.predict_proba(self.feature_test_X)
    logging.info("Prediction on the test data finished, writing result files...")
    result_file = os.path.join(
        self.result_dir,
        "{}_{}_{}_{}d_{:.3f}.csv".format(self.feature_mode, self.column,
                                         self.classify_mode, self.feature_num,
                                         self.best_score))
    result_prob_file = os.path.join(
        self.result_dir,
        "{}_{}_{}_{}d_{:.3f}_prob.csv".format(self.feature_mode, self.column,
                                              self.classify_mode,
                                              self.feature_num,
                                              self.best_score))
    result_string = ['id,class\n']
    for id, pred in enumerate(y_pred):
        string = "{},{}\n".format(id, pred + 1)
        result_string.append(string)
    write_data(''.join(result_string), result_file)
    save_prob_file(y_prob, result_prob_file)
    logging.info("Submission written, see {}...".format(self.result_dir))
    return True
async def process_update(message: types.Message, state: FSMContext):
    logging.info("Processing update value choice...")
    if message.text == "Date":
        await ManageTasks.on_update_date.set()
        await message.answer("Введите новое значение", reply_markup=ReplyKeyboardRemove())
    elif message.text == "Text":
        await ManageTasks.on_update_text.set()
        await message.answer("Введите новое значение", reply_markup=ReplyKeyboardRemove())
    elif message.text == "Priority":
        await ManageTasks.on_update_priority.set()
        keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
        keyboard.add("Зеленый")
        keyboard.add("Желтый")
        keyboard.add("Красный")
        keyboard.add("Cancel")
        await message.answer("Выберите новое значение", reply_markup=keyboard)
    elif message.text == "Cancel":
        await message.answer("Выхожу из интерфейса.", reply_markup=ReplyKeyboardRemove())
        await state.finish()
        return
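# A hedged sketch of how these FSM handlers might be wired up, assuming an
# aiogram 2.x Dispatcher instance named dp and the ManageTasks states used above.
# The registration point and function name are assumptions; the original project's
# dispatcher setup is not shown in these snippets.
def register_manage_tasks_handlers(dp):
    dp.register_message_handler(process_update, state=ManageTasks.on_update)
    dp.register_message_handler(process_update_priority,
                                state=ManageTasks.on_update_priority)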
def handle_INPUT_EV_EJECT(self):
    self.emit_message(msgs.MEDIA_ACT_STOP)
    self.emit_message(msgs.UI_ACT_SHOW_MESSAGE, "Ejecting Disc", "", None)
    logging.info("ejecting disc")
    os.system("eject")
    self.emit_message(msgs.UI_ACT_HIDE_MESSAGE)
def test_sell(amount):
    result = sell(TEST_BRANCH, TEST_ISDN, amount)
    if result is None or result.has_error:
        logging.info(u'The request failed.')
    else:
        logging.info(u'The TAN code has been sent.')
def __on_receive_description_xml(self, data, a, t, location, uuid, xml):
    """
    Callback for checking the given UPnP device by parsing its
    description XML. Announces the availability of new devices.
    """
    if (data):
        xml[0] += data
    else:
        # if the device is not in the processing table, it has said
        # "bye bye" while processing the initialization; in that case
        # simply ignore it
        if (not uuid in self.__processing):
            return
        del self.__processing[uuid]

        if (xml[0]):
            dom = MiniXML(xml[0], _NS_DESCR).get_dom()
            descr = DeviceDescription(location, dom)

            # announce availability of device
            logging.info("[ssdp monitor] device discovered: %s (%s)"
                         % (descr.get_friendly_name(), descr.get_device_type()))
            logging.debug("[ssdp monitor] propagating device: %s" % uuid)
            self.emit_message(msgs.SSDP_EV_DEVICE_DISCOVERED, uuid, descr)
def __on_idle_timeout(self):
    """
    Reacts on idle timeout and suspends the player.
    """
    logging.info("[mediaplayer] media backend idle timeout")
    self.__state_machine.send_input(_INPUT_IDLE)
def process_IN_CREATE(self, event):
    print("Create event:", event.pathname)
    logging.info("Create event : " + event.pathname)
    if (os.path.isdir(event.pathname)):
        watch_manager.add_watch(event.pathname, config.MONTIOR_MASK, rec=True)
    else:
        # judging by the file extension alone is not rigorous
        if (os.path.splitext(event.pathname)[1] == config.IMAGE_FORMAT):
            stream_id = get_path_dir(event.pathname, -1)
            label, percent = getLabel(stream_id, event.pathname)
            if (label != -1):
                # send the label
                logging.info("stream: " + str(stream_id) + " | label " + str(label) +
                             " | percent " + str(percent))
                http_notify(config.CLASSIFY_CALLBACK_ADDR, {
                    'stream_id': stream_id,
                    'label': label,
                    'percent': percent
                })
            else:
                # report that the stream failed
                logging.warning("stream " + stream_id + " got -1 from getLabel")
                http_notify(
                    config.STREAM_STATUS_CALLBACK_ADDR, {
                        'stream_id': stream_id,
                        'code': config.CALLBACK_CODE_ONLINE_ERROR_2
                    })
            if (percent > config.DEFAULT_MIN_PERCENT):
                os.remove(event.pathname)
        else:
            logging.warning("Unexpected file format: " + event.pathname)
def handle_HTTPSERVER_SVC_BIND_UDP(self, owner, addr, port):
    if ((addr, port) in self.__listeners):
        return "address already in use"

    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
                             socket.IPPROTO_UDP)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind((addr, port))
    except:
        logging.error("[httpserv] error binding to %s:%d\n%s",
                      addr, port, logging.stacktrace())
        return "could not bind to address"

    self.__listeners[(addr, port)] = _Listener(owner, sock, None)
    t = threading.Thread(target=self.__listen_for_dgram,
                         args=[addr, port, sock, owner])
    t.setDaemon(True)
    t.start()
    logging.info("[httpserv] bound to UDP %s:%d", addr, port)
    return ""
def prepare_data(args, random_state=None, num_val_baskets=None,
                 num_test_baskets=None, max_basket_size=np.inf):
    rng = check_random_state(random_state)
    ds = load_dataset(dataset_name=args.dataset_name,
                      num_baskets=args.num_baskets,
                      use_metadata=args.use_metadata,
                      random_state=rng,
                      max_basket_size=max_basket_size,
                      input_file=args.input_file)

    # train / val / test split
    logging.info("Splitting dataset into train / val / test")
    num_train_baskets = len(ds.baskets) - num_val_baskets - num_test_baskets
    train_ds, val_ds, test_ds = ds.split([num_train_baskets, num_val_baskets,
                                          num_test_baskets])
    logging.info("%i train baskets" % len(train_ds.baskets))
    basket_sizes = [len(x) for x in train_ds.baskets]
    logging.info("number of items in training baskets, avg: %d, variance: %d"
                 % (np.mean(basket_sizes), np.var(basket_sizes)))
    logging.info("%i val baskets" % len(val_ds.baskets))
    logging.info("%i test baskets" % len(test_ds.baskets))

    # build mini-batch generator
    train_data_loader = BasketDataLoader(train_ds, batch_size=args.batch_size)

    return (ds.product_catalog, ds.get_basket_size_buckets(),
            train_data_loader, val_ds, test_ds)
def test_sell(amount): result = sell(TEST_BRANCH, TEST_ISDN, amount) if result is None or result.has_error: logging.info(u"Хүсэлт амжилтгүй боллоо.") else: logging.info(u"TAN код илгээгдсэн.")
def net_initialize(self,
                   startup_prog=None,
                   pretrain_weights=None,
                   fuse_bn=False,
                   save_dir='.',
                   sensitivities_file=None,
                   eval_metric_loss=0.05):
    if hasattr(self, 'backbone'):
        backbone = self.backbone
    else:
        backbone = self.__class__.__name__
    pretrain_weights = get_pretrain_weights(pretrain_weights, backbone, save_dir)
    if startup_prog is None:
        startup_prog = fluid.default_startup_program()
    self.exe.run(startup_prog)
    if pretrain_weights is not None:
        logging.info(
            "Load pretrain weights from {}.".format(pretrain_weights))
        utils.utils.load_pretrain_weights(self.exe, self.train_prog,
                                          pretrain_weights, fuse_bn)
    # Apply pruning
    if sensitivities_file is not None:
        from .slim.prune_config import get_sensitivities
        sensitivities_file = get_sensitivities(sensitivities_file, self, save_dir)
        from .slim.prune import get_params_ratios, prune_program
        prune_params_ratios = get_params_ratios(
            sensitivities_file, eval_metric_loss=eval_metric_loss)
        prune_program(self, prune_params_ratios)
        self.status = 'Prune'
def save_model(self, save_dir):
    if not osp.isdir(save_dir):
        if osp.exists(save_dir):
            os.remove(save_dir)
        os.makedirs(save_dir)
    fluid.save(self.train_prog, osp.join(save_dir, 'model'))
    model_info = self.get_model_info()
    model_info['status'] = self.status
    with open(osp.join(save_dir, 'model.yml'), encoding='utf-8', mode='w') as f:
        yaml.dump(model_info, f)

    # Save evaluation results
    if hasattr(self, 'eval_details'):
        with open(osp.join(save_dir, 'eval_details.json'), 'w') as f:
            json.dump(self.eval_details, f)

    if self.status == 'Prune':
        # Save the shapes of the pruned parameters
        shapes = {}
        for block in self.train_prog.blocks:
            for param in block.all_parameters():
                pd_var = fluid.global_scope().find_var(param.name)
                pd_param = pd_var.get_tensor()
                shapes[param.name] = np.array(pd_param).shape
        with open(osp.join(save_dir, 'prune.yml'), encoding='utf-8', mode='w') as f:
            yaml.dump(shapes, f)

    # Flag file marking a successful save
    open(osp.join(save_dir, '.success'), 'w').close()
    logging.info("Model saved in {}.".format(save_dir))
def export_inference_model(self, save_dir):
    test_input_names = [
        var.name for var in list(self.test_inputs.values())
    ]
    test_outputs = list(self.test_outputs.values())
    fluid.io.save_inference_model(dirname=save_dir,
                                  executor=self.exe,
                                  params_filename='__params__',
                                  feeded_var_names=test_input_names,
                                  target_vars=test_outputs,
                                  main_program=self.test_prog)
    model_info = self.get_model_info()
    model_info['status'] = 'Infer'

    # Save the input and output description of the model
    model_info['_ModelInputsOutputs'] = dict()
    model_info['_ModelInputsOutputs']['test_inputs'] = [
        [k, v.name] for k, v in self.test_inputs.items()
    ]
    model_info['_ModelInputsOutputs']['test_outputs'] = [
        [k, v.name] for k, v in self.test_outputs.items()
    ]
    with open(osp.join(save_dir, 'model.yml'), encoding='utf-8', mode='w') as f:
        yaml.dump(model_info, f)

    # The flag of model for saving successfully
    open(osp.join(save_dir, '.success'), 'w').close()
    logging.info(
        "Model for inference deploy saved in {}.".format(save_dir))
def setup_data(self, path):
    logging.info(f"loading normalized fbdialog data: {path}")
    for (text, labels, reward, candidates), new_episode in super().setup_data(path):
        text = self.normalize_replies(text)
        labels = [self.normalize_replies(l) for l in labels]
        candidates = [self.normalize_replies(c) for c in candidates]
        yield (text, labels, reward, candidates), new_episode
async def unload(ctx, extension):
    if "Manager" in [role.name for role in ctx.author.roles]:
        log.info("Unloading extension %s" % (extension))
        client.unload_extension(extension)
        log.info("Unloaded extension %s" % (extension))
        await ctx.send("Successfully unloaded %s!" % (extension))
    else:
        await ctx.send("**Insufficient Permissions:** This command requires "
                       "permission rank `MANAGER`")
async def on_ready(self):
    for guild in self.bot.guilds:
        if guild.id == DBOTS_GUILD:
            continue
        info(f"Setting up VoiceContext for guild {guild.name}")
        ctx = VoiceContext()
        self.contexts[guild.id] = ctx
    info("VoiceContext finished setting up")
def build(cls, arguments, dataset):
    args = arguments.args
    logging.info("Building model for %s" % (args.scores_file,))
    model = cls._build_model_object(arguments, dataset.product_catalog,
                                    dataset.max_basket_size, dataset.seed)
    ofile = cls._load_model(arguments, model, dataset)
    return model, ofile
def mapper(self, key, values):
    logging.info("Pre processing " + str(len(values)) + " values")
    data_processor = self.get_data_processor()
    data_processor.set_data(values)
    data_set = data_processor.get_data_set()
    stats = self.get_pre_processor().calculate(data_set)
    logging.info("Calculated statistics: " + str(stats))
    yield 'stats', stats
def __search(self, key):
    idx = 0
    for freq, name in self.__stations:
        if (key in name.lower()):
            self.__list.scroll_to_item(idx + 1)
            logging.info("search: found '%s' for '%s'" % (name, key))
            break
        idx += 1
def load_model(model_dir): if not osp.exists(osp.join(model_dir, "model.yml")): raise Exception("There's not model.yml in {}".format(model_dir)) with open(osp.join(model_dir, "model.yml")) as f: info = yaml.load(f.read(), Loader=yaml.Loader) status = info['status'] if not hasattr(models, info['Model']): raise Exception("There's no attribute {} in models".format( info['Model'])) model = getattr(models, info['Model'])(**info['_init_params']) if status == "Normal" or \ status == "Prune": startup_prog = fluid.Program() model.test_prog = fluid.Program() with fluid.program_guard(model.test_prog, startup_prog): with fluid.unique_name.guard(): model.test_inputs, model.test_outputs = model.build_net( mode='test') model.test_prog = model.test_prog.clone(for_test=True) model.exe.run(startup_prog) if status == "Prune": from .slim.prune import update_program model.test_prog = update_program(model.test_prog, model_dir, model.places[0]) import pickle with open(osp.join(model_dir, 'model.pdparams'), 'rb') as f: load_dict = pickle.load(f) fluid.io.set_program_state(model.test_prog, load_dict) elif status == "Infer" or \ status == "Quant": [prog, input_names, outputs] = fluid.io.load_inference_model(model_dir, model.exe, params_filename='__params__') model.test_prog = prog test_outputs_info = info['_ModelInputsOutputs']['test_outputs'] model.test_inputs = OrderedDict() model.test_outputs = OrderedDict() for name in input_names: model.test_inputs[name] = model.test_prog.global_block().var(name) for i, out in enumerate(outputs): var_desc = test_outputs_info[i] model.test_outputs[var_desc[0]] = out if 'Transforms' in info: model.test_transforms = build_transforms(info['Transforms']) model.eval_transforms = copy.deepcopy(model.test_transforms) if '_Attributes' in info: for k, v in info['_Attributes'].items(): if k in model.__dict__: model.__dict__[k] = v logging.info("Model[{}] loaded.".format(info['Model'])) return model
def __on_mount_volume(self, arg):
    ident = arg[0]
    path = arg[4][7:]  # strip the 7-character URI scheme prefix (e.g. "file://")
    label = arg[6]
    device = arg[11]
    self.__mounts[ident] = (device, path)
    logging.info("device mounted: %s at %s", label, path)
    self.emit_message(msgs.SYSTEM_EV_DRIVE_MOUNTED, label, path)
def __on_unmount_volume(self, ident):
    dev, path = self.__mounts.get(ident, (None, None))
    if (dev and path):
        logging.info("device unmounted: %s", dev)
        self.emit_message(msgs.SYSTEM_EV_DRIVE_UNMOUNTED, path)
    else:
        logging.info("unspecified device unmounted: %s", ident)
        self.emit_message(msgs.SYSTEM_EV_DRIVE_UNMOUNTED, path)
def load_model(model_dir): if not osp.exists(osp.join(model_dir, "model.yml")): raise Exception("There's no model.yml in {}".format(model_dir)) with open(osp.join(model_dir, "model.yml")) as f: info = yaml.load(f.read(), Loader=yaml.Loader) status = info['status'] if not hasattr(models, info['Model']): raise Exception("There's no attribute {} in models".format( info['Model'])) model = getattr(models, info['Model'])(**info['_init_params']) if status in ["Normal", "QuantOnline"]: startup_prog = fluid.Program() model.test_prog = fluid.Program() with fluid.program_guard(model.test_prog, startup_prog): with fluid.unique_name.guard(): model.test_inputs, model.test_outputs = model.build_net( mode='test') model.test_prog = model.test_prog.clone(for_test=True) if status == "QuantOnline": print('test quant online') import paddleslim as slim model.test_prog = slim.quant.quant_aware(model.test_prog, model.exe.place, for_test=True) model.exe.run(startup_prog) fluid.load(model.test_prog, osp.join(model_dir, 'model')) if status == "QuantOnline": model.test_prog = slim.quant.convert(model.test_prog, model.exe.place) elif status in ['Infer', 'Quant']: [prog, input_names, outputs] = fluid.io.load_inference_model(model_dir, model.exe, params_filename='__params__') model.test_prog = prog test_outputs_info = info['_ModelInputsOutputs']['test_outputs'] model.test_inputs = OrderedDict() model.test_outputs = OrderedDict() for name in input_names: model.test_inputs[name] = model.test_prog.global_block().var(name) for i, out in enumerate(outputs): var_desc = test_outputs_info[i] model.test_outputs[var_desc[0]] = out if 'test_transforms' in info: model.test_transforms = build_transforms(info['test_transforms']) model.eval_transforms = copy.deepcopy(model.test_transforms) if '_Attributes' in info: for k, v in info['_Attributes'].items(): if k in model.__dict__: model.__dict__[k] = v logging.info("Model[{}] loaded.".format(info['Model'])) return model
def list_aws_instances(proxy):
    """Print a list of instances on AWS"""
    print('Listing AWS instances:')
    headers, data = get_aws_instance_info(proxy)
    print(tabulate(data, headers=headers))
    print('\n')
    logging.info('Finished listing AWS instances')
def on_data(self, raw_data):
    if 'nosql' in self.storage:
        if self.is_tweet(raw_data):
            tweet_dict = self.to_json(raw_data)
            self.to_nosql(tweet_dict)
        else:
            logging.info(raw_data)
    if 'datefile' in self.storage:
        self.update_datefile()
        self.datefile.write(raw_data)
    super(MyStreamListener, self).on_data(raw_data)
def cache():
    global plugins
    global ready
    while True:
        if ready:
            if config.debug:
                logging.info('caching plugins...')
            tempfile = config.plugins_cache + '.tmp'
            try:
                # write to a temp file first, then rename atomically
                with open(tempfile, 'wb') as f:
                    f.write(pickle.dumps(plugins))
                os.rename(tempfile, config.plugins_cache)
            except:
                logging.info('Error: cache plugins')
        time.sleep(10)
def read(self, data):
    # remove trailing line delimiter (a literal two-character "\n" sequence)
    if data.endswith('\\n'):
        data = data[0:len(data) - 2]

    if data.find('\t') != -1:
        key, value = data.split('\t', 1)
    else:
        key, value = 0, data

    # try selecting a character as delimiter that is present in the data
    delimiter = ','
    if value.find(',') == -1:
        # nope, no commas
        delimiter = ';'
        if value.find(';') == -1:
            # and also no semi-colons, so use whitespace
            # 'None' is the whitespace delimiter when using 'split'
            delimiter = None
    logging.info("Using " + ("whitespace" if delimiter is None else delimiter)
                 + " as delimiter")

    lines = value.split('\\n')
    # test if there are any lines
    # TODO: is it better to return an empty np array here?
    if len(lines) == 0:
        return key, None

    # sometimes a line is corrupted, although it is correctly built by the
    # java record reader; as a workaround, discard lines of the wrong length
    arr = []
    len_line = len(lines[0].split(delimiter))
    for line in lines:
        temp_line = line.split(delimiter)
        if len(temp_line) == len_line:
            arr.append(temp_line)
        else:
            sys.stderr.write('discarding line with wrong length: "' + line + '"\n')

    # convert to numpy array
    arr = np.array(arr)
    result = np.zeros(arr.shape)
    # strip and convert values to float
    for i in range(arr.shape[0]):
        for j in range(arr.shape[1]):
            result[i, j] = float(arr[i, j].strip())
    return key, result
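# A small illustration (not from the original source) of the record format this
# reader expects: "<key>\t<row>\n<row>..." where "\n" is a literal two-character
# sequence separating rows, and values within a row are comma-, semicolon- or
# whitespace-separated. `TheRecordReader` is a hypothetical name for the class
# that owns read() above.
# reader = TheRecordReader()
# key, matrix = reader.read("42\t1.0, 2.0, 3.0\\n4.0, 5.0, 6.0\\n")
# print(key)      # "42"
# print(matrix)   # [[1. 2. 3.]
#                 #  [4. 5. 6.]]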
def main():
    app_dir = os.path.dirname(__file__)
    if app_dir:
        os.chdir(app_dir)

    logging.info('Graphite Alert Starting...')
    signal(SIGINT, signal_handler)

    global metrics
    metrics = load_metrics()

    global plugins
    try:
        plugins = load_plugins_from_cache()
    except:
        plugins = load_plugins(metrics)

    # start fetch
    t = Thread(target=fetch)
    t.setDaemon(True)
    t.start()

    # start check
    t = Thread(target=check)
    t.setDaemon(True)
    t.start()

    # start alert
    t = Thread(target=alert)
    t.setDaemon(True)
    t.start()

    # start dumps
    t = Thread(target=cache)
    t.setDaemon(True)
    t.start()

    while True:
        global ready
        if ready:
            run(host=config.listen_host, port=int(config.listen_port))
        time.sleep(1)
def read(self, data):
    key, enc_value = data.split('\t', 1)
    value = base64.b64decode(enc_value)
    pos = 0
    image_arrs = []
    logging.info('decoded number of bytes: ' + str(len(value)))
    while pos < len(value):
        # each image is prefixed with its length as a 4-byte big-endian integer
        image_len = struct.unpack('>i', value[pos:pos + 4])[0]
        pos += 4
        logging.info('reading image of length: ' + str(image_len) + '\n')
        image_arr = skio.imread(io.BytesIO(value[pos:pos + image_len]))
        logging.info('done reading')
        image_arrs.append(image_arr)
        pos += image_len
    logging.info('Got ' + str(len(image_arrs)) + ' images')
    return key, image_arrs
def test_sell_confirm(tancode):
    result = sell_confirm(TEST_BRANCH, TEST_ISDN, tancode)
    if result is None or result.has_error or not result.trans_id:
        if result is not None and result.is_invalid_tan:
            logging.info(u"The transaction failed: your TAN code is incorrect.")
        else:
            logging.info(u"The transaction failed.")
    else:
        logging.info(u"The transaction completed successfully, transaction number %s."
                     % result.trans_id)
def test_balance():
    result = get_balance(TEST_ISDN)
    if result is None or result.has_error or result.balance is None:
        if result is not None and result.is_not_found:
            logging.info(u"The request failed: your CANDY account is inactive.")
        else:
            logging.info(u"The request failed.")
    else:
        logging.info(u"Your CANDY balance is %s." % result.balance)
def update_sellings(self):
    url_opened_sellings_count = BASE_URL + "Selling/is_opened/count/"
    url_closed_sellings_count = BASE_URL + "Selling/is_closed/count/"
    url_opened_sellings = BASE_URL + "Selling/is_opened/"
    url_closed_sellings = BASE_URL + "Selling/is_closed/"
    update_interface = False

    # opened sellings
    result = self.call_remote(url_opened_sellings_count)
    if result:
        if self.opened_sellings_count != result[0]['result']:
            logging.info("Updating opened selling count to: " + str(result[0]['result']))
            self.opened_sellings_count = result[0]['result']
            result = self.call_remote(url_opened_sellings)
            self.opened_sellings = []
            for i in result:
                self.opened_sellings.append((i['pk'], i['fields']['ticket']))
            logging.info("Current opened sellings: " + str(self.opened_sellings))
            update_interface = True

    # closed sellings
    result = self.call_remote(url_closed_sellings_count)
    if result:
        if self.closed_sellings_count != result[0]['result']:
            logging.info("Updating closed selling count to: " + str(result[0]['result']))
            self.closed_sellings_count = result[0]['result']
            result = self.call_remote(url_closed_sellings)
            self.closed_sellings = []
            for i in result:
                self.closed_sellings.append((i['pk'], i['fields']['ticket']))
            logging.info("Current closed sellings: " + str(self.closed_sellings))
            update_interface = True

    if update_interface:
        self.update_sellings_widgets()

    return True
def write(self, key, img_list):
    logging.info('Writing ' + str(len(img_list)) + ' images')
    byte_stream = io.BytesIO()
    for img in img_list:
        # get image bytes
        temp_stream = io.BytesIO()
        skio.imsave(temp_stream, img)
        img_bytes = temp_stream.getvalue()
        temp_stream.close()
        # encode the length as a 4-byte big-endian integer
        img_len = len(img_bytes)
        logging.info('Writing image of length ' + str(img_len))
        len_bytes = bytearray(struct.pack('>i', img_len))
        # save length and image bytes to the result
        byte_stream.write(bytes(len_bytes))
        byte_stream.write(img_bytes)
    final_bytes = byte_stream.getvalue()
    byte_stream.close()
    encoded = base64.b64encode(final_bytes)
    logging.info('Done writing. Final number of bytes: ' + str(len(final_bytes)))
    return '%s\t%s' % (key, encoded)
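# A minimal sketch (an assumption, not from the original source) of the record
# framing the read()/write() pair above relies on: a tab-separated key followed by
# base64("<4-byte big-endian length><payload bytes> ..."). It uses only struct and
# base64, so the framing can be checked without scikit-image.
import base64
import struct

def frame_payloads(key, payloads):
    body = b''.join(struct.pack('>i', len(p)) + p for p in payloads)
    return '%s\t%s' % (key, base64.b64encode(body).decode('ascii'))

def unframe_payloads(record):
    key, enc = record.split('\t', 1)
    body = base64.b64decode(enc)
    pos, payloads = 0, []
    while pos < len(body):
        (length,) = struct.unpack('>i', body[pos:pos + 4])
        pos += 4
        payloads.append(body[pos:pos + length])
        pos += length
    return key, payloads

# round trip: unframe_payloads(frame_payloads('img-0', [b'payload-a', b'payload-b']))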
'''
Daniel Kronovet
[email protected]

Code to consume tweets from the Twitter Streaming API.
'''
import tweepy

import config
import keywords
from utils import logging
from streamlistener import MyStreamListener

auth = tweepy.OAuthHandler(config.consumer_key, config.consumer_secret)
auth.set_access_token(config.access_token, config.access_token_secret)
api = tweepy.API(auth)

if __name__ == '__main__':
    import sys
    storage = sys.argv[1]

    logging.info('#' * 32 + 'NEW RUN')
    myStream = tweepy.Stream(auth=auth, listener=MyStreamListener(storage))
    myStream.filter(
        follow=keywords.users,
        track=keywords.keywords,
        stall_warnings=True
    )
def on_button_selling_list_clicked(self, widget, combo_selling_list):
    if combo_selling_list.get_active() == -1:
        logging.info("Getting ticket: " + combo_selling_list.get_active_text())
    else:
        s_id, ticket_id = self.opened_sellings[combo_selling_list.get_active()]
        logging.info("Getting selling: " + str(s_id))
def reducer(self, key, values):
    vals = list(values)
    logging.info("Aggregating " + str(len(vals)) + " statistics")
    stats = self.get_pre_processor().aggregate(key, vals)
    logging.info("Aggregated statistics: " + str(stats))
    yield key, stats
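# A hedged sketch of how the mapper/reducer pair above could be exercised locally,
# outside the real MapReduce runtime. The driver function, the job class name
# PreProcessingJob, and the sample input are assumptions for illustration only.
from collections import defaultdict

def run_locally(job, keyed_values):
    # map phase: collect intermediate (key, value) pairs
    intermediate = defaultdict(list)
    for key, values in keyed_values:
        for out_key, out_val in job.mapper(key, values):
            intermediate[out_key].append(out_val)
    # reduce phase: aggregate all values emitted under each key
    results = {}
    for key, vals in intermediate.items():
        for out_key, out_val in job.reducer(key, iter(vals)):
            results[out_key] = out_val
    return results

# results = run_locally(PreProcessingJob(), [("part-0", raw_values)])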
def main(argv=sys.argv):
    (opts, args) = parse_options(argv)
    logging.setLogLevel(level=opts.loglevel)
    joosc_opts = JooscOptions(opts.stage, opts.include_stdlib == True,
                              opts.print_stdlib == True,
                              opts.directory_crawl == True,
                              opts.clean_output == True)

    if opts.test:
        logging.info("TESTING %s" % (opts.test))
        # TODO: Decide when to toggle verbose. Old method was insufficient.
        test.setup_and_run(opts.test, opts.show_errors == True, False, joosc_opts)
        return 0

    if len(args) < 1:
        logging.info("Nothing to compile.")
        return 0

    joosc(args, joosc_opts)


if __name__ == "__main__":
    try:
        main()
        logging.info("compiled successfully")
    except SystemExit as e:
        logging.info("exited with %d" % e.code)
        sys.exit(e.code)
def fit(self, inputs, targets):
    """
    Train the net with the given data, sampling observations at random for
    online learning and holding out part of the data for validation.
    :param inputs: Training inputs.
    :param targets: Training targets.
    """
    split_point = int(len(inputs) * self.split_ratio)
    data_set = NumericalDataSet(inputs[:split_point], targets[:split_point])
    val_in = inputs[split_point:]
    val_targets = targets[split_point:]

    prev_layers = None
    prev_mlp = None
    self.train_acc_err = []
    self.val_acc_err = []

    for it in range(self.iterations):
        # randomly select observations as many times as there are observations
        it_error = 0
        start = time.time()
        for _ in range(data_set.get_nr_observations()):
            input_arr, target_arr = data_set.rand_observation()

            # feed-forward
            outputs = self.feedforward(input_arr)
            current_error = nputils.calc_squared_error(target_arr, outputs[-1])
            it_error += current_error

            # mlp backpropagation and gradient descent
            mlp_outputs = outputs[-len(self.mlp.arr_layer_sizes):]
            mlp_deltas = self.mlp.backpropagation(mlp_outputs, target_arr)
            mlp_weight_updates = self.mlp.calculate_weight_updates(mlp_deltas,
                                                                   mlp_outputs)
            self.mlp.update_method.perform_update(self.mlp.weights_arr,
                                                  mlp_weight_updates,
                                                  current_error)

            # layer backpropagation and gradient descent:
            # calculate backpropagated error of first mlp layer
            backprop_error = np.array(
                [[x] for x in np.dot(self.mlp.weights_arr[0],
                                     mlp_deltas[0].transpose())])
            for layer in reversed(self.layers):
                backprop_error = layer.backpropagate(backprop_error)

            # calculate the weight gradients and update the weights
            for layer in self.layers:
                layer.calc_gradients()
                layer.update(self.learning_rate)

        avg_error = it_error / data_set.nrObservations

        # training error
        acc_err = self._accuracy_err(inputs, targets)
        self.train_acc_err.append(acc_err)
        # validation error
        acc_err = self._accuracy_err(val_in, val_targets)
        self.val_acc_err.append(acc_err)

        logging.info("Iteration #{} MSE: {}, TrainErr: {:.6f}, ValErr: {:.6f} ({:.2f}s)\n"
                     .format(it + 1, avg_error, self.train_acc_err[-1],
                             self.val_acc_err[-1], time.time() - start))

        # early-stopping condition: validation error has been rising
        if it > 3 and val_in is not None and self.val_acc_err[-1] > self.val_acc_err[-4]:
            # revert to the snapshot taken before the error started rising
            self.layers = prev_layers
            self.mlp = prev_mlp
            plt.figure()
            plt.plot(self.train_acc_err)
            plt.plot(self.val_acc_err)
            plt.show(block=False)
            break

        # snapshot the current model for a possible revert
        if it > 0:
            prev_layers = copy.deepcopy(self.layers)
            prev_mlp = copy.deepcopy(self.mlp)
'''
Code to consume tweets from the Twitter Streaming API.
'''
import argparse

import tweepy

import secrets
import keywords
from utils import logging
from streamlistener import MyStreamListener

### Command Line Options
parser = argparse.ArgumentParser(description='Open connection to Twitter Stream')
parser.add_argument('storage', nargs='*', default=['stdout'])

auth = tweepy.OAuthHandler(secrets.consumer_key, secrets.consumer_secret)
auth.set_access_token(secrets.access_token, secrets.access_token_secret)
api = tweepy.API(auth)

if __name__ == '__main__':
    logging.info('NEW RUN ' * 8)
    args = parser.parse_args()
    myStream = tweepy.Stream(auth=auth, listener=MyStreamListener(args.storage))
    myStream.filter(
        follow=keywords.users,
        track=keywords.keywords,
        stall_warnings=True
    )