def main():
    args = process_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda
    if args.seed == -1:
        RANDOMSEED = None
    else:
        RANDOMSEED = args.seed
    if RANDOMSEED is not None:
        torch.manual_seed(RANDOMSEED)
        torch.cuda.manual_seed(RANDOMSEED)
    embedding = 'random'
    torch.backends.cudnn.deterministic = True
    config = Config(args, embedding, 'POR')
    start_time = time.time()
    print("Loading data...")
    vocab, train_data, test_data = build_adaptive_dataset(config, args.word)
    # assumption: the primary training iterator is built the same way as train_iter2;
    # the original snippet used train_iter without defining it
    train_iter = build_iterator(train_data, config, doubly_flag=False)
    train_iter2 = build_iterator(train_data, config, doubly_flag=False)
    test_iter = build_test_iterator(test_data, config)
    print("Time used: ", get_time_dif(start_time))
    config.n_vocab = len(vocab)
    model = POR(config).cuda()
    init_model(model)
    print("start training...")
    train_por(config, model, train_iter, train_iter2, test_iter)

def do_shell(args):
    config = Config(args)
    helper = ModelHelper.load(args.model_path)
    embeddings = load_embeddings(args, helper)
    config.embed_size = embeddings.shape[1]
    with tf.Graph().as_default():
        logger.info("Building model...")
        start = time.time()
        model = NERModel(helper, config, embeddings)
        logger.info("took %.2f seconds", time.time() - start)
        init = tf.global_variables_initializer()
        saver = tf.train.Saver()
        with tf.Session() as session:
            session.run(init)
            saver.restore(session, model.config.model_output)
            # Create simple REPL
            while True:
                try:
                    sentence = input("input> ")
                    tokens = sentence.strip().split(" ")
                    for sentence, _, predictions in model.output(session, [(tokens, ["O"] * len(tokens))]):
                        predictions = [LBLS[l] for l in predictions]
                        print_sentence(sys.stdout, sentence, [""] * len(tokens), predictions)
                except EOFError:
                    print("Closing session.")
                    break

def inject_config():
    """Injects the 'app_config' variable into the Jinja template so it can be passed into Angular. See base.html."""
    config_properties = Config.get_all_properties() if auth.is_admin() else Config.get_public_properties()
    app_config = config.CONFIG_DB.to_dict(include=config_properties)
    app_config['development'] = config.DEVELOPMENT
    return {'app_config': app_config}

def main():
    args = process_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda
    if args.seed == -1:
        RANDOMSEED = None
    else:
        RANDOMSEED = args.seed
    if RANDOMSEED is not None:
        torch.manual_seed(RANDOMSEED)
        torch.cuda.manual_seed(RANDOMSEED)
    IMP_WEIGHT = args.imp_weight
    if not IMP_WEIGHT:
        imp = torch.ones(args.class_num - 1, dtype=torch.float)
    elif IMP_WEIGHT == 1:
        # the original snippet leaves `imp` undefined for this branch;
        # task-specific importance weights would have to be computed here
        pass
    else:
        raise ValueError('Incorrect importance weight parameter.')
    imp = imp.cuda()
    embedding = 'random'
    torch.backends.cudnn.deterministic = True
    config = Config(args, embedding, 'OR')
    start_time = time.time()
    print("Loading data...")
    vocab, train_data, test_data = build_dataset(config, args.word)
    train_iter = build_iterator(train_data, config, doubly_flag=False)
    # train_iter = build_random_iterator(train_data, config)
    test_iter = build_test_iterator(test_data, config)
    print("Time used: ", get_time_dif(start_time))
    config.n_vocab = len(vocab)
    model = OR(config).cuda()
    init_model(model)
    print("start training...")
    train_or(config, model, train_iter, test_iter, imp)

def __init__(self, *, name: str):
    super().__init__(name=name)
    config = self.config
    Config().instruments.clear()
    Config().services.clear()
    self.setContentsMargins(0, 0, 0, 0)
    w = QWidget()
    w.setContentsMargins(0, 0, 0, 0)
    splitter = Splitter(Qt.Vertical, name=name + '.splitter')
    splitter.setContentsMargins(0, 0, 0, 0)
    self.setCentralWidget(splitter)
    # filter view
    filter_view = FilterView(name=name + '.filter_view')
    splitter.addWidget(filter_view)
    # sound player
    player = SoundPlayer(name=name + '.sound_player')
    splitter.addWidget(player)
    # signal log view
    signal_log_view = SignalLogView(name=name + '.signal_log_view')
    splitter.addWidget(signal_log_view)
    # signal slot
    GlobalSignal().update_prices.connect(self.update_prices)
    # style sheet
    self.setStyleSheet('QSplitter::handle{background: white;}')

def __init__(self, *, config: Config, enabled_instruments: Set[str]):
    labels = ['time', 'instrument', 'bidder', 'asker', 'sp', 'E', 'O', 'C']
    super().__init__(labels=labels, config=config)
    self.root_config = root_config = config
    self.config = config = config.setdefault('signal_log', {})
    self.resize_row(config.setdefault('max_log_n', self.MAX_LOG_N))
    self.enabled_instruments = enabled_instruments
    height = config.setdefault('height', 200)
    self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Minimum)
    self.resize(0, height)
    width_list = config.setdefault('column_width', [150, 70, 50, 50, 40, 20, 20, 20])
    for i, width in enumerate(width_list):
        self.setColumnWidth(i, width)
    # signal
    self.signalers = defaultdict(lambda: Signaler(config=root_config))  # type: Dict[Tuple[str, str, str], Signaler]
    self.signals = []
    self.sound_player = SoundPlayer(root_config=root_config)
    self.signal_log_file = self.config.setdefault('signal_log_file', './signal_log.txt')
    # QSignal connect
    self.cellClicked.connect(self.cell_clicked)

def main(debug=True):
    config = Config()
    with tf.Graph().as_default():
        lw = layers_and_weights()
        customs = {'lw': lw}
        if debug:
            config.num_batches = 51
            config.num_epochs = 1
        p2p_model = P2PE_DNNS(config, **customs)
        init = tf.global_variables_initializer()
        writer = tf.summary.FileWriter('./tf_summary/crypto/1')
        with tf.Session() as sess:
            sess.run(init)
            writer.add_graph(sess.graph)
            for epoch in range(config.num_epochs):
                for batches in range(config.num_batches):
                    batch_message = DataClass.get_batch_sized_data(config.batch_size,
                                                                   config.plain_text_length)
                    if batches % 50 == 0:
                        # write summaries
                        s = p2p_model.run_batch(sess, batch_message, True)
                        print("Summarizing - " + str((config.num_batches * epoch) + batches))
                        writer.add_summary(s, (config.num_batches * epoch) + batches)
                    else:
                        res = p2p_model.run_batch(sess, batch_message)
                        if not debug:
                            print(res)
    return 0

def select_instrument(self, instrument: str):
    self.tab_bar.rename_current_tab(instrument)
    fltr = Config().filters.setdefault(instrument, {})
    self.spread_spin.setValue(fltr.get('spread', 9.9))
    self.duration_spin.setValue(fltr.get('duration', 9.9))
    self.service_view.update_instrument(instrument)
    self.rate_view.update_instrument(instrument)
    self.accounts_view.update_instrument(instrument)

def main():
    with TransactionManager.transaction():
        User.init_table()
        Video.init_table()
        Config.init_table()
        print("Succeeded to init db.")
    return 0

def rename_current_tab(self, name: str):
    index = self.currentIndex()
    if index >= self.count() - 1:
        return
    self.setTabText(index, name)
    selected = set(filter(str, [self.tabText(i) for i in range(self.count())]))
    Config().selected_instruments.clear()
    Config().selected_instruments.update(selected)
    Config().update_selected_instruments(selected)

def _post_put_hook(self, future):
    customer = self.customer.get()
    feedback_email = Config.get_master_db().feedback_email
    if customer:
        if self.status == REQUEST_STATUS['accept']:
            customer.is_customer = True
            customer.put()
            if feedback_email and customer.email:
                mail.send_mail(
                    sender=feedback_email,
                    to=customer.email,
                    subject=u'[%s] - Ваш запрос одобрен' % (Config.get_master_db().brand_name),
                    body=render_template(
                        'order/emails/customer_request_accept.txt',
                        comment=self.manager_comment))
        if self.status == REQUEST_STATUS['reject']:
            customer.is_customer = False
            customer.put()
            if feedback_email and customer.email:
                mail.send_mail(
                    sender=feedback_email,
                    to=customer.email,
                    subject=u'[%s] - Ваш запрос отклонен' % (Config.get_master_db().brand_name),
                    body=render_template(
                        'order/emails/customer_request_reject.txt',
                        comment=self.manager_comment))
        if self.status == REQUEST_STATUS['admin']:
            customer.admin = True
            customer.is_customer = True
            customer.put()
            if feedback_email and customer.email:
                mail.send_mail(
                    sender=feedback_email,
                    to=customer.email,
                    subject=u'[%s] - Вы - администратор' % (Config.get_master_db().brand_name),
                    body=render_template(
                        'order/emails/customer_request_admin.txt',
                        comment=self.manager_comment))
        if self.status == REQUEST_STATUS['now']:
            if feedback_email:
                managers = Manager.query()
                for manager in managers:
                    if manager.email:
                        mail.send_mail(
                            sender=feedback_email,
                            to=manager.email,
                            subject=u'[%s] - Новый запрос на сотрудничество' % (Config.get_master_db().brand_name),
                            body=render_template(
                                'order/emails/customer_request.txt',
                                customer_request=self,
                                customer=self.customer.get()))

def __init__(self, *, root_config: Config):
    super().__init__()
    self.root_config = root_config
    self.config = config = root_config.setdefault('window', dict(x=200, y=200, width=440, height=800))
    self.move(config['x'], config['y'])
    self.resize(config['width'], config['height'])
    self.setContentsMargins(0, 0, 0, 0)
    w = QWidget()
    w.setContentsMargins(0, 0, 0, 0)
    splitter = QSplitter(Qt.Vertical)
    splitter.setContentsMargins(0, 0, 0, 0)
    self.setCentralWidget(splitter)
    enabled_instruments = set()
    # Price View
    self.price_view = price_view = PriceView(config=root_config, enabled_instruments=enabled_instruments)
    splitter.addWidget(price_view)
    # CheckBox
    mute = QCheckBox('Mute')
    mute.setChecked(root_config.setdefault('mute', False))
    check = QCheckBox('NotClosedOnly')
    check.setChecked(root_config.setdefault('not_closed_only_check', False))

    def check_clicked_slot(key):
        @pyqtSlot(bool)
        def check_clicked(b):
            root_config[key] = b
        return check_clicked

    mute.clicked.connect(check_clicked_slot('mute'))
    check.clicked.connect(check_clicked_slot('not_closed_only_check'))
    w = QWidget()
    w.setContentsMargins(0, 0, 0, 0)
    w.setLayout(QHBoxLayout())
    w.layout().setContentsMargins(0, 0, 0, 0)
    w.layout().addStretch()
    w.layout().addWidget(mute)   # , alignment=Qt.AlignRight
    w.layout().addWidget(check)  # , alignment=Qt.AlignRight
    splitter.addWidget(w)
    # Log Table
    self.signal_log = signal_log = SignalLogView(config=root_config, enabled_instruments=enabled_instruments)
    splitter.addWidget(signal_log)
    self.init_menu()
    self.config_window = None  # type: QMainWindow

def generate(master: Image, master_size: int, small_size: int) -> Image:
    dir_src = Config.get_default_dir()
    images_sources = FileCheck.get_dir_content(dir_src)
    images = LoadImages.load_images_with_resize(dir_src, images_sources, small_size, small_size)
    master_w, master_h = master.size
    # crop the master to a square whose side is a multiple of master_size
    if master_w > master_h:
        master_w = master_h
    elif master_h > master_w:
        master_h = master_w
    master_h -= master_h % master_size
    master_w = master_h
    master = master.resize((master_w, master_h))  # Image.resize returns a new image
    part_size = master_w // master_size
    master_parts = {}
    for i in range(master_size):
        if i not in master_parts:
            master_parts[i] = {}
        for j in range(master_size):
            master_parts[i][j] = np.array(
                master.crop((part_size * i, part_size * j,
                             part_size * (i + 1), part_size * (j + 1))).getdata())
    images_reduce = {}
    for small_image in Model.resize_all(images.copy(), part_size, part_size):
        images_reduce[len(images_reduce)] = np.array(small_image.getdata())
    links = {}
    for i, line in master_parts.items():
        for j, sub_image in line.items():
            # 195075 = 3 * 255 ** 2, the largest possible squared error per pixel
            best_value = 195075 * part_size * part_size + 1
            best_id = None
            for k, small_image in images_reduce.items():
                value = ((sub_image - small_image) ** 2).sum()
                if value < best_value:
                    best_value = value
                    best_id = k
            links[i * master_size + j] = best_id
    Model.make_final(links, images, master_size, small_size).rotate(-90).save(Config.get_save_path())

def _pre_delete_hook(cls, key):
    customer_request = key.get()
    if customer_request and customer_request.customer:
        feedback_email = Config.get_master_db().feedback_email
        customer = customer_request.customer.get()
        if customer and customer.email and feedback_email:
            mail.send_mail(
                sender=feedback_email,
                to=customer.email,
                subject=u'[%s] - Ваш запрос на сотрудничество был сброшен' % (Config.get_master_db().brand_name),
                body=render_template('order/emails/customer_request_reset.txt'))

def train(self, train_path, save_path=DEFAULT_CONFIG['save_path'], dev_path=None, vectors_path=None, **kwargs):
    train_dataset = ss_tool.get_dataset(train_path)
    if dev_path:
        dev_dataset = ss_tool.get_dataset(dev_path)
        word_vocab, tag_vocab = ss_tool.get_vocab(train_dataset, dev_dataset)
    else:
        word_vocab, tag_vocab = ss_tool.get_vocab(train_dataset)
    self._word_vocab = word_vocab
    self._label_vocab = tag_vocab
    train_iter = ss_tool.get_iterator(train_dataset, batch_size=DEFAULT_CONFIG['batch_size'])
    config = Config(word_vocab, tag_vocab, save_path=save_path, vector_path=vectors_path, **kwargs)
    malstm = MaLSTM(config)
    self._model = malstm
    optim = torch.optim.Adam(self._model.parameters(), lr=config.lr)
    loss_func = torch.nn.MSELoss().to(DEVICE)
    # loss_func = torch.nn.BCEWithLogitsLoss(reduction='mean', pos_weight=torch.tensor(2)).to(DEVICE)
    for epoch in range(config.epoch):
        self._model.train()
        acc_loss = 0
        for item in tqdm(train_iter):
            self._model.zero_grad()
            left_text = item.texta
            right_text = item.textb
            predict_dis = self._model(left_text, right_text)
            item_loss = loss_func(predict_dis, item.label.type(torch.float32))
            acc_loss += item_loss.view(-1).cpu().item()
            item_loss.backward()
            optim.step()
        logger.info('epoch: {}, acc_loss: {}'.format(epoch, acc_loss))
        if dev_path:
            dev_score = self._validate(dev_dataset)
            logger.info('dev score:{}'.format(dev_score))
        adjust_learning_rate(optim, config.lr / (1 + (epoch + 1) * config.lr_decay))
    config.save()
    self._model.save()

def config(key):
    if flask.request.method == "GET":
        value = Config.select(key)
        return flask.Response(response=json.dumps({"key": key, "value": value}))
    elif flask.request.method == "POST":
        value = flask.request.form["value"]
        Config.insert(key, value)
        return flask.Response(response=json.dumps({"key": key, "value": value}))

def test_change_time(self):
    c = Config()
    c.time_speed = 1
    c.change_time_speed(1)
    self.assertEqual(c.time_speed, 2)
    c.time_speed = 1
    c.change_time_speed(-1)
    self.assertEqual(c.time_speed, 0)

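# A minimal, hypothetical sketch of the behaviour the test above implies: it assumes
# Config.change_time_speed(step) simply adds the signed step to time_speed. The real
# Config class is not part of this collection, so the names here are placeholders.
class ConfigSketch:
    def __init__(self):
        self.time_speed = 1

    def change_time_speed(self, step):
        # the only behaviour test_change_time requires: add the signed step
        self.time_speed += step

c = ConfigSketch()
c.change_time_speed(1)
assert c.time_speed == 2
c.time_speed = 1
c.change_time_speed(-1)
assert c.time_speed == 0
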
def main(parser):
    user_dic_path = 'userdic.txt'
    user_dic = _load_users_dict(user_dic_path)
    # print(user_dic)
    args = parser.parse_args()
    model_dir = Path(args.model_dir)
    model_config = Config(json_path=model_dir / 'config.json')
    # Vocab & Tokenizer
    tok_path = "./tokenizer_78b3253a26.model"
    ptr_tokenizer = SentencepieceTokenizer(tok_path)
    vocab_file = './kobert_model/kobertvocab_f38b8a4d6d.json'
    vocab_of_gluonnlp = nlp.vocab.BERTVocab.from_json(open(vocab_file, 'rt').read())
    token_to_idx = vocab_of_gluonnlp.token_to_idx
    vocab = Vocabulary(token_to_idx=token_to_idx)
    tokenizer = Tokenizer(vocab=vocab, split_fn=ptr_tokenizer, pad_fn=keras_pad_fn, maxlen=model_config.maxlen)
    # load ner_to_index.json
    with open(model_dir / "ner_to_index.json", 'rb') as f:
        ner_to_index = json.load(f)
    index_to_ner = {v: k for k, v in ner_to_index.items()}
    # Model
    model = KobertCRF(config=model_config, num_classes=len(ner_to_index), vocab=vocab)
    model_state_dict = torch.load('{}/KobertCRF-lr5e-05-bs200/model.state'.format(model_dir))
    model.load_state_dict(model_state_dict)
    model.eval()
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    model.to(device)
    decoder_from_res = DecoderFromNamedEntitySequence(user_dic=user_dic, tokenizer=tokenizer,
                                                      index_to_ner=index_to_ner)
    while True:
        input_text = input("입력하세요: ")  # prompt: "Enter text: "
        list_of_input_ids = tokenizer.list_of_string_to_list_of_cls_sep_token_ids([input_text])
        x_input = torch.tensor(list_of_input_ids).long()
        if torch.cuda.is_available():
            x_input = x_input.cuda()
        # for bert crf
        list_of_pred_ids = model(x_input)
        list_of_ner_word, decoding_ner_sentence = decoder_from_res(
            input_text, list_of_input_ids=list_of_input_ids, list_of_pred_ids=list_of_pred_ids)
        print("list_of_ner_word:", list_of_ner_word)
        print("decoding_ner_sentence:", decoding_ner_sentence[6:-5])

def __init__(self):
    Layer.__init__(self)
    self.config = Config()
    self.gallery = Gallery()
    self.transarctica = director.core.query_mover("Transarctica")
    self._load_background()
    self._load_interface()

def main(*args, **kwargs):
    inputs = Inputs()
    config = Config()
    with tf.variable_scope("inference") as scope:
        m = TextCNN(config, inputs)
        scope.reuse_variables()
        mvalid = TextCNN(config, inputs)
    init = tf.group(tf.initialize_all_variables(), tf.initialize_local_variables())
    sess = tf.Session()
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    sess.run(init)
    try:
        index = 0
        while not coord.should_stop():
            _, loss_value = sess.run([m.train_op, m.cost])
            index += 1
            print("step: %d, loss: %f" % (index, loss_value))
            if index % 5 == 0:
                accuracy = sess.run(mvalid.validation_op)
                print("accuracy on validation is: " + str(accuracy))
    except tf.errors.OutOfRangeError:
        print("Done training: epoch limit reached")
    except KeyboardInterrupt:
        print("keyboard interrupt detected, stop training")
    finally:
        coord.request_stop()
        coord.join(threads)
    sess.close()
    del sess

def play_with_model(mode):
    # build tf flags
    tf_config = create_configurations()
    # process dataset
    if not os.path.exists(tf_config["save_dir"]) or not os.listdir(tf_config["save_dir"]):
        sys.stdout.write("No preprocessed dataset found, create from {} raw data...\n".format(tf_config["dataset_name"]))
        sys.stdout.flush()
        process_europarl(tf_config)
    # create configurations
    sys.stdout.write("Load configurations...\n")
    sys.stdout.flush()
    config = Config(tf_config)
    if mode == "train":
        # prepare training dataset batches
        sys.stdout.write("Load dataset and create batches...\n")
        sys.stdout.flush()
        train_batches, test_batches = batchnize_dataset(tf_config["dataset"], config.batch_size, config.target_dict)
        # build model and start training
        sys.stdout.write("Building model...\n")
        sys.stdout.flush()
        seq2seq_model = SequenceToSequence(config, mode="train")
        seq2seq_model.train(train_batches, test_batches, epochs=config.epochs)
    elif mode == "decode":
        # build model and restore the last checkpoint
        sys.stdout.write("Building model...\n")
        sys.stdout.flush()
        seq2seq_model = SequenceToSequence(config, mode="decode")
        seq2seq_model.restore_last_session()
        sys.stdout.write("> ")
        sys.stdout.flush()
        top_n = False  # if beam search, return all decoded results or just the first one
        source_language = "french" if tf_config["fr_to_en"] else "english"
        sentence = sys.stdin.readline()
        sent_ids = sentence_to_ids(sentence, config.source_dict, source_language,
                                   tf_config["lower"], tf_config["keep_number"])
        while sentence:
            predict_ids = seq2seq_model.inference(sent_ids)
            # batch_size == 1, so the result holds the decoded candidates for the single input
            responses = ids_to_sentence(predict_ids, config.rev_target_dict, config.target_dict)
            if top_n:
                print("\n".join(responses))
            else:
                print(responses[0])
            sys.stdout.write("> ")
            sys.stdout.flush()
            sentence = sys.stdin.readline()
            sent_ids = sentence_to_ids(sentence, config.source_dict, source_language,
                                       tf_config["lower"], tf_config["keep_number"])
    else:
        raise ValueError("ERROR: Unknown mode name {}, supported modes: (train | decode)".format(mode))

def __init__(self): """initilaizer""" self.config = Config() self.direction = "W" self.is_in_reverse = False self.is_break_released = True self.speed = 0 self.Speed_Regulator = 100 self.Speed_Modifier = 1 self.target_speed = 0 self.train_layout = {} self.cargo_manifest = {} self.storage_cap = {} self.temp_modifiers = {} self.shovel_rate = {"Lignite": 0, "Anthracite": 0} self.refresh_counter = {"Lignite": 0, "Anthracite": 0} self.hpz = 160 self.engine_temp = 0 self.boiler_pressure = 0 self.train_total_weight = 1265 self.in_city = "" self.update_minitrain = False self.current_position = self.config.start_position #{"X": 1, "Y": 1} self.last_event_position = {"X": 1, "Y": 1} self.vis_range = self.config.base_vis_range self.opfor_id = -1 self.proximity_alarm = 0 self.map = None
def __console(self, level, message):
    # create a FileHandler that writes the log to a local file
    self.logname = os.path.join(Config().path_log(), '%s.log' % time.strftime('%Y_%m_%d'))
    fh = logging.FileHandler(self.logname, 'a', encoding='utf-8')  # append mode (Python 3)
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(self.formatter)
    self.logger.addHandler(fh)
    # create a StreamHandler that prints to the console
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(self.formatter)
    self.logger.addHandler(ch)
    if level == 'info':
        self.logger.info(message)
    elif level == 'debug':
        self.logger.debug(message)
    elif level == 'warning':
        self.logger.warning(message)
    elif level == 'error':
        self.logger.error(message)
    # remove the handlers again to avoid duplicated log output
    self.logger.removeHandler(ch)
    self.logger.removeHandler(fh)
    # close the opened file
    fh.close()

def __init__(self):
    Layer.__init__(self)
    self.config = Config()
    self.gallery = Gallery()
    self.sound_on_off = 'on'
    self._load_background()
    self._load_interface()

def apply(self):
    text = self.text_edit.toPlainText()
    f_name = './tmp.yaml'
    with open(f_name, 'w') as f:
        f.write(text)
    config = Config(f_name)
    self.root_config.update(config)

def setNewBassByCustomerName(self, customerName):
    headers = {
        'Accept': "application/json",
        'Content-Type': "application/json;charset=UTF-8"
    }
    # convert the config dict to JSON
    try:
        baseDao = BaseDao.BaseDao()
        config = Config.Config()
        config.customerName = customerName
        configDao = ConfigDao()
        config = configDao.get(config)
        config = config.__dict__
        configJson = json.dumps(config)
        # print the dict
        print(config)
        url = host + 'configProperties/add'
        data = configJson
        result = baseDao.safeRequests(url, data, headers, "")
        if result.status_code == 200:
            jsonResult = result.json()
            if jsonResult['retcode'] != 0:
                raise Exception("Request failed, details:", jsonResult)
            if jsonResult['data'] != '':
                return jsonResult['data']
        else:
            print(Exception("Error info", result.json()))
    except Exception as e:
        print(e)

def __init__(self):
    Layer.__init__(self)
    self.config = Config()
    self.gallery = Gallery()
    self.schedule_interval(self.music_start, interval=7)
    self.intro_sound = load("music/sound_start.wav", streaming=False)
    self.intro_sound.play()
    self.is_event_handler = True
    background = Sprite(self.gallery.content["display"]["title"])
    self.optimal_scale = (self.config.window_width *
                          self.display_pos_size["display_title"]["W"]) / background.width
    background.image_anchor = 0, 0
    background.scale = self.optimal_scale
    background.x = self.config.window_width * self.display_pos_size["display_title"]["X"]
    background.y = self.config.window_height * self.display_pos_size["display_title"]["Y"]
    self.left_margin = background.x
    self.bottom_margin = background.y
    self.optimal_width = background.width
    self.optimal_height = background.height
    self.add(background)
    place_holder_image = Sprite(self.gallery.content["screen"]["title"])
    place_holder_image.position = director.window.width / 2, director.window.height / 2
    place_holder_image.scale = self.optimal_scale
    place_holder_image.do(ScaleBy(5.2, duration=25))
    self.add(place_holder_image)

def __init__(self): """initializer""" self.game = None self.config = Config() self.sender = "" self.timestamp = 0 self.TSP = 0 # event handlers events.push_handlers(start_game=self.start_game, sound_switch=self.sound_switch, quick_battle=self.quick_battle, modify_speed=self._change_time) self.intromusic = Player() source = load("music/music_title.wav") self.intromusic.queue(source) self.intromusic.volume=0.4 * self.config.sound_switch
def __init__(self, map_manager, id):
    """initializer"""
    gallery = Gallery()
    self.rotation_vars = {}
    self.rotation_vars["nomads"] = {}
    self.rotation_vars["mammoth_herd"] = {}
    self.rotation_vars["BLANK"] = {}
    self.rotation_vars["BLANK"]["sprite"] = gallery.content["switch"]["off"]
    self.rotation_vars["nomads"]["sprite"] = gallery.content["actor"]["nomads"]
    self.rotation_vars["mammoth_herd"]["sprite"] = gallery.content["actor"]["mammoth_herd"]
    Sprite.__init__(self, self.rotation_vars["BLANK"]["sprite"])
    self.counter = 0
    self.map = map_manager
    self.roamer = director.core.query_mover("Roamer" + str(id))
    self.ID = id
    self.transarctica = director.core.query_mover("Transarctica")
    self.driver = TrainDriver()
    self.driver.target = self
    self.config = Config()
    self.anchor = self.config.tile_width / 2, self.config.tile_width / 2
    self.next_update = 0
    self.max_forward_speed = 100 / self.config.km_per_tile * self.config.tile_width / 3600

def main(*args, **kwargs):
    inputs = Inputs()
    print('inputs shape: %s' % str(inputs.inputs.shape))
    config = Config()
    with tf.variable_scope('inference') as scope:
        m = TextCNN(config, inputs)
        scope.reuse_variables()
    init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    sess = tf.Session()
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    sess.run(init)
    try:
        index = 0
        while not coord.should_stop() and index < 1:
            _, loss_value = sess.run([m.train_op, m.cost])
            index += 1
            print('step: %d, loss: %f' % (index, loss_value))
    except tf.errors.OutOfRangeError:
        print('Done training: epoch limit reached')
    except KeyboardInterrupt:
        print('keyboard interrupt detected, stop training')
    finally:
        coord.request_stop()
        coord.join(threads)
    sess.close()
    del sess

def post(self):
    """Signs out the user and sends back a config object with only public properties.
    If the signed-out user was an admin, we want to override his config object with a
    new one, since the admin config also contains private properties."""
    auth.signout_user()
    app_config = config.CONFIG_DB.to_dict(include=Config.get_public_properties())
    app_config['development'] = config.DEVELOPMENT
    return app_config

def _post_put_hook(self, future):
    customer = self.customer.get()
    feedback_email = Config.get_master_db().feedback_email
    if customer and feedback_email:
        if self.status == ORDER_STATUS['now']:
            managers = Manager.query(Manager.is_mailable == True)
            for manager in managers:
                if manager.email:
                    mail.send_mail(
                        sender=feedback_email,
                        to=manager.email,
                        subject=u'[%s] - Новый заказ на сумму %s' % (
                            Config.get_master_db().brand_name, self.price),
                        body=render_template('order/emails/order.txt', order=self),
                        html=render_template('order/emails/order.html', order=self))

def index():
    master_db = Config.get_master_db()
    if master_db.recaptcha_public_key and master_db.recaptcha_private_key:
        if not flask.request.headers.getlist("X-Forwarded-For"):
            ip = flask.request.remote_addr
        else:
            ip = flask.request.headers.getlist("X-Forwarded-For")[0]
        form = FeedbackCaptchaForm(captcha={
            'ip_address': ip,
            'public_key': master_db.recaptcha_public_key,
            'private_key': master_db.recaptcha_private_key
        })
        use_captcha = True
    else:
        form = FeedbackForm()
        use_captcha = False
    if form.validate_on_submit():
        feedback = Feedback()
        form.populate_obj(feedback)
        feedback.put()
        feedback_email = master_db.feedback_email
        managers = Manager.query()
        if feedback_email and managers:
            subject = u'[%s] Сообщение - %s' % (master_db.brand_name, form.subject.data)
            body = u'%s\n\n%s' % (form.feedback.data, form.email.data)
            for manager in managers:
                if manager.email and manager.is_mailable:
                    emails = collect_emails(manager.email)
                    for email in emails:
                        mail.send_mail(
                            sender=master_db.feedback_email,
                            to=email,
                            subject=subject,
                            reply_to=form.email.data or master_db.feedback_email,
                            body=body)
        flask.flash(u'Спасибо за Ваш отзыв!', category='success')
        return flask.redirect(flask.url_for('pages.index'))
    if not form.errors and current_user_id() > 0:
        form.email.data = current_user_db().email
    return flask.render_template(
        'feedback/index.html',
        title=u'Обратная связь',
        html_class='feedback',
        form=form,
        use_captcha=use_captcha)

# -*- coding: utf-8 -*-
try:
    # This part is wrapped in try/except because this config.py file is also used by the
    # build.py script, which compiles/minifies the client-side files (*.less, *.coffee, *.js)
    # and is not aware of the GAE runtime.
    from model import Config
    config_db = Config.get_master_db()
    BRAND_NAME = config_db.brand_name
    ANALYTICS_ID = config_db.analytics_id
    SECRET_KEY = config_db.flask_secret_key
    PUBNUB_PUBLISH = config_db.pubnub_publish
    PUBNUB_SUBSCRIBE = config_db.pubnub_subscribe
except:
    pass

import os

CURRENT_VERSION_ID = os.environ.get('CURRENT_VERSION_ID', None)
PRODUCTION = os.environ.get('SERVER_SOFTWARE', '').startswith('Google App Eng')
DEVELOPMENT = not PRODUCTION
DEBUG = DEVELOPMENT
DEFAULT_DB_LIMIT = 64

################################################################################
# Client modules, also used by the build.py script.
################################################################################
STYLES = [

import coffeescript, os
from flask import Flask, request

import handler
import handlers
from handlers import *
from model import Config, User

app = Flask(__name__)
app.debug = True

key = Config.getString('secret_key')
if key is None:
    key = os.urandom(24)
    Config.set('secret_key', ''.join('%02x' % ord(c) for c in key))
else:
    key = ''.join(chr(int(key[i:i + 2], 16)) for i in xrange(0, 48, 2))
app.secret_key = key


def reroute(noId, withId):
    def sub(id=None, *args, **kwargs):
        try:
            if id is None:
                return noId(*args, **kwargs)
            else:
                return withId(id, *args, **kwargs)
        except:
            import traceback
            traceback.print_exc()
    sub.func_name = '__reroute_' + noId.func_name
    return sub


for module, sub in handler.all.items():

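# A small, self-contained Python 3 sketch (not part of the Python 2 app above) of the
# secret-key round-trip the snippet performs: 24 random bytes are stored as 48 hex
# characters via '%02x' per byte, and read back by decoding two hex digits at a time.
import os

raw = os.urandom(24)                                  # 24 random key bytes
stored = ''.join('%02x' % b for b in raw)             # 48-char hex string kept in Config
assert len(stored) == 48
restored = bytes(int(stored[i:i + 2], 16) for i in range(0, 48, 2))
assert restored == raw
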
def get(self):
    return CONFIG_DB.to_dict(include=Config.get_all_properties())

import argparse
import logging

from model import Config
from service import BaseSpider

parser = argparse.ArgumentParser()
parser.add_argument('--base-dir', help='The directory where the results file will be written')
parser.add_argument('-v', action='count', default=0, help='Increase the verbosity of log messages')
args = parser.parse_args()

config = Config()
config.verbosity = args.v
config.base_dir = args.base_dir

log = logging.getLogger("spider")
log.addHandler(logging.StreamHandler())
log.setLevel(config.verbosity)


class DemoSpider(BaseSpider):
    def __init__(self, config):
        super().__init__(config, log)

    def crawl(self, url=None):
        # do something
        data = {"interesting_data": {"oil_price": 92.04}}
        self.save(data)


spider = DemoSpider(config)
spider.crawl(url="www.bls.gov")
