def main():
    """CLI entry point: parse arguments for the model, trainer and data
    module, build all three, mirror the config to wandb when available,
    and start training.
    """
    setup_logging()

    # A single parser accumulates the argument groups of all components.
    parser = argparse.ArgumentParser()
    parser = add_arguments(parser, GQNModel)
    parser = add_arguments(parser, build_trainer)
    parser = add_arguments(parser, GQNDataModule)
    args = parser.parse_args()

    # bind_arguments selects, from the parsed namespace, only the
    # arguments each component declared.
    model = GQNModel(**bind_arguments(args, GQNModel))
    trainer = build_trainer(**bind_arguments(args, build_trainer))

    # Push the full run configuration into Weights & Biases when the
    # trainer's logger wraps a wandb experiment.
    if hasattr(trainer.logger.experiment, 'wandb_experiment') and \
            hasattr(trainer.logger.experiment.wandb_experiment, 'config'):
        trainer.logger.experiment.wandb_experiment.config.update(
            args, allow_val_change=True)

    # NOTE: a stale commented-out block that built dataloaders directly and
    # patched trainer.on_train_epoch_start to call set_epoch was removed;
    # the DataModule path below supersedes it.
    datamodule = GQNDataModule(**bind_arguments(args, GQNDataModule))
    trainer.fit(model, datamodule=datamodule)
def init_ui(self):
    """Build the database-editing screen: one tab per registered table
    class, each holding a QTableWidget plus Add/Edit/Delete buttons wired
    through permission control.
    """
    uic.loadUi('UI/edit_db.ui', self)
    super().init_ui()
    for table_data_type in self.app.TABLE_DATA_CLASSES:
        table_name = table_data_type.table_name
        # Store the widget as self.<table_name>. setattr/getattr replaces
        # the original exec/eval on dynamically built source strings —
        # same effect, no dynamic code execution.
        current_table: QTableWidget = QTableWidget(self)
        setattr(self, table_name, current_table)
        # Add, Edit, Delete buttons
        btn_table_add = QPushButton('Add item', self)
        btn_table_edit = QPushButton('Edit item', self)
        btn_table_delete = QPushButton('Delete items', self)
        # Tab: buttons in row 0, the table spanning row 1.
        tab = QWidget()
        layout = QGridLayout()
        n_cols = 20
        layout.addWidget(current_table, 1, 0, 1, n_cols)
        layout.addWidget(btn_table_add, 0, 0)
        layout.addWidget(btn_table_edit, 0, 1)
        layout.addWidget(btn_table_delete, 0, 2)
        # Spacer widget keeps the button row stretched across all columns.
        layout.addWidget(QWidget(), 0, n_cols - 1)
        tab.setLayout(layout)
        self.tab_widget.addTab(tab, table_name)
        # Create TableData object bound to this widget and the DB cursor.
        table_data = table_data_type(current_table, self.cur)
        buttons = (btn_table_add, btn_table_edit, btn_table_delete)
        handlers = (self.table_add_clicked, self.table_edit_clicked,
                    self.table_delete_clicked)
        # idx (0=add, 1=edit, 2=delete) is forwarded to permission_control;
        # presumably it selects the required permission level — TODO confirm.
        for idx, (button, handler) in enumerate(zip(buttons, handlers)):
            button.clicked.connect(
                add_arguments(
                    self.permission_control(handler, table_data, idx)))
        # Double-clicking an item behaves like the Edit button (idx 1).
        current_table.doubleClicked.connect(
            add_arguments(
                self.permission_control(self.table_edit_clicked,
                                        table_data, 1)))
        self.tables.append(table_data)

    # Refresh the visible table whenever the active tab changes.
    def tab_changed(index):
        self.table_update(self.tables[index])

    self.tab_widget.currentChanged.connect(tab_changed)
    tab_changed(self.tab_widget.currentIndex())
    self.btn_back.clicked.connect(self.app.pop)
def predict_answer(model, data, vocab):
    """Greedy-decode every validation batch and print source/prediction pairs.

    Runs the model with teacher forcing disabled, argmaxes over the
    vocabulary dimension, and for each example prints the source sequence
    (joined characters) followed by the predicted sequence (joined words).
    """
    for batch in data.next_batch(False):
        src, len_src, trg, len_trg = batch
        # teacher_forcing_ratio=0.0 -> pure inference; the decoder feeds on
        # its own predictions.
        output = model(src, trg, len_src, teacher_forcing_ratio=0.0)
        _, output = torch.max(output, dim=2)  # argmax token ids
        output = output.data.cpu().numpy()
        src = src.data.cpu().numpy()
        # Walk the batch pairwise instead of indexing with range(len(...)).
        for src_seq, out_seq in zip(src, output):
            print(''.join(vocab.convert_to_char(src_seq)))
            print(''.join(vocab.convert_to_word(out_seq)))


if __name__ == '__main__':
    args = utils.add_arguments()
    # Use a context manager: the original pickle.load(open(...)) leaked the
    # file handle.
    with open(args.dict_file, 'rb') as dict_file:
        vocab = pickle.load(dict_file)
    valid_data = DataGeneration(vocab, args.valid_file, args.batch_size,
                                args.device)
    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # checkpoints from trusted sources.
    model = torch.load(
        args.model,
        map_location=lambda storage, loc: storage.cuda(args.device))
    model.eval()
    predict_answer(model, valid_data, vocab)
# Training setup: parse CLI arguments, persist them for reproducibility,
# and read the network hyper-parameters.
from Policy_Net import Policy_Net
from Value_Net import Value_Net
from utils import add_arguments, discounted_rewards
import tensorflow as tf
import numpy as np
import pickle as pkl
import argparse
import os  # was imported twice; duplicate removed
from Env import Env
import matplotlib.pyplot as plt

# Work around the duplicate-OpenMP-runtime abort on some platforms.
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'

parser = argparse.ArgumentParser()
add_arguments(parser)
args = parser.parse_args()

# Persist the parsed arguments so a later run can reload the exact config.
if not os.path.exists("Params"):
    os.mkdir("Params")
with open("./Params/args.pickle", "wb") as f:
    pkl.dump(args, f)

# network parameters (from CLI)
sess = tf.Session()
feature_depth = args.num_signal_feature
num_asset = args.num_asset
horizon = args.horizon
policy_lr = args.learning_rate_policy_net
value_lr = args.learning_rate_value_net
# -*- coding: utf-8 -*- from __future__ import print_function import argparse import random import math import model import utils import gen default_paraphrase = u"Niestety, psy czasem się mylą. Może ich zgubić choćby zapach palącego papierosy." parser = argparse.ArgumentParser(description='Paraphrase generator') utils.add_arguments(parser) model.add_arguments(parser) parser.add_argument("--paraphrase", type=unicode, default=default_paraphrase) parser.add_argument("--favorize-alpha", type=float, default=2.0) args = parser.parse_args() scale = 20 def semantic_distance(x, y): a = set(x.split()) b = set(y.split()) common = a & b if len(common) == 0: return 0.0 c = float(len(common)) S = 0.5 * math.log(len(a) / c, 2) + 0.5 * math.log(len(b) / c, 2)
# -*- coding: utf-8 -*- from __future__ import print_function import argparse import random import math import model import utils import gen default_paraphrase = u"Niestety, psy czasem się mylą. Może ich zgubić choćby zapach palącego papierosy." parser = argparse.ArgumentParser(description='Paraphrase generator') utils.add_arguments(parser) model.add_arguments(parser) parser.add_argument("--paraphrase", type=unicode, default=default_paraphrase) parser.add_argument("--favorize-alpha", type=float, default=2.0) args = parser.parse_args() scale = 20 def semantic_distance(x, y): a = set(x.split()) b = set(y.split()) common = a & b if len(common) == 0: return 0.0 c = float(len(common))