def main(args):  # pylint:disable=redefined-outer-name
    """Entry point: seed RNGs, build the dataset, and run args.mode.

    Args:
        args: Parsed argument namespace; reads random_seed, num_gpu,
            data_path, mode, load_path, dag_path.

    Raises:
        Exception: when a required path is missing or the mode is unknown.
    """
    utils.prepare_dirs(args)

    # Seed CPU and (if available) GPU RNGs for reproducibility.
    torch.manual_seed(args.random_seed)
    if args.num_gpu > 0:
        torch.cuda.manual_seed(args.random_seed)

    # args.network_type is always 'cnn' and args.dataset always 'imagenet'
    # in this configuration, so the image dataset is built unconditionally.
    dataset = data.image.Image(args.data_path)  # path of dataset

    trnr = trainer.Trainer(args, dataset)

    if args.mode == 'train':
        utils.save_args(args)
        trnr.train()
    elif args.mode == 'derive':
        # Explicit raise instead of `assert`: asserts are stripped when
        # Python runs with -O, which would silently skip this validation.
        if args.load_path == "":
            raise Exception("`--load_path` should be given in `derive` mode")
        trnr.derive()
    elif args.mode == 'test':
        if not args.load_path:
            raise Exception("[!] You should specify `load_path` to load a "
                            "pretrained model")
        trnr.test()
    elif args.mode == 'single':
        if not args.dag_path:
            raise Exception("[!] You should specify `dag_path` to load a dag")
        utils.save_args(args)
        trnr.train(single=True)
    else:
        raise Exception(f"[!] Mode not found: {args.mode}")
def run_experiment(cfg_dict):
    """Run one wandb-tracked training experiment described by cfg_dict."""
    device = utils.get_device()
    expand_cfg(cfg_dict)
    wandb.login()
    with wandb.init(project=cfg_dict['project_name'],
                    config=cfg_dict,
                    notes=cfg_dict['run_description']) as wandb_run:
        cfg = wandb_run.config

        # Build the model on the target device and initialize its weights.
        net = models.make_model(**cfg.model).to(device)
        net = net.apply(models.init_weights)

        # Data loaders plus a handful of fixed test samples for inspection.
        train_dl = data.make_loader(**cfg.train_dataset)
        test_dl = data.make_loader(**cfg.test_dataset)
        samples = [test_dl.dataset[idx][0] for idx in range(8)]

        # Checkpoint name: prefer the wandb run name, else a dated default.
        base_name = wandb_run.name or (
            "checkpoint_" + datetime.date.today().strftime("%d%m%Y"))
        save_path = os.path.join(cfg.save_dir, base_name + "_best.pt")

        exp_trainer = trainer.Trainer(save_path, device, net, train_dl,
                                      test_dl, samples, **cfg.trainer)
        exp_trainer.train()
def main(args):
    """Entry point: build the dataset, then train/derive/test per args.mode.

    Raises:
        NotImplementedError: for network types with no dataset implementation.
        Exception: when `load_path` is required but missing.
    """
    utils.prepare_dirs(args)

    if args.num_gpu > 0:
        torch.cuda.manual_seed(args.random_seed)

    if args.network_type == 'cnn':
        dataset = data.image.Image(args, args.data_path)
    else:
        # Bug fix: the 'rnn' branch used to `pass`, leaving `dataset`
        # unbound and crashing later with a NameError. Fail loudly instead,
        # and report the value actually branched on (network_type).
        raise NotImplementedError(f"{args.network_type} is not supported")

    trnr = trainer.Trainer(args, dataset)

    if args.mode == 'train':
        utils.save_args(args)
        trnr.train()
    elif args.mode == 'derive':
        assert args.load_path != "", ("`--load_path` should be given in "
                                      "`derive` mode")
        trnr.derive()
    else:
        if not args.load_path:
            # Bug fix: missing space made the message read "load apretrained".
            raise Exception("[!] You should specify `load_path` to load a "
                            "pretrained model")
        trnr.test()
def test_train(self):
    """Train a six-hidden-layer BokuNet on the full dataset and evaluate."""
    # Full test set; to limit the sample count, use e.g.
    # gd.get_normed_testdata_choiced(100) for a random subset of 100.
    x_test, t_test = gd.get_normed_testdata()
    # Full training set; gd.get_normed_traindata_choiced(1000) would pick
    # a random subset of 1000 instead.
    x_train, t_train = gd.get_normed_traindata()

    # Build the network to run.
    net = ai.BokuNet(input_size=784,
                     hidden_size_list=[100, 100, 100, 100, 100, 100],
                     output_size=10,
                     dropout=False,
                     batchnorm=True,
                     pklname='bokuparams.pkl')

    # Train it. An epoch is one full pass: with 10000 test samples and
    # batch_size 100, 100 iterations make one epoch.
    net_trainer = tr.Trainer(net, x_train, t_train, x_test, t_test,
                             epoch=10, batch_size=100,
                             pklname='bokuparams.pkl')
    net_trainer.train()
def main(args):  # pylint:disable=redefined-outer-name
    """Entry point: seed all RNGs, build the dataset/trainer, run args.mode.

    Raises:
        NotImplementedError: for unsupported network types.
        Exception: when `load_path` is required but missing.
    """
    utils.prepare_dirs(args)

    # Seed every RNG source (torch CPU/GPU, numpy, stdlib random) and force
    # deterministic cuDNN kernels so runs are reproducible.
    torch.manual_seed(args.random_seed)
    numpy.random.seed(args.random_seed)
    random.seed(args.random_seed)
    torch.backends.cudnn.deterministic = True
    if args.num_gpu > 0:
        torch.cuda.manual_seed(args.random_seed)

    if args.network_type == 'rnn':
        dataset = data.text.Corpus(args.data_path)
        trnr = trainer.Trainer(args, dataset)
    elif 'cnn' in args.network_type:
        dataset = data.image.Image(args)
        trnr = trainer.CNNTrainer(args, dataset)
    else:
        raise NotImplementedError(f"{args.dataset} is not supported")

    if args.mode == 'train':
        utils.save_args(args)
        trnr.train()
    elif args.mode == 'derive':
        # Explicit raise instead of `assert`, which is stripped under -O.
        if args.load_path == "":
            raise Exception("`--load_path` should be given in `derive` mode")
        trnr.derive()
    else:
        if not args.load_path:
            raise Exception("[!] You should specify `load_path` to load a "
                            "pretrained model")
        trnr.test()
def test_train(self):
    """Train a small two-layer BokuNet on random data subsets."""
    # Random subset of 1000 test samples (gd.get_normed_testdata() would
    # load the full set instead).
    x_test, t_test = gd.get_normed_testdata_choiced(1000)
    # Random subset of 10000 training samples (gd.get_normed_traindata()
    # would load the full set instead).
    x_train, t_train = gd.get_normed_traindata_choiced(10000)

    # Build the network to run.
    net = ai.BokuNet(input_size=784,
                     hidden_size_list=[100, 100],
                     output_size=10,
                     dropout=False,
                     batchnorm=False,
                     pklname='twolayers.pkl')

    # Train for 10 epochs.
    net_trainer = tr.Trainer(net, x_train, t_train, x_test, t_test,
                             epoch=10, batch_size=100,
                             pklname='twolayers.pkl')
    net_trainer.train()
def train(self, speaker_id_worker, train_audio):
    """Train a GMM for one speaker and merge the result into self.gmm_models.

    Args:
        speaker_id_worker: identifier of the speaker heard in the audio.
        train_audio: the audio data to train on.
    """
    print("start training")

    # ODAS was sure about which speaker is speaking in this audio file,
    # so we can train on it directly.
    speaker_trainer = tr.Trainer(speaker_id_worker, train_audio,
                                 self.gmm_models, self.trainer_gmm_queue)
    speaker_trainer.run()

    # Pull the freshly trained model (if any) off the queue and store it.
    # Bug fix: the queue-draining section used Python-2 `print '...'`
    # statements (a SyntaxError in Python 3) — converted to print().
    # Renamed `id` to avoid shadowing the builtin.
    # NOTE(review): block=True with no timeout can wait forever if the
    # trainer enqueued nothing — confirm Trainer.run always enqueues.
    speaker_id = None
    print("starting updating gmm_models")
    try:
        speaker_id, gmm_model = self.trainer_gmm_queue.get(block=True)
    except Empty:
        print("Empty found")
    if speaker_id is not None:
        self.gmm_models[speaker_id] = gmm_model
def main():
    """Build the Transformer model graph for one batch and run training."""
    t = trainer.Trainer()
    args = t.args
    params = Params(args.batch_size, args.seq_len, args.model_size)

    # Initialize the dataset and fetch one batch of enc/dec inputs.
    dataset = TextDataLoader(args.batch_size, args.src_vocab, args.tgt_vocab,
                             args.src_text, args.tgt_text, params.max_seq_len,
                             args.src_vocab_size, args.tgt_vocab_size,
                             args.sentences_size)
    enc_inputs, dec_inputs, _, _ = dataset.next_batch()

    # Build the model graph.
    graph, mesh_to_impl, mtf_loss = Transformer(
        enc_inputs, dec_inputs, params, dataset.src_vocab_size,
        dataset.tgt_vocab_size, args.strategy, t.num_nodes, t.num_gpus)

    # Train.
    run_options = tf.RunOptions(report_tensor_allocations_upon_oom=True)
    sess_config = tf.ConfigProto(allow_soft_placement=False)
    t.train_model(graph, mesh_to_impl, mtf_loss, dataset,
                  config=sess_config, run_options=run_options)
def main(config):
    """Train 100 models, tracking individual and running ensemble accuracy.

    Args:
        config: configuration object passed through to trainer.Trainer.
    """
    accs = []
    models = []
    accs_ensemble = []
    correct_ensemble = []
    pred_prob_ensemble = []
    answer_ensemble = []

    for _ in range(100):
        t = trainer.Trainer(config)
        acc = t.fit()
        accs.append(acc)
        print(np.array(accs).mean())
        models.append(t)

        # Collect this model's test predictions for the ensemble.
        accs_individual, correct, pred_prob, answer = t.evaluate(
            'test', ensemble=True)
        accs_ensemble.append(accs_individual)
        correct_ensemble.append(correct)
        pred_prob_ensemble.append(pred_prob)
        answer_ensemble.append(answer)

        # Ensemble by multiplying per-model class probabilities
        # (product of probs), then taking the argmax class.
        init_prob = np.ones_like(pred_prob_ensemble[0])
        for p in pred_prob_ensemble:
            init_prob = init_prob * p
        guess = init_prob.argmax(1)
        ensemble_total_accuracy = (guess == answer_ensemble[0]).mean()
        print('ensemble_accuracy_so_far', ensemble_total_accuracy)
        # Removed: a bare `pred_prob_ensemble[0].argmax(1) == answer...`
        # comparison whose result was discarded (no-op), plus a dead
        # pdb.set_trace() comment.

    npaccs = np.array(accs)
    print(npaccs.mean(), npaccs.std(), npaccs.min())
def main(args):  # pylint:disable=redefined-outer-name
    """Entry point for the tumor task: set up logging/seeds, run args.mode.

    Raises:
        NotImplementedError: for any dataset other than 'tumor'.
        Exception: for a missing required path or an unknown mode.
    """
    # Log to file only during training; other modes log to console only.
    if args.mode == 'train':
        logger = utils.get_logger(to_file=True)
    else:
        logger = utils.get_logger()

    utils.prepare_dirs(args, logger)

    torch.manual_seed(args.random_seed)
    if args.num_gpu > 0:
        torch.cuda.manual_seed(args.random_seed)

    if args.dataset != 'tumor':
        raise NotImplementedError(f"{args.dataset} is not supported")

    trnr = trainer.Trainer(args, logger)

    if args.mode == 'train':
        utils.save_args(args, logger)
        trnr.train()
    elif args.mode == 'derive':
        # Explicit raise instead of `assert`, which is stripped under -O.
        if args.load_path == "":
            raise Exception("`--load_path` should be given in `derive` mode")
        trnr.derive_final()
    elif args.mode == 'single':
        if not args.dag_path:
            raise Exception("[!] You should specify `dag_path` to load a dag")
        utils.save_args(args, logger)
        trnr.train(single=True)
    else:
        raise Exception(f"[!] Mode not found: {args.mode}")
def players_creator():
    """Create the two players, deciding who goes first with a dice roll.

    Returns:
        tuple: (player1, player2). Bug fix: previously the created trainers
        were local variables and the function returned nothing, so both
        players were discarded immediately.
    """
    print("We will roll our magic dice once. If we will get odd number first "
          "player will start else second player starts the game.\n"
          "And the lucky number is...\n")
    n = randint(1, 10)
    #time.sleep(3)
    print(n)
    if n % 2 == 1:
        print("First Player starts.\n")
        player1 = trainer.Trainer()
        print("Now lets create second Player.\n")
        player2 = trainer.Trainer()
    else:
        print("Second Player starts.\n")
        player2 = trainer.Trainer()
        print("Now lets create first Player.\n")
        player1 = trainer.Trainer()
    return player1, player2
def main(config) -> None:
    """Wire up the talk model, its dataloader, and the loss, then train."""
    train_cfg = config.train_conf
    net = model.Model(config.model.vocab_size)
    loader = dataloader.get_dataloader(config.data.data_category,
                                       train_cfg.batch_size)
    criterion = nn.CrossEntropyLoss()
    runner = trainer.Trainer(net, loader, criterion,
                             train_cfg.optimizer, train_cfg.optimizer_lr)
    runner.train(epoch=train_cfg.epoch)
def main(args):
    """Seed the RNGs, load the PTB corpus, and start training."""
    torch.manual_seed(args.random_seed)
    if args.num_gpu > 0:
        torch.cuda.manual_seed(args.random_seed)
    corpus = data.text.Corpus('data/ptb')
    ptb_trainer = trainer.Trainer(args, corpus)
    ptb_trainer.train()
def on_select_trainer(self, *args):
    """Swap in the trainer implementation chosen in the trainer widget."""
    chosen = self.trainer_text.get()
    # Default trainer; replaced below if a specialised one matches.
    self.trainer = trainer.Trainer()
    if chosen == "Playful":
        self.trainer = playfulTrainer.PlayfulTrainer()
    elif chosen == "Playful (Named)":
        self.trainer = playfulTrainer.PlayfulTrainer(True)
    # self.insert_trainer_text("[Trainer switched to: {0}]".format(chosen))
    self.trainer.name = self.drinker_name.get()
def run(config):
    """Create a Trainer and run it in the mode given by config['mode'].

    Raises:
        ValueError: if config['mode'] is not 'train', 'test', or 'implement'.
    """
    mytrainer = trainer.Trainer(config)
    if config['mode'] == 'train':
        # Training is the only mode that loads the dataset up front.
        X, Y = dataset.read_data(config['file_path'])
        mytrainer.add_data(X, Y)
        mytrainer.train()
    elif config['mode'] == 'test':
        mytrainer.test()
    elif config['mode'] == 'implement':
        mytrainer.implement()
    else:
        # Robustness fix: an unknown mode used to fall through silently
        # and do nothing at all.
        raise ValueError(f"Unknown mode: {config['mode']}")
def __init__(self):
    """Parse input, train the agent, report the best policy, then demo it."""
    self.processInput()
    q_trainer = tr.Trainer(self.epsilon, self.algorithm, self.min_epsilon,
                           self.episodes, self.max_steps, self.alpha,
                           self.gamma)
    bestPolicy = q_trainer.StartTraining()
    q_trainer.GenerateOutput()
    print("Best policy learned: ", bestPolicy)
    input("Press enter to see what the agent learned")
    # Replay the learned behaviour for a while.
    for _ in range(1000):
        q_trainer.test()
def main(**kwargs):
    """Dispatch to training or testing based on the parsed settings dict."""
    # Parse JSON settings file
    general = kwargs['general_params']
    episodes = general['num_of_episodes']
    mode = general['mode']

    agent_trainer = trainer.Trainer(kwargs)
    if mode['train']:
        agent_trainer.train(num_of_episodes=episodes)
    else:
        agent_trainer.test(checkpoint_filename='checkpoint.pth', time_span=3)
def main():
    """Sample 20 images from the flowers model and save them as JPEGs."""
    cfg = config.Config(filename_queue="dataset/flowers.tfrecords",
                        logdir="log_flowers")
    t = trainer.Trainer(cfg)

    # Ensure the sample directory exists (equivalent to the exists-check
    # + makedirs pair, but race-free).
    os.makedirs(cfg.sampledir, exist_ok=True)

    _, im = t.sample(20)
    for idx in range(20):
        out_name = os.path.join(cfg.sampledir, str(idx + 1) + ".jpg")
        # NOTE(review): scipy.misc.imsave was removed in SciPy 1.2 —
        # confirm the pinned SciPy version still provides it.
        scipy.misc.imsave(out_name, im[idx])
def next_action(self, button_no):
    """Advance the game's UI state machine one step.

    The current state is read from self.scenario; each branch handles one
    state, updates the button layout/labels, and sets the next state.

    Args:
        button_no: which button the user pressed; forwarded to the handlers
            that need a choice (get_name, get_choice, battle_round).
    """
    scenario = self.scenario.get()
    if scenario == "Initialise":
        # Record the player's name, then offer the two starter choices.
        self.get_name(button_no)
        self.scenario.set("Initialise2")
        self.buttons_func(3, 2)
        self.button1_text.set("Charmander")
        self.button2_text.set("Squirtle")
    elif scenario == "Initialise2":
        # Record the starter choice and create both trainers.
        self.get_choice(button_no)
        self.player = tr.Trainer(self.name, [self.starter_choice], [5],
                                 [self.starter_type], 1, 0)
        self.professor = tr.Trainer("Professor", [self.professor_starter],
                                    [5], [self.professor_type], 0, 0)
        self.scenario.set("Battle")
        self.buttons_func(1, 3)
        self.button1_text.set("Start battle")
    elif scenario == "Battle":
        # Start the battle and show the per-round options.
        self.text.set(self.player.battle_init(self.professor))
        self.scenario.set("Battle choice")
        self.buttons_func(2, 1)
        self.button1_text.set("Attack")
        self.button2_text.set("Use Potion")
    elif scenario == "Battle choice":
        # Run one round; result[0] is the text to display and
        # result[1] == 2 moves the game to the End state.
        result = self.player.battle_round(button_no, self.professor)
        self.text.set(result[0])
        if result[1] == 2:
            self.scenario.set("End")
            self.buttons_func(1, 2)
            self.button1_text.set("Thanks for playing")
    elif scenario == "End":
        # Close the window.
        self.master.destroy()
def main():
    """Train an RNN language model using the mesh-partitioning strategy
    selected by args.strategy."""
    t = trainer.Trainer()
    args = t.args

    # Initialize dataset (monolingual: target vocab/text are None).
    dataset = TextDataLoader(args.batch_size, args.src_vocab, None,
                             args.src_text, None, args.seq_len,
                             args.src_vocab_size, args.tgt_vocab_size,
                             args.sentences_size)
    inputs, labels, _, _ = dataset.next_batch()

    # Convert inputs and labels to int32, due to a bug in mtf.one_hot that
    # leads to TypeError due to type mismatch.
    inputs = tf.cast(inputs, tf.int32)
    labels = tf.cast(labels, tf.int32)

    # Pad vocab so it divides evenly across the GPUs.
    vocab_size = utils.RoundUp(dataset.src_vocab_size, t.num_gpus)
    print("Vocab size: %d" % vocab_size)

    params = Params(args.batch_size, vocab_size, args.seq_len,
                    t.num_nodes, t.num_gpus)

    # Model: each strategy lives in its own module.
    if args.strategy == 0:
        import rnnlm_data as rnn
    elif args.strategy == 1:
        import rnnlm_opt as rnn
    elif args.strategy == 2:
        import rnnlm_gnmt as rnn
    elif args.strategy == 3:
        import rnnlm_flexflow as rnn
    else:
        # Was `assert False`: asserts vanish under -O, so raise instead.
        raise ValueError(f"Unknown strategy: {args.strategy}")
    graph, mesh_to_impl, mtf_loss = rnn.model(params, inputs, labels)

    # All strategies currently tolerate soft placement (removed the dead
    # commented-out per-module probe, and the unused local `lr`).
    soft_placement = True

    # Train.
    run_options = tf.RunOptions(report_tensor_allocations_upon_oom=True)
    config = tf.ConfigProto(allow_soft_placement=soft_placement,
                            log_device_placement=True)
    t.train_model(graph, mesh_to_impl, mtf_loss, dataset,
                  config=config, run_options=run_options)