def main():
    """Train the GAN: build the model/optimizers, run the training loop,
    periodically log progress and (unless --silent) save preview images and
    network snapshots.
    """
    args = get_args()

    save_path = None
    preview_path = None
    if not args.silent:
        # Build <script_path><save_path>/<name>/preview in one call.
        # makedirs(exist_ok=True) replaces the original check-then-mkdir
        # sequence, which was racy (TOCTOU) and needed three separate calls.
        save_path = os.path.abspath(script_path + args.save_path)
        save_path = os.path.abspath(save_path + "/" + args.name)
        preview_path = os.path.abspath(save_path + "/preview")
        os.makedirs(preview_path, exist_ok=True)

    dataset = Dataset(args)

    # When max_epoch is given, translate it into an iteration budget;
    # a trailing partial batch counts as one extra iteration per epoch.
    if args.max_epoch is not None:
        epoch_iter = dataset.train_data_len // args.batch_size
        if dataset.train_data_len % args.batch_size != 0:
            epoch_iter += 1
        args.max_iter = args.max_epoch * epoch_iter

    progress = print_progress(args.max_iter, args.batch_size, dataset.train_data_len)

    if args.gpu_num != 0:
        cuda.get_device_from_array(xp.array([i for i in range(args.gpu_num)])).use()

    model = make_model(args, dataset)
    netG_opt = make_optimizer(model.netG_0, args.adam_alpha,
                              args.adam_beta1, args.adam_beta2)
    netD_opt = make_optimizer(model.netD_0, args.adam_alpha,
                              args.adam_beta1, args.adam_beta2)
    updater = Updater(model, netG_opt, netD_opt, args.n_dis, args.batch_size,
                      args.gpu_num, args.KL_loss_iter, args.KL_loss_conf,
                      args.epoch_decay, args.max_iter)

    def save_snapshot():
        # Save a preview image plus generator/discriminator weights,
        # tagged with the current iteration number. Extracted because the
        # original duplicated this sequence verbatim in two places.
        data = dataset.sampling(args.sample_size)
        sample = sample_generate(model.netG_0, data, args.noise_dim, args.noise_dist)
        Image.fromarray(sample).save(
            preview_path + f"/image_{dataset.now_iter:08d}.png")
        serializers.save_npz(
            save_path + f"/Generator_{dataset.now_iter:08d}.npz", model.netG_0)
        serializers.save_npz(
            save_path + f"/Discriminator_{dataset.now_iter:08d}.npz", model.netD_0)

    print("==========================================")
    print("Info:start train")
    start = time.time()
    for i in range(args.max_iter):
        data = toGPU(dataset.next(), args.gpu_num)
        updater.update(data, dataset.now_epoch)
        if dataset.now_iter % args.display_interval == 0:
            elapsed = time.time() - start
            # NOTE(review): progress() receives dataset.get_state uncalled —
            # presumably a property or a callable consumed downstream; left as-is.
            progress(elapsed, dataset.get_state)
            if not args.silent:
                # Bug fix: the original unconditionally wrote loss_hist.npy
                # here, raising NameError in --silent mode where save_path
                # was never defined.
                np.save(save_path + "/loss_hist.npy", updater.loss_hist)
            start = time.time()
        if dataset.now_iter % args.snapshot_interval == 0 and not args.silent:
            save_snapshot()
    if not args.silent:
        save_snapshot()
    print("\n\n\n\n==========================================")
    print("Info:finish train")
if batch_idx == 0: visualize_results(epoch, inputs, outputs) # Save checkpoint. test_loss = test_loss / (batch_idx + 1) writer.add_scalar('test/loss', test_loss, epoch) return test_loss # # main script # # get command-line arguments args = get_args() args.network = 'deep_autoencoder' args.dataset = 'mnist' args.activation = 'tanh' # set random seed for reproducibility torch.manual_seed(args.seed) # choose activation fuction for model act_dict = { 'relu': torch.nn.ReLU(), 'sigmoid': torch.nn.Sigmoid(), 'tanh': torch.nn.Tanh() } act = torch.nn.Sigmoid() if args.activation is None else act_dict[
def select_gcard(args):
    """Interactively pick a gcard from valid_gcards.txt and return its path.

    The file's first line is treated as a header and printed as-is; every
    following line is shown as a numbered menu option of the form
    "<gcard path>,<description>". The user is re-prompted until the
    selection is an int in the valid option range.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed command-line arguments (currently unused by this function;
        kept for interface compatibility with callers).

    Returns
    -------
    str
        The gcard path (text before the first comma) of the chosen line.
    """
    filename = fs.dirname + "/valid_gcards.txt"
    with open(filename) as f:
        content = [x.strip("\n") for x in f.readlines()]

    # Line 0 is a header; every other line is a numbered option.
    for linenum, line in enumerate(content):
        if linenum == 0:
            print(line + "\n")
        else:
            print("({0}) - {1}".format(linenum, line))

    selection = selector(content)
    # Valid selections are the integers 1 .. len(content)-1 (0 is the header).
    # Equivalent to the original `selection not in np.arange(1, len(content))
    # or not isinstance(selection, int)` test, without building a fresh numpy
    # array on every prompt.
    while not (isinstance(selection, int) and 1 <= selection <= len(content) - 1):
        print(("\n Selection not in valid range, try again, or "
               "hit ctrl+c to exit"))
        selection = selector(content)

    gcard_selected = content[selection].split(',')[0]
    # Bug fix: corrected typo "simultions" -> "simulations" in user-facing output.
    print("Gcard for simulations will be {0}".format(gcard_selected))
    return gcard_selected


if __name__ == "__main__":
    args = get_args.get_args()
    select_gcard(args)