Code Example #1
File: main.py Project: stsaten6/ENAS-cnn
def main(args):
    """main: Entry point."""
    utils.prepare_dirs(args)

    if args.num_gpu > 0:
        torch.cuda.manual_seed(args.random_seed)

    if args.network_type == 'rnn':
        # this fork only implements CNN search; without a dataset the
        # Trainer below would hit a NameError, so fail explicitly here
        raise NotImplementedError("RNN search is not supported")
    elif args.network_type == 'cnn':
        dataset = data.image.Image(args, args.data_path)
    else:
        raise NotImplementedError(f"{args.network_type} is not supported")
    trnr = trainer.Trainer(args, dataset)

    if args.mode == 'train':
        utils.save_args(args)
        trnr.train()
    elif args.mode == 'derive':
        assert args.load_path != "", ("`--load_path` should be given in "
                                      "`derive` mode")
        trnr.derive()

    else:
        if not args.load_path:
            raise Exception("[!] You should specify `load_path` to load a"
                            "pretrained model")
        trnr.test()
Code Example #2
    def __init__(self, args):
        self.args = args
        self.device = args.device
        self.start_iter = 1
        self.train_iters = args.train_iters
        # coeffs
        self.lambda_A = args.lambda_A
        self.lambda_B = args.lambda_B
        self.lambda_idt = args.lambda_idt

        self.dataloader_A, self.dataloader_B = get_dataloader(args)

        # two generator/discriminator pairs, one per translation direction (A->B, B->A)
        self.D_B, self.G_AB = get_model(args)
        self.D_A, self.G_BA = get_model(args)

        self.criterion_GAN = GANLoss(use_lsgan=args.use_lsgan).to(args.device)
        self.criterion_cycle = nn.L1Loss()
        self.criterion_idt = nn.L1Loss()

        self.optimizer_D = torch.optim.Adam(
            itertools.chain(self.D_B.parameters(), self.D_A.parameters()),
            lr=args.lr, betas=(args.beta1, args.beta2), weight_decay=args.weight_decay)
        self.optimizer_G = torch.optim.Adam(
            itertools.chain(self.G_AB.parameters(), self.G_BA.parameters()),
            lr=args.lr, betas=(args.beta1, args.beta2), weight_decay=args.weight_decay)

        self.logger = self.get_logger(args)
        self.writer = SummaryWriter(args.log_dir)

        save_args(args.log_dir, args)
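The `GANLoss(use_lsgan=...)` criterion used above is not shown in this example. A common shape for such a class, sketched here as an assumption rather than this project's actual code, switches between MSE (LSGAN) and BCE-with-logits (vanilla GAN) and builds the real/fake target tensors internally:

import torch
import torch.nn as nn

class GANLoss(nn.Module):
    # Hypothetical reconstruction: LSGAN -> MSE, vanilla GAN -> BCE with logits.
    def __init__(self, use_lsgan=True, real_label=1.0, fake_label=0.0):
        super().__init__()
        self.register_buffer('real_label', torch.tensor(real_label))
        self.register_buffer('fake_label', torch.tensor(fake_label))
        self.loss = nn.MSELoss() if use_lsgan else nn.BCEWithLogitsLoss()

    def forward(self, prediction, target_is_real):
        # Build a target tensor of the same shape as the discriminator output.
        label = self.real_label if target_is_real else self.fake_label
        return self.loss(prediction, label.expand_as(prediction))

Because the labels are registered as buffers, the `.to(args.device)` call in the example moves them along with the module.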
Code Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', default=100, type=int, help='epoch number')
    parser.add_argument('--start_epoch', default=0, type=int, help='start epoch number')
    parser.add_argument('-b', '--batch_size', default=4, type=int, help='mini-batch size')
    parser.add_argument('--lr', '--learning_rate', default=1e-4, type=float, help='initial learning rate')
    parser.add_argument('--weight-decay', default=0.0, type=float, help='weight decay')
    parser.add_argument('-c', '--continue', dest='continue_path', type=str, required=False)
    parser.add_argument('--exp_name', default=config.exp_name, type=str, required=False)
    parser.add_argument('--valid', action='store_true')
    parser.add_argument('--style_loss', action='store_true')
    args = parser.parse_args()
    print(args)

    config.exp_name = args.exp_name
    config.make_dir()

    save_args(args, config.log_dir)
    net = network()
    vgg = vgg_for_style_transfer()

    net = torch.nn.DataParallel(net).cuda()
    vgg = torch.nn.DataParallel(vgg).cuda()
    sess = Session(config, net=net)

    train_loader = get_dataloaders(os.path.join(config.data_dir, 'train.json'),
                                   batch_size=args.batch_size, shuffle=True)
    valid_loader = get_dataloaders(os.path.join(config.data_dir, 'val.json'),
                                   batch_size=args.batch_size, shuffle=True)

    if args.continue_path and os.path.exists(args.continue_path):
        sess.load_checkpoint(args.continue_path)

    clock = sess.clock
    tb_writer = sess.tb_writer

    criterion = nn.L1Loss().cuda()

    optimizer = optim.Adam(sess.net.parameters(), args.lr, weight_decay=args.weight_decay)

    scheduler = ReduceLROnPlateau(optimizer, 'min', factor=0.5, patience=10, verbose=True)

    for e in range(args.epochs):
        train_model(train_loader, sess.net, vgg,
                    criterion, optimizer, clock.epoch, tb_writer)
        valid_out = valid_model(valid_loader, sess.net, vgg,
                                criterion, optimizer, clock.epoch, tb_writer)

        tb_writer.add_scalar('train/learning_rate', optimizer.param_groups[-1]['lr'], clock.epoch)
        scheduler.step(valid_out['epoch_loss'])

        if valid_out['epoch_loss'] < sess.best_val_loss:
            sess.best_val_loss = valid_out['epoch_loss']
            sess.save_checkpoint('best_model.pth.tar')

        if clock.epoch % 10 == 0:
            sess.save_checkpoint('epoch{}.pth.tar'.format(clock.epoch))
        sess.save_checkpoint('latest.pth.tar')

        clock.tock()
Code Example #4
def main(args):  # pylint:disable=redefined-outer-name
    """main: Entry point."""
    utils.prepare_dirs(args)

    torch.manual_seed(args.random_seed)
    # seed every RNG source for reproducibility
    numpy.random.seed(args.random_seed)
    random.seed(args.random_seed)
    torch.backends.cudnn.deterministic = True

    if args.num_gpu > 0:
        torch.cuda.manual_seed(args.random_seed)

    if args.network_type == 'rnn':
        dataset = data.text.Corpus(args.data_path)
        trnr = trainer.Trainer(args, dataset)
    elif 'cnn' in args.network_type:
        dataset = data.image.Image(args)
        trnr = trainer.CNNTrainer(args, dataset)
    else:
        raise NotImplementedError(f"{args.dataset} is not supported")

    if args.mode == 'train':
        utils.save_args(args)
        trnr.train()
    elif args.mode == 'derive':
        assert args.load_path != "", ("`--load_path` should be given in "
                                      "`derive` mode")
        trnr.derive()
    else:
        if not args.load_path:
            raise Exception("[!] You should specify `load_path` to load a "
                            "pretrained model")
        trnr.test()
Code Example #5
File: main.py Project: beomwookang/ENAS-pytorch
def main(args):  # pylint:disable=redefined-outer-name
    """main: Entry point."""
    utils.prepare_dirs(args)

    torch.manual_seed(args.random_seed)

    if args.num_gpu > 0:
        torch.cuda.manual_seed(args.random_seed)

    # args.network_type is always 'cnn'
    # args.dataset is always 'imagenet'
    dataset = data.image.Image(args.data_path)  # path of the dataset

    trnr = trainer.Trainer(args, dataset)

    if args.mode == 'train':
        utils.save_args(args)
        trnr.train()
    elif args.mode == 'derive':
        assert args.load_path != "", ("`--load_path` should be given in "
                                      "`derive` mode")
        trnr.derive()
    elif args.mode == 'test':
        if not args.load_path:
            raise Exception("[!] You should specify `load_path` to load a "
                            "pretrained model")
        trnr.test()
    elif args.mode == 'single':
        if not args.dag_path:
            raise Exception("[!] You should specify `dag_path` to load a dag")
        utils.save_args(args)
        trnr.train(single=True)
    else:
        raise Exception(f"[!] Mode not found: {args.mode}")
Code Example #6
def main(args):  # pylint:disable=redefined-outer-name
    """main: Entry point."""
    if args.mode == 'train':
        logger = utils.get_logger(to_file=True)
    else:
        logger = utils.get_logger()

    utils.prepare_dirs(args, logger)

    torch.manual_seed(args.random_seed)

    if args.num_gpu > 0:
        torch.cuda.manual_seed(args.random_seed)

    if args.dataset != 'tumor':
        raise NotImplementedError(f"{args.dataset} is not supported")

    trnr = trainer.Trainer(args, logger)

    if args.mode == 'train':
        utils.save_args(args, logger)
        trnr.train()
    elif args.mode == 'derive':
        assert args.load_path != "", ("`--load_path` should be given in "
                                      "`derive` mode")
        trnr.derive_final()

    elif args.mode == 'single':
        if not args.dag_path:
            raise Exception("[!] You should specify `dag_path` to load a dag")
        utils.save_args(args, logger)
        trnr.train(single=True)
    else:
        raise Exception(f"[!] Mode not found: {args.mode}")
Code Example #7
    def test_load_save_args(self):
        parser = argparse.ArgumentParser()
        args = parser.parse_args(args=[])
        args.__dict__ = {"name": "test", "foo": "bar"}
        path = os.path.join(TMP, "args")
        ensure_dir(path)
        save_args(args, path)
        args_loaded = load_args(path)
        self.assertEqual(args, args_loaded)
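Note that `save_args` appears with several different signatures across these examples (`save_args(args)`, `save_args(args, path)`, `save_args(path, args)`); each project defines its own helper. The round-trip test above pins down one concrete contract: `save_args(args, path)` persists an `argparse.Namespace` into a directory and `load_args(path)` reconstructs an equal one. A minimal sketch satisfying that test, assuming a JSON serialization and the file name `args.json` (both the format and the file name are assumptions):

import argparse
import json
import os

def save_args(args, path):
    # Persist the Namespace's attributes as JSON inside the directory `path`.
    with open(os.path.join(path, "args.json"), "w") as f:
        json.dump(vars(args), f, indent=2, sort_keys=True)

def load_args(path):
    # Rebuild a Namespace from the JSON written by save_args; Namespace
    # equality compares attribute dicts, so the round trip is exact for
    # JSON-representable values.
    with open(os.path.join(path, "args.json")) as f:
        return argparse.Namespace(**json.load(f))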
Code Example #8
def main():
    input_dim = 6
    spatial_dims = [0, 1, 2]
    args = utils.read_args()

    experiment_dir = utils.get_experiment_dir(args.name, args.run)
    utils.initialize_experiment_if_needed(experiment_dir, args.evaluate)
    # Logger will print to stdout and logfile
    utils.initialize_logger(experiment_dir)

    # Optionally restore arguments from previous training
    # Useful if training is interrupted
    if not args.evaluate:
        try:
            args = utils.load_args(experiment_dir)
        except Exception:  # no saved args yet; initialize and save them
            args.best_tpr = 0.0
            args.nb_epochs_complete = 0  # Track in case training interrupted
            utils.save_args(experiment_dir, args)  # Save initial args

    net = utils.create_or_restore_model(experiment_dir, args.nb_hidden,
                                        args.nb_layer, input_dim, spatial_dims)
    if torch.cuda.is_available():
        net = net.cuda()
        logging.warning("Training on GPU")
        logging.info("GPU type:\n{}".format(torch.cuda.get_device_name(0)))
    criterion = nn.functional.binary_cross_entropy
    if not args.evaluate:
        assert args.train_file is not None
        assert args.val_file is not None
        train_loader = construct_loader(args.train_file,
                                        args.nb_train,
                                        args.batch_size,
                                        shuffle=True)
        valid_loader = construct_loader(args.val_file, args.nb_val,
                                        args.batch_size)
        logging.info("Training on {} samples.".format(
            len(train_loader) * args.batch_size))
        logging.info("Validate on {} samples.".format(
            len(valid_loader) * args.batch_size))
        train(net, criterion, args, experiment_dir, train_loader, valid_loader)

    # Perform evaluation over test set
    try:
        net = utils.load_best_model(experiment_dir)
        logging.warning("\nBest model loaded for evaluation on test set.")
    except Exception:
        logging.warning(
            "\nCould not load best model for test set. Using current.")
    assert args.test_file is not None
    test_loader = construct_loader(args.test_file, args.nb_test,
                                   args.batch_size)
    test_stats = evaluate(net, criterion, experiment_dir, args, test_loader,
                          TEST_NAME)
Code Example #9
File: search.py Project: qinglu0330/FGNAS
def main():
    utils.display_args(args)
    time.sleep(5)
    # setting up reproducibility with selected seed
    torch.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # setting up the working directory and recording args
    exp_tag = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    work_dir = os.path.join(args.work_dir, args.dataset, exp_tag)
    os.makedirs("log", exist_ok=True)
    log_path = os.path.join("log", exp_tag + ".log")
    logger = get_logger(log_path)
    res_list = []
    config.DROPOUT = args.dropout
    start = time.time()
    for i in range(args.run):
        logger.info("=" * 50 + f"Run {i+1}" + "=" * 50)
        res = nas(logger)
        res_list.append(res)
    running_time = time.time() - start
    result = pd.concat(res_list,
                       axis=0,
                       keys=[f"Run {i}" for i in range(len(res_list))])

    if args.save:
        os.makedirs(work_dir, exist_ok=False)
        utils.save_args(args, os.path.join(work_dir, 'args.txt'))
        file_name = os.path.join(work_dir, "result.csv")

        result.to_csv(file_name, index=True)
        logger.info(f"saving result to {file_name}")
        fig_name = os.path.join(work_dir, "progress.png")
        plt.figure()
        avg_reward = result.groupby(
            result.index.get_level_values(1))["reward"].mean()
        plt.plot(avg_reward)
        plt.title("Best reward in {} runs is {:.4f}".format(
            args.run, result["reward"].max()))
        logger.info(f"saving figure to {fig_name}")
        plt.savefig(fig_name)
    logger.info("Best reward in {} runs is {:.4f}".format(
        args.run, result["reward"].max()))

    device = 'cuda:{}'.format(args.gpu) if torch.cuda.is_available() else 'cpu'
    if args.evaluate:
        test_result(args.dataset, res_list, device, logger, args.layers)
    logger.info(f"Total running time {runnint_time}")
    logger.info("*" * 50 + "End" + "*" * 50)
Code Example #10
File: main.py Project: pband1256/GNN_preprocessing
def train(
          net,
          criterion,
          args, 
          experiment_dir, 
          train_loader, 
          valid_loader
          ):
  optimizer = torch.optim.Adamax(net.parameters(), lr=args.lrate)
  scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'max')
  # Nb epochs completed tracked in case training interrupted
  for i in range(args.nb_epochs_complete, args.nb_epoch):
    # Update learning rate in optimizer
    t0 = time.time()
    logging.info("\nEpoch {}".format(i+1))
    logging.info("Learning rate: {0:.3g}".format(args.lrate))
    
    train_stats = train_one_epoch(net,
                                  criterion,
                                  optimizer,
                                  args,
                                  experiment_dir,
                                  train_loader)
    val_stats = evaluate(net, criterion, experiment_dir, args,
                         valid_loader, 'Valid')

    utils.track_epoch_stats(i, args.lrate, 0, train_stats, val_stats, experiment_dir)

    # Update learning rate, remaining nb epochs to train
    scheduler.step(val_stats[0])
    args.lrate = optimizer.param_groups[0]['lr']
    args.nb_epochs_complete += 1

    # Track best model performance
    if (val_stats[0] > args.best_tpr):
      logging.warning("Best performance on valid set.")
      args.best_tpr = float(val_stats[0])
      utils.update_best_plots(experiment_dir)
      utils.save_best_model(experiment_dir, net)
      utils.save_best_scores(i, val_stats[2], val_stats[0], val_stats[1], experiment_dir)

    utils.save_epoch_model(experiment_dir, net)
    utils.save_args(experiment_dir, args)
    logging.info("Epoch took {} seconds.".format(int(time.time()-t0)))
    
    if args.lrate < 1e-6:
        logging.warning("Minimum learning rate reached.")
        break

  logging.warning("Training completed.")
Code Example #11
File: randomsearch.py Project: williamsz/AutoML
def main(args):
	utils.prepare_dirs(args)
	utils.save_args(args)

	dataset = get_dataset(args.dataset)
	random_results = []
	for step in range(args.controller_max_step):
		actions = random_actions(args)
		reward = get_reward(torch.LongTensor(actions), dataset)
		random_results.append(reward)
	top_scores = np.sort(list(set(random_results)))[-10:]

	result_map = {'RandomSearch': top_scores}
	df = pd.DataFrame(data=result_map)
	path = os.path.join(args.model_dir, 'df.csv')
	df.to_csv(path)
Code Example #12
def main(args):
    network_creator, env_creator = get_network_and_environment_creator(args)

    utils.save_args(args, args.debugging_folder, file_name=ARGS_FILE)
    logging.info('Saved args in the {0} folder'.format(args.debugging_folder))
    logging.info(args_to_str(args))

    batch_env = ConcurrentBatchEmulator(WorkerProcess, env_creator,
                                        args.num_workers, args.num_envs)
    set_exit_handler(concurrent_emulator_handler(batch_env))
    try:
        batch_env.start_workers()
        learner = PAACLearner(network_creator, batch_env, args)
        learner.set_eval_function(eval_network, learner.network, env_creator,
                                  50, learner.use_rnn)  # args to eval_network
        learner.train()
    finally:
        batch_env.close()
Code Example #13
def main(args):
    env_creator = get_environment_creator(args)
    network = create_network(args, env_creator.num_actions, env_creator.obs_shape)

    utils.save_args(args, args.debugging_folder, file_name=ARGS_FILE)
    logging.info('Saved args in the {0} folder'.format(args.debugging_folder))
    logging.info(args_to_str(args))

    #batch_env = SequentialBatchEmulator(env_creator, args.num_envs, init_env_id=1)
    batch_env = ConcurrentBatchEmulator(WorkerProcess, env_creator, args.num_workers, args.num_envs)
    set_exit_handler(concurrent_emulator_handler(batch_env))
    try:
        batch_env.start_workers()
        learner = ParallelActorCritic(network, batch_env, args)
        # evaluation results are saved as summaries of the training process:
        learner.evaluate = lambda network: eval_network(network, env_creator, 10)
        learner.train()
    finally:
        batch_env.close()
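The two examples above both wrap worker shutdown in `set_exit_handler(concurrent_emulator_handler(batch_env))`; those helpers are defined elsewhere in the project. A plausible shape for the pair, sketched as an assumption about the project's utilities, routes termination signals to a closure that closes the worker processes:

import signal

def set_exit_handler(handler):
    # Route SIGINT/SIGTERM to the cleanup handler.
    for sig in (signal.SIGINT, signal.SIGTERM):
        signal.signal(sig, handler)

def concurrent_emulator_handler(batch_env):
    def handler(signum, frame):
        batch_env.close()  # stop emulator worker processes before exiting
        raise SystemExit(128 + signum)
    return handler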
Code Example #14
def main(args):
    scores_rl = []
    scores_randomforest = []

    for _ in range(3):
        utils.prepare_dirs(args)
        utils.save_args(args)

        # max_step = range(100,1000,100)

        dataset = get_dataset(args.dataset)

        maxscore_randomforest = randomforest(dataset)
        scores_randomforest.append(maxscore_randomforest)
        # scores_autosklearn = auto_sklearn(dataset)
        # scores_tpot = tpot_(dataset)

        trnr = trainer.Trainer(dataset,
                               args.n_tranformers,
                               args.n_scalers,
                               args.n_constructers,
                               args.n_selecters,
                               args.n_models,
                               args.lstm_size,
                               args.temperature,
                               args.tanh_constant,
                               args.save_dir,
                               func_names=func_names,
                               model_dir=args.model_dir,
                               log_step=args.log_step,
                               controller_max_step=args.controller_max_step)
        best = trnr.train_controller()
        scores_rl.append(best)
        print(best)
    print(scores_rl)

    # method_names = ['RandomForest', 'AutoSklearn', 'TPOT', 'RL']
    # result_map = {'RandomForest': scores_randomforest, 'AutoSklearn':scores_autosklearn,
    # 				'TPOT': scores_tpot, 'RL': scores_rl}
    result_map = {'RandomForest': scores_randomforest, 'RL': scores_rl}
    df = pd.DataFrame(data=result_map)
    path = os.path.join(args.model_dir, 'df.csv')
    df.to_csv(path)
Code Example #15
def main():
    # load arguments
    args, framework_args = get_args()

    # set seed
    np.random.seed(args.seed)

    # build problem, get initial samples
    problem, true_pfront, X_init, Y_init = build_problem(
        args.problem, args.n_var, args.n_obj, args.n_init_sample,
        args.n_process)
    args.n_var, args.n_obj = problem.n_var, problem.n_obj

    # initialize optimizer
    optimizer = get_algorithm(args.algo)(problem, args.n_iter, args.ref_point,
                                         framework_args)

    # save arguments & setup logger
    save_args(args, framework_args)
    logger = setup_logger(args)
    print(problem, optimizer, sep='\n')

    # initialize data exporter
    exporter = DataExport(optimizer, X_init, Y_init, args)

    # optimization
    solution = optimizer.solve(X_init, Y_init)

    # export true Pareto front to csv
    if true_pfront is not None:
        exporter.write_truefront_csv(true_pfront)

    for _ in range(args.n_iter):
        # get new design samples and corresponding performance
        X_next, Y_next = next(solution)

        # update & export current status to csv
        exporter.update(X_next, Y_next)
        exporter.write_csvs()

    # close logger
    if logger is not None:
        logger.close()
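The loop above treats `optimizer.solve(X_init, Y_init)` as a generator and pulls one batch of proposals per iteration with `next(solution)`. A self-contained sketch of that contract, with a random stand-in for the acquisition step and a toy objective (everything below is illustrative, not the project's optimizer):

import numpy as np

def solve(X, Y, n_iter, seed=0):
    # Yield (X_next, Y_next) once per iteration, mirroring the consumption
    # pattern `X_next, Y_next = next(solution)` in the main loop.
    rng = np.random.default_rng(seed)
    for _ in range(n_iter):
        X_next = rng.uniform(size=(1, X.shape[1]))           # stand-in proposal
        Y_next = np.sum(X_next ** 2, axis=1, keepdims=True)  # stand-in evaluation
        X, Y = np.vstack([X, X_next]), np.vstack([Y, Y_next])
        yield X_next, Y_next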
Code Example #16
File: main.py Project: yiqisetian/ENAS-pytorch
def main(args):  # pylint:disable=redefined-outer-name
    """main: Entry point."""
    utils.prepare_dirs(args)  #data_dir="./data/ptb"

    torch.manual_seed(args.random_seed)

    if args.num_gpu > 0:
        # Sets the seed for generating random numbers on the current GPU.
        # Safe to call even if CUDA is not available; it is silently ignored.
        torch.cuda.manual_seed(args.random_seed)

    if args.network_type == 'rnn':
        dataset = data.text.Corpus(args.data_path)  # read the text into a dictionary and build Tensors of word indices
    elif args.dataset == 'cifar':
        dataset = data.image.Image(args.data_path)
    else:
        raise NotImplementedError("{}is not supported".format(args.dataset))

    trnr = trainer.Trainer(args, dataset)

    if args.mode == 'train':
        utils.save_args(args)
        trnr.train()
    elif args.mode == 'derive':
        assert args.load_path != "", (
            "`--load_path` should be given in `derive` mode")
        trnr.derive()
    elif args.mode == 'test':
        if not args.load_path:
            raise Exception(
                "[!] You should specify `load_path` to load a pretrained model"
            )
        trnr.test()
    elif args.mode == 'single':
        if not args.dag_path:
            raise Exception("[!] You should specify `dag_path` to load a dag")
        utils.save_args(args)
        trnr.train(single=True)
    else:
        raise Exception("[!] Mode not found: {}".format(args.mode))
Code Example #17
def main(args):  # pylint:disable=redefined-outer-name
    """main: Entry point."""
    utils.prepare_dirs(args)

    torch.manual_seed(args.random_seed)

    if args.num_gpu > 0:
        torch.cuda.manual_seed(args.random_seed)

    if args.network_type == 'rnn':
        dataset = data.text.Corpus(args.data_path)
    elif args.dataset == 'cifar':
        #dataset = data.image.Image(args.data_path)
        dataset = data.image.Image(args)
    else:
        raise NotImplementedError(f"{args.dataset} is not supported")

    trnr = trainer.Trainer(args, dataset)

    if args.mode == 'train':
        utils.save_args(args)
        trnr.train()
    elif args.mode == 'derive':
        assert args.load_path != "", ("`--load_path` should be given in "
                                      "`derive` mode")
        trnr.derive()
    elif args.mode == 'test':
        if not args.load_path:
            raise Exception("[!] You should specify `load_path` to load a "
                            "pretrained model")
        trnr.test()
    elif args.mode == 'single':
        if not args.dag_path:
            raise Exception("[!] You should specify `dag_path` to load a dag")
        utils.save_args(args)
        trnr.train(single=True)
    else:
        raise Exception(f"[!] Mode not found: {args.mode}")
Code Example #18
def main(args):  # pylint:disable=redefined-outer-name
    """main: Entry point."""

    utils.prepare_dirs(args)

    torch.manual_seed(args.random_seed)

    trnr = trainer.Trainer(args)

    if args.mode == 'train':
        utils.save_args(args)
        trnr.train()
    elif args.mode == 'derive':
        assert args.load_path != "", ("`--load_path` should be given in "
                                      "`derive` mode")
        trnr.derive()
    elif args.mode == 'single':
        if not args.dag_path:
            raise Exception("[!] You should specify `dag_path` to load a dag")
        utils.save_args(args)
        trnr.train(single=True)
    else:
        raise Exception(f"[!] Mode not found: {args.mode}")
Code Example #19
def main(args):
    tt_logger = TestTubeLogger(save_dir=args.log_path,
                               name="",
                               description=args.description,
                               debug=False,
                               create_git_tag=args.git_tag)
    tt_logger.experiment  # touching the property forces the experiment to be created

    log_dir = Path(tt_logger.save_dir) / f"version_{tt_logger.version}"

    checkpoint_dir = log_dir / "checkpoints"
    os.makedirs(checkpoint_dir, exist_ok=True)
    chkpt_callback = ModelCheckpoint(
        checkpoint_dir,
        #  monitor='Loss/val_loss',
        save_last=True,
        #  mode='min',
        #  save_top_k=1,
        period=5)

    data_loader = CustomDataLoader.from_argparse_args(args, )

    model = Pipeline.from_argparse_args(args, )

    save_args(args, log_dir)

    trainer = Trainer.from_argparse_args(
        args,
        logger=tt_logger,
        checkpoint_callback=chkpt_callback,
        #   early_stop_callback=False,
        weights_summary='full',
        gpus=1,
        profiler=True,
    )

    trainer.fit(model, data_loader)
Code Example #20
def main(args):  # pylint:disable=redefined-outer-name
    """main: Entry point."""
    utils.prepare_dirs(args)

    torch.manual_seed(args.random_seed)

    if args.num_gpu > 0:
        torch.cuda.manual_seed(args.random_seed)

    if args.network_type == 'rnn':
        assert args.network_type in ('rnn', 'cnn')
        dataset = data.text.Corpus(args.data_path)
    elif args.dataset == 'cifar':
        assert args.network_type in ('rnn', 'cnn')
        dataset = data.image.Image(args)
    else:
        # tabular search path: anything that is neither rnn nor cnn
        assert args.network_type not in ('rnn', 'cnn')
        dataset = data.tabular.Tabular(args)

    if args.network_type in ('rnn', 'cnn'):
        trnr = trainer.Trainer(args, dataset)
    else:
        trnr = trainer2.Trainer(args, dataset)

    if args.mode == 'train':
        utils.save_args(args)
        trnr.train()
    elif args.mode == 'derive':
        assert args.load_path != "", ("`--load_path` should be given in "
                                      "`derive` mode")
        trnr.derive()
    else:
        if not args.load_path:
            raise Exception("[!] You should specify `load_path` to load a "
                            "pretrained model")
        trnr.test()
Code Example #21
def main(args):
    tt_logger = TestTubeLogger(save_dir=args.log_path,
                               name="",
                               description=args.description,
                               create_git_tag=args.git_tag,
                               debug=args.debug)
    tt_logger.experiment  # touching the property forces the experiment to be created

    log_dir = Path(tt_logger.save_dir) / f"version_{tt_logger.version}"

    checkpoint_dir = log_dir / "checkpoints"
    os.makedirs(checkpoint_dir, exist_ok=True)
    chkpt_callback = ModelCheckpoint(
        checkpoint_dir,
        monitor='Loss/val_loss/data_loader_idx_2',
        save_last=True,
        mode='min',
        save_top_k=10,
    )

    data_module = ReIDDataModule.from_argparse_args(args)

    model = ST_ReID(data_module.num_classes,
                    learning_rate=args.learning_rate,
                    criterion=args.criterion,
                    rerank=args.rerank)

    save_args(args, log_dir)

    trainer = Trainer.from_argparse_args(args,
                                         logger=[tt_logger],
                                         checkpoint_callback=chkpt_callback,
                                         profiler=True)

    trainer.fit(model, data_module)
    trainer.test(model)
Code Example #22
File: main.py Project: prasadseemakurthi/dsr_rl
def experiment(config):
    """
    A function that runs an experiment.

    args
        config (dict) hyperparameters and experiment setup
    """
    with tf.Session() as sess:

        seed = config.pop('seed')

        if seed:
            seed = int(seed)
            random.seed(seed)
            tf.set_random_seed(seed)
            np.random.seed(seed)

        env_id = config.pop('env_id')
        LOGGER.info('using {} env'.format(env_id))

        env = gym.make(env_id)

        global_rewards = []
        global_step, episode = 0, 0

        config['env'] = env
        config['env_repr'] = repr(env)
        config['sess'] = sess

        render = int(config.pop('render'))

        agent = Agent(**config)

        rl_writer = tf.summary.FileWriter('./results/rl')
        save_args(config, 'results/args.txt')

        while global_step < config['total_steps']:
            episode += 1
            done = False
            rewards, actions = [], []
            observation = env.reset()

            while not done:
                global_step += 1

                if render:
                    env.render()
                action = agent.act(observation)
                next_observation, reward, done, info = env.step(action)
                agent.remember(observation, action, reward, next_observation,
                               done)
                train_info = agent.learn()

                rewards.append(reward)
                actions.append(action)
                observation = next_observation

            ep_rew = sum(rewards)
            global_rewards.append(ep_rew)
            avg_reward = sum(global_rewards[-100:]) / len(
                global_rewards[-100:])

            if episode % 10 == 0:
                log_str = ' step {:.0f} ep {:.0f} reward {:.1f} avg {:.1f}'
                logging.info(
                    log_str.format(global_step, episode, ep_rew, avg_reward))

            summary = tf.Summary(value=[
                tf.Summary.Value(tag='episode_reward', simple_value=ep_rew)
            ])
            rl_writer.add_summary(summary, episode)
            avg_sum = tf.Summary(value=[
                tf.Summary.Value(tag='avg_last_100_ep',
                                 simple_value=avg_reward)
            ])
            rl_writer.add_summary(avg_sum, episode)
            rl_writer.flush()

    return config
Code Example #23
                    help='weight decay')
parser.add_argument('--adam-beta1',
                    type=float,
                    default=0.9,
                    metavar='',
                    help='Beta 1 parameter for Adam')
parser.add_argument('--adam-beta2',
                    type=float,
                    default=0.999,
                    metavar='',
                    help='Beta 2 parameter for Adam')

args = parser.parse_args()
random.seed(args.manual_seed)
torch.manual_seed(args.manual_seed)
utils.save_args(args)

print('\n\n****** Creating {} model ******'.format(args.net_type))
setup = Model(args)
print("model created successfully!")
print('\n\n****** Preparing {} dataset *******'.format(args.dataset_train))
dataloader = Dataloader(args, setup.input_size)
loader_train, loader_test = dataloader.create()
print('data prepared successfully!')

# initialize model:
if args.resume is None:
    model = setup.model
    model.apply(utils.weights_init)
    train = setup.train
    test = setup.test
Code Example #24
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # data
    parser.add_argument('--imsize', type=int, default=64)
    parser.add_argument('--n_workers', type=int, default=2)
    # model
    parser.add_argument('--nz', type=int, default=100)
    parser.add_argument('--nc', type=int, default=3)
    parser.add_argument('--ngf', type=int, default=64)
    parser.add_argument('--ndf', type=int, default=64)
    # training
    parser.add_argument('--device', type=str, default='cuda:0')
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--lr', type=float, default=0.0002)
    parser.add_argument('--beta1', type=float, default=0.5)
    parser.add_argument('--beta2', type=float, default=0.999)
    # log
    parser.add_argument('--log_freq', type=int, default=100)
    parser.add_argument('--output_dir', type=str, default='./results')

    args, unknown_args = parser.parse_known_args()

    args.output_dir = './results/exp1.0'

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    save_args(args.output_dir, args)
    Experiment(args).run()
Code Example #25
def main(args):
    device = gpu_setup(args.gpu)
    append_args = ["dataset", "model"]
    checkpoint_dir = savedir_setup(args.savedir,
                                   args=args,
                                   append_args=append_args,
                                   basedir=args.saveroot)

    args.githash = check_githash()
    save_args(checkpoint_dir, args)

    dataloader = setup_dataloader(
        name=args.dataset,
        batch_size=args.batch,
        num_workers=args.workers,
    )

    dataset_size = len(dataloader.dataset)
    print("number of images (dataset size): ", dataset_size)

    model = setup_model(args.model,
                        dataset_size=dataset_size,
                        resume=args.resume,
                        biggan_imagenet_pretrained_model_path=args.pretrained)
    model.eval()
    #this has to be eval() even if it's training time
    #because we want to fix batchnorm running mean and var
    #still tune batchnrom scale and bias that is generated by linear layer in biggan

    optimizer, scheduler = setup_optimizer(
        model,
        lr_g_linear=args.lr_g_l,
        lr_g_batch_stat=args.lr_g_batch_stat,
        lr_bsa_linear=args.lr_bsa_l,
        lr_embed=args.lr_embed,
        lr_class_cond_embed=args.lr_c_embed,
        step=args.step,
        step_facter=args.step_facter,
    )

    criterion = AdaBIGGANLoss(
        scale_per=args.loss_per,
        scale_emd=args.loss_emd,
        scale_reg=args.loss_re,
        normalize_img=args.loss_norm_img,
        normalize_per=args.loss_norm_per,
        dist_per=args.loss_dist_per,
    )

    # start training loop
    losses = AverageMeter()
    print_freq = args.print_freq
    eval_freq = args.eval_freq
    save_freq = eval_freq
    max_iteration = args.iters
    log = {}
    log["log"] = []
    since = time.time()

    iteration = 0
    epoch = 0
    #prepare model and loss into device
    model = model.to(device)
    criterion = criterion.to(device)
    while True:
        # Iterate over dataset (one epoch).
        for data in dataloader:
            img = data[0].to(device)
            indices = data[1].to(device)

            # note: PyTorch >= 1.1 expects scheduler.step() after optimizer.step()
            scheduler.step()

            #embeddings (i.e. z) + noise (i.e. epsilon)
            embeddings = model.embeddings(indices)
            embeddings_eps = torch.randn(embeddings.size(),
                                         device=device) * 0.01
            #see https://github.com/nogu-atsu/SmallGAN/blob/f604cd17516963d8eec292f3faddd70c227b609a/gen_models/ada_generator.py#L29

            #forward
            img_generated = model(embeddings + embeddings_eps)
            loss = criterion(img_generated, img, embeddings,
                             model.linear.weight)
            losses.update(loss.item(), img.size(0))

            #compute gradient and do SGD step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if iteration % print_freq == 0:
                temp = "train loss: %0.5f " % loss.item()
                temp += "| smoothed loss %0.5f " % losses.avg
                log["log"].append({
                    "iteration": iteration,
                    "epoch": epoch,
                    "loss": losses.avg
                })
                print(iteration, temp)
                losses = AverageMeter()

            if iteration % eval_freq == 0 and iteration > 0:
                out_path = os.path.join(checkpoint_dir,
                                        "%d_recon.jpg" % iteration)
                generate_samples(model, out_path, dataloader.batch_size)

            if iteration % save_freq == 0 and iteration > 0:
                save_checkpoint(checkpoint_dir,
                                device,
                                model,
                                iteration=iteration)

            if iteration > max_iteration:
                break
            iteration += 1

        if iteration > max_iteration:
            break
        epoch += 1

    log_save_path = os.path.join(checkpoint_dir, "train-log.json")
    save_json(log, log_save_path)
Code Example #26
def main():
    args = parse_args()
    os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        device = 'cuda'
        torch.cuda.manual_seed(args.seed)
    else:
        device = 'cpu'
    print(f"==> Using device: {device}")
    if args.checkpoint is None:
        time_stamp = str(datetime.datetime.now().strftime('-%Y%m%d%H%M%S'))
        args.checkpoint = args.model + time_stamp
    args.checkpoint = 'checkpoints/' + args.checkpoint
    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)
        save_args(args)
    # create the logger unconditionally so resuming into an existing
    # checkpoint directory does not leave `logger` undefined below
    logger = Logger(os.path.join(args.checkpoint, 'log.txt'),
                    title="ModelNet" + args.model)
    logger.set_names([
        "Epoch-Num", 'Learning-Rate', 'Train-Loss', 'Train-acc-B',
        'Train-acc', 'Valid-Loss', 'Valid-acc-B', 'Valid-acc'
    ])

    print('==> Preparing data..')
    train_loader = DataLoader(ModelNet40(partition='train',
                                         num_points=args.num_points),
                              num_workers=8,
                              batch_size=args.batch_size,
                              shuffle=True,
                              drop_last=True)
    test_loader = DataLoader(ModelNet40(partition='test',
                                        num_points=args.num_points),
                             num_workers=8,
                             batch_size=args.batch_size,
                             shuffle=True,
                             drop_last=False)

    # Model
    print('==> Building model..')
    net = models.__dict__[args.model]()
    criterion = nn.CrossEntropyLoss().to(device)
    net = net.to(device)
    # criterion = criterion.to(device)
    if device == 'cuda':
        net = torch.nn.DataParallel(net)
        cudnn.benchmark = True

    optimizer = torch.optim.SGD(net.parameters(),
                                lr=args.learning_rate,
                                momentum=0.9,
                                weight_decay=args.weight_decay)
    scheduler = CosineAnnealingLR(optimizer,
                                  args.epoch,
                                  eta_min=args.learning_rate / 100)

    best_test_acc = 0.  # best test accuracy
    best_train_acc = 0.
    best_test_acc_avg = 0.
    best_train_acc_avg = 0.
    best_test_loss = float("inf")
    best_train_loss = float("inf")

    start_epoch = 0  # start from epoch 0 or last checkpoint epoch
    for epoch in range(start_epoch, args.epoch):
        print('Epoch(%d/%s) Learning Rate %s:' %
              (epoch + 1, args.epoch, optimizer.param_groups[0]['lr']))
        train_out = train(net, train_loader, optimizer, criterion,
                          device)  # {"loss", "acc", "acc_avg", "time"}
        test_out = validate(net, test_loader, criterion, device)
        scheduler.step()

        if test_out["acc"] > best_test_acc:
            best_test_acc = test_out["acc"]
            is_best = True
        else:
            is_best = False

        best_test_acc = test_out["acc"] if (
            test_out["acc"] > best_test_acc) else best_test_acc
        best_train_acc = train_out["acc"] if (
            train_out["acc"] > best_train_acc) else best_train_acc
        best_test_acc_avg = test_out["acc_avg"] if (
            test_out["acc_avg"] > best_test_acc_avg) else best_test_acc_avg
        best_train_acc_avg = train_out["acc_avg"] if (
            train_out["acc_avg"] > best_train_acc_avg) else best_train_acc_avg
        best_test_loss = test_out["loss"] if (
            test_out["loss"] < best_test_loss) else best_test_loss
        best_train_loss = train_out["loss"] if (
            train_out["loss"] < best_train_loss) else best_train_loss

        save_model(net,
                   epoch,
                   path=args.checkpoint,
                   acc=test_out["acc"],
                   is_best=is_best)
        logger.append([
            epoch, optimizer.param_groups[0]['lr'], train_out["loss"],
            train_out["acc_avg"], train_out["acc"], test_out["loss"],
            test_out["acc_avg"], test_out["acc"]
        ])
        print(
            f"Training loss:{train_out['loss']} acc_avg:{train_out['acc_avg']} acc:{train_out['acc']} time:{train_out['time']}s"
        )
        print(
            f"Testing loss:{test_out['loss']} acc_avg:{test_out['acc_avg']} acc:{test_out['acc']}% time:{test_out['time']}s\n\n"
        )
    logger.close()

    print(f"++++++++" * 2 + "Final results" + "++++++++" * 2)
    print(
        f"++  Last Train time: {train_out['time']} | Last Test time: {test_out['time']}  ++"
    )
    print(
        f"++  Best Train loss: {best_train_loss} | Best Test loss: {best_test_loss}  ++"
    )
    print(
        f"++  Best Train acc_B: {best_train_acc_avg} | Best Test acc_B: {best_test_acc_avg}  ++"
    )
    print(
        f"++  Best Train acc: {best_train_acc} | Best Test acc: {best_test_acc}  ++"
    )
    print(f"++++++++" * 5)
Code Example #27
File: make_data.py Project: hangjianli/stergm
    parser.add_argument('-f', '--form_terms', nargs="+", default=['edges'], help='Formation network statistics.')
    parser.add_argument('-d', '--diss_terms', nargs="+", default=['edges'], help='Dissolution network statistics.')
    # parser.add_argument('--data_format', choices=['real', 'synthetic'], default='synthetic',
    #                     help='The format of input data. Real data is a list of adj matrices while synthetic'
    #                          'data is a list with nw, terms, and extra elements.')
    parser.set_defaults()
    # Get commands from command line
    args = parser.parse_args()
    np.seterr(all='raise')

    # Get the input and output filenames
    output_dir = args.outdir + ('' if args.outdir.endswith('/') else '/')
    args_dir = utils.make_directory(output_dir, 'args')

    utils.save_args(args, args_dir + 'args.txt')

    input_data = args.data_name + ('' if args.data_name.endswith('.rds') else '.rds')
    H_outfile = output_dir + args.data_name + "_H.txt"
    y_outfile = output_dir + args.data_name + "_y.txt"

    # load the data
    data = readRDS("../data/" + input_data)
    data = np.array(data).astype(int)
    t = len(data)
    n = len(data[0])
    p = len(args.form_terms) + len(args.diss_terms)

    print(f"time series length: {t}")
    print(f"network size: {n} x {n}")
    print(f"statistics dimension: {p}")
Code Example #28
    experiment_dir = args.outdir + ('' if args.outdir.endswith('/') else '/')
    with open(experiment_dir + 'args/args.txt', 'r') as f:
        sim_args_str = f.read()
    sim_args = ast.literal_eval(sim_args_str)
    p = len(sim_args['form_terms']) + len(sim_args['diss_terms'])
    H = np.loadtxt(experiment_dir + sim_args['data_name'] + '_H.txt')
    y = np.loadtxt(experiment_dir + sim_args['data_name'] + '_y.txt')
    t = H.shape[0]
    H = H.reshape((t, -1, p)) # t x n^2(E) x p
    n = np.sqrt(H.shape[1]).astype(int)
    print(f"Data has dimension (t, n, p): ({t}, {n}, {p})")

    # Get the output filenames
    result_dir = utils.make_directory(experiment_dir, 'results')
    args_dir = utils.make_directory(experiment_dir, 'args')
    utils.save_args(args, args_dir + 'args_model.txt')

    theta_outfile = result_dir + 'theta_' + sim_args['data_name'] + ".txt"
    u_outfile = result_dir + 'u_' + sim_args['data_name'] + ".txt"
    z_outfile = result_dir + 'z_' + sim_args['data_name'] + ".txt"
    theta_plot_dir = result_dir + 'est_theta_diff.png'

    print('Initialize STERGM model...')
    model = mple_learn.STERGMGraph(
        lam=args.lam,
        admm_alpha=args.admm_alpha,
        rel_tol=args.rel_tol,
        max_steps=args.max_steps,
        newton_max_steps=args.max_steps_newton,
        converge_tol=args.conv_tol,
        gd_lr=args.gd_lr,
Code Example #29
File: finetune.py Project: rocksat/PatchDrop
                    type=float,
                    default=0.8,
                    help='probability bounding factor')
parser.add_argument('--lr_size',
                    type=int,
                    default=8,
                    help='Policy Network Image Size')
parser.add_argument('--test_interval',
                    type=int,
                    default=5,
                    help='At what epoch to test the model')
args = parser.parse_args()

if not os.path.exists(args.cv_dir):
    os.system('mkdir ' + args.cv_dir)
utils.save_args(__file__, args)


def train(epoch):
    agent.train()
    rnet.train()

    matches, rewards, rewards_baseline, policies = [], [], [], []
    for batch_idx, (inputs, targets) in tqdm.tqdm(enumerate(trainloader),
                                                  total=len(trainloader)):

        # `.cuda(async=True)` is a SyntaxError on Python 3.7+; use non_blocking instead
        inputs, targets = Variable(inputs), Variable(targets).cuda(non_blocking=True)
        if not args.parallel:
            inputs = inputs.cuda()

        # Get the low resolution agent images
Code Example #30
#parser.add_argument('--wide', default = 2, type = int, help = 'using wider resnet18')
args = parser.parse_args()

DEVICE = torch.device('cuda:{}'.format(args.d))
if args.exp is None:
    cur_dir = os.path.realpath('./')
    args.exp = cur_dir.split(os.path.sep)[-1]
log_dir = os.path.join('../../logs', args.exp)
exp_dir = os.path.join('../../exps', args.exp)
train_res_path = os.path.join(exp_dir, 'train_results.txt')
val_res_path = os.path.join(exp_dir, 'val_results.txt')
final_res_path = os.path.join(exp_dir, 'final_results.txt')
if not os.path.exists(exp_dir):
    os.mkdir(exp_dir)

save_args(args, exp_dir)
writer = SummaryWriter(log_dir)

clock = TrainClock()

learning_rate_policy = [[5, 0.01], [3, 0.001], [2, 0.0001]]
get_learning_rate = MultiStageLearningRatePolicy(learning_rate_policy)


def adjust_learning_rate(optimizer, epoch):
    lr = get_learning_rate(epoch)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
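Reading the policy list `[[5, 0.01], [3, 0.001], [2, 0.0001]]` as "(number of epochs, learning rate)" stages, `MultiStageLearningRatePolicy` can be sketched as below. This is an assumed reconstruction of the helper, not its actual source:

class MultiStageLearningRatePolicy:
    def __init__(self, stages):
        # stages: list of (num_epochs, lr) pairs, applied in order.
        self.stages = stages

    def __call__(self, epoch):
        for num_epochs, lr in self.stages:
            if epoch < num_epochs:
                return lr
            epoch -= num_epochs
        return self.stages[-1][1]  # past the last stage, keep its lr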