Example #1
def sampleTestGen():
    # Restore the epoch-28 CVAE checkpoint and generate predictions for the
    # listed category ids.
    args = Args(False)
    categ_arr = [16, 24, 25, 30, 31, 37, 40, 43, 46, 53, 55, 57]
    for e_i in [28]:
        model_name = "./{}/model/cvaehidden_kl_{}_{}_l{}.npz".format(
            args.dataname, args.dataname, e_i, args.n_latent)
        encdec = CVAEHidden(args)
        encdec = test(args, encdec, model_name, categ_arr, predictFlag=True)
Example #2
def sampleTest():
    # Restore the epoch-29 VAE checkpoint, evaluate it, then run testSentAdd
    # on the loaded model.
    args = Args(False)
    for e_i in [29]:
        encdec = VAE(args)
        model_name = "./{}/model/vae_biconcat_kl_{}_{}_l{}.npz".format(
            args.dataname, args.dataname, e_i, args.n_latent)
        encdec = test(args, encdec, model_name)
        testSentAdd(args, encdec)
Example #3
def load_or_calculate_forecasts(pt, t_model, dataset, model_name):
    # Reads the module-level globals use_existing_results, train, and
    # batch_size from the surrounding script.
    dir_path = '/media/projects/daniel_lstm/results_log/' + model_name
    file_path = dir_path + '/%d.pt' % pt

    if use_existing_results and not train:  # Always want new results if retrained
        if os.path.isfile(file_path):
            y, yhat = torch.load(file_path)
        else:
            print('File not found for ', model_name)
            sys.exit(1)  # exit with a nonzero status on a missing results file
    else:
        validation_loader = DataLoader(dataset=dataset, batch_size=batch_size)
        print('Testing')
        y, yhat = tt.test(pt, t_model, validation_loader)
        print('\n')

        os.makedirs(dir_path, exist_ok=True)
        torch.save([y, yhat], file_path)

    return y, yhat
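
This helper caches forecasts on disk and only recomputes them when needed. A minimal sketch of how it might be wired up; the module-level flags, the tt module, and the commented call are assumptions for illustration, not part of the original source:

import os
import sys

import torch
from torch.utils.data import DataLoader
# import tt  # hypothetical test-runner module exposing tt.test(pt, model, loader)

# Module-level globals read by load_or_calculate_forecasts:
use_existing_results = True   # reuse cached forecasts when present
train = False                 # set True after retraining to force recomputation
batch_size = 64

# Hypothetical call, with the model and dataset constructed elsewhere:
# y, yhat = load_or_calculate_forecasts(0, t_model, dataset, 'lstm_baseline')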
Example #4
def sampleTest():
    args = Args()
    dir_name = input("input dir name >> ")
    # Restore the run's hyperparameters from its saved parameters.json.
    with open(dir_name + "/parameters.json", "r") as f:
        jsn = json.load(f)
    for jsn_key in jsn:
        setattr(args, jsn_key, jsn[jsn_key])
    args.gpu = -1  # force CPU inference
    encdec = VAE(args)
    epoch = input("input epoch >> ")
    model_name = "./{}/models/aibirds_word_aibirds_word_{}_60".format(
        dir_name, epoch)
    encdec = test(args, encdec, model_name)
    deconverter = txt2xml.txt2xml()
    os.makedirs("make_levels", exist_ok=True)
    sample_size = int(input("input sample_size >> "))
    # Decode one latent sample at a time and write each out as an XML level file.
    for i in range(sample_size):
        tenti = testDec(args, encdec, 1)
        text = deconverter.vector2xml(tenti[0])
        with open("make_levels/level-" + str(i) + ".xml", "w") as f:
            f.write(text)
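
For reference, a hypothetical parameters.json this loader could consume. The keys mirror the Args fields these examples read (dataname, n_latent, gpu); n_latent=60 matches the "_60" suffix in the checkpoint name above, but the real schema is an assumption:

import json

# Emit a plausible parameters.json; the values are illustrative only.
print(json.dumps({"dataname": "aibirds_word", "n_latent": 60, "gpu": -1}, indent=2))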
Example #5
def sampleTestTransfer():
    args = Args(False)
    categ_arr = [91]  # single category id used for the test pass
    # Japanese test sentences, paired below with (source, target) character styles.
    sent_arr_arr = [["皆さん も 見て 下さい"]]  # "Everyone, please take a look too."
    sent_arr_arr.append(["ハロ 〜 ォ 、 ミサト ! 元気 してた ?"])  # "Hello~, Misato! How have you been?"
    sent_arr_arr.append(["ギュネイ が 敵の 核ミサイル 群 を 阻止 してくれた 。 あれが 強化人間 の仕事 だ"])  # "Gyunei stopped the enemy's nuclear missiles. That is the work of an enhanced human."
    # (source character, target character) pairs for the style shift.
    enc_dec_tupls = [("友利奈緒", "イカ娘"), ("アスカ", "シンジ"), ("シャア", "クェス")]
    for e_i in [28]:
        model_name = "./{}/model/cvaehidden_kl_{}_{}_l{}.npz".format(
            args.dataname, args.dataname, e_i, args.n_latent)
        encdec = CVAEHidden(args)
        encdec = test(args,
                      encdec,
                      model_name,
                      categ_arr[:1],
                      predictFlag=False)
        for (enc_chara, dec_chara), sent_arr in zip(enc_dec_tupls,
                                                    sent_arr_arr):
            print("{}→{}".format(enc_chara, dec_chara))
            # Map each character name to its category id, then re-decode the
            # sentences in the target character's style.
            enc_cat = [encdec.categ_vocab.stoi(enc_chara)]
            dec_cat = [encdec.categ_vocab.stoi(dec_chara)]
            encdec.shiftStyle(sent_arr, enc_cat, dec_cat)
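
The categ_vocab.stoi calls map a character name to its integer category id. A minimal sketch of that assumed vocabulary interface (hypothetical, not the project's actual class):

class CategVocab:
    # Hypothetical category vocabulary: bidirectional name <-> id mapping.
    def __init__(self, names):
        self._stoi = {name: i for i, name in enumerate(names)}
        self._itos = list(names)

    def stoi(self, name):
        return self._stoi[name]

    def itos(self, idx):
        return self._itos[idx]

vocab = CategVocab(["友利奈緒", "イカ娘", "アスカ", "シンジ", "シャア", "クェス"])
print(vocab.stoi("アスカ"))  # -> 2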
Example #6
def train_core(args, trainloader, testloader):
    args.best_test_acc = 0  # best test accuracy
    args.start_epoch = 0  # start from epoch 0 or last checkpoint epoch
    args.lr = args.lr_start

    # Model
    print(
        '\n==> Building model: {} with activation {} and optimizer {} and random seed {}.'
        .format(args.current_model, args.current_activation, args.optimizer,
                args.seed))
    print(
        '>>This is model architecture #{} of {} and model permutation #{} out of a total of {} permutations.'
        .format(args.model_position + 1,
                len(args.model_list), args.permutation_number,
                args.model_permutations))

    try:
        # Build the constructor call from config strings, then evaluate it.
        model_string = str(args.current_model) + '(activation=' + str(
            args.current_activation) + ')'
        net = eval(model_string)
    except TypeError:
        # This model's constructor does not take an activation argument.
        print("This model uses its default activation only")
        args.current_activation = 'Disabled'
        model_string = str(args.current_model) + '()'
        net = eval(model_string)

    # print model parameters using function from utils
    args.total_model_params = print_model_params(net)

    ################################################################
    ###################### CUDA settings ###########################
    ################################################################

    torch.manual_seed(args.seed)

    if args.use_cuda:
        torch.cuda.manual_seed_all(args.seed)
        net.cuda()

        if args.gpus:
            args.gpus = [int(i) for i in args.gpus.split(',')]
            torch.cuda.set_device(args.gpus[0])
        else:
            net = torch.nn.DataParallel(net,
                                        device_ids=range(
                                            torch.cuda.device_count()))

        cudnn.deterministic = True
        # Note: benchmark=True lets cuDNN select algorithms non-deterministically,
        # which can defeat the deterministic flag above.
        cudnn.benchmark = args.optimize_cudnn

    #######################################################
    ############# Loss and Optimizer Settings #############
    #######################################################

    criterion = nn.CrossEntropyLoss()

    # Initialize the learning rate settings

    if args.lr_fixed_stages:
        print("Using lr_fixed_stages method to update learning rates")
        args.epoch_increment = args.epochs // args.lr_stages
        args.epoch_threshold = args.epoch_increment
        args.lr_schedule = "Stages" + "_" + str(args.lr_stages)

    elif args.use_cycle_lr_list:
        print("Using cycle_lr_list method to update learning rates")

        # Overwrite defaults with settings from the module-level hyperparams
        # dict if available; .get avoids a KeyError when the key is absent.
        if hyperparams.get('cycle_lr_list'):
            args.cycle_lr_list = hyperparams['cycle_lr_list']
        args.cycle_lr_list = eval(args.cycle_lr_list)  # parse the list from its string form
        # commented out since should use lr_change_n_epoch instead
        # args.epoch_increment = args.epochs // len(args.cycle_lr_list)
        # args.epoch_threshold = args.epoch_increment
        args.lr_schedule = "CycleList"  #+ '_' + str(args.cycle_lr_list)
        print('Using lr cycle list: ', args.cycle_lr_list)
        args.lr = args.cycle_lr_list.pop(0)

    elif args.decreasing_cycle_lr:
        print("Using decreasing cycle method to update learning rates")
        args.lr_schedule = "DecreaseCycle"

    elif args.lr_decay:
        print("Using learing rate decay to update learning rates")
        args.lr_schedule = "LRDecay"

    elif args.inepoch_lr_decay:

        # Overwrite defaults with settings from hyperparams if available.
        if hyperparams.get('inepoch_max'):
            args.inepoch_max = hyperparams['inepoch_max']
        if hyperparams.get('inepoch_min'):
            args.inepoch_min = hyperparams['inepoch_min']
        if hyperparams.get('inepoch_max_decay'):
            args.inepoch_max_decay = hyperparams['inepoch_max_decay']
        if hyperparams.get('inepoch_min_decay'):
            args.inepoch_min_decay = hyperparams['inepoch_min_decay']
        if hyperparams.get('inepoch_batch_step'):
            args.inepoch_batch_step = hyperparams['inepoch_batch_step']

        print(
            "Using in-epoch learning rate decay to train with max {} and min {}"
            .format(args.inepoch_max, args.inepoch_min))
        print("Max decay rate: {}  Min decay rate {}".format(
            args.inepoch_max_decay, args.inepoch_min_decay))
        args.lr_schedule = "InEpochDecay"

    # Build the optimizer from its configured class name via eval,
    # passing lr (and momentum when set) as constructor arguments.
    if args.lr_momentum:
        optimizer = eval(args.optimizer + '(net.parameters(), lr=' +
                         str(args.lr) + ', momentum=' + str(args.lr_momentum) +
                         ')')
    else:
        optimizer = eval(args.optimizer + '(net.parameters(), lr=' +
                         str(args.lr) + ')')

    #######################################################################
    ############### Set up files to save training info into ###############
    #######################################################################

    # Prepare to save training info to csv file
    if args.save_training_info:
        args = save_training_info(args)

    ##################################################
    ############### Main training loop ###############
    ##################################################

    for epoch in range(args.start_epoch, args.start_epoch + args.epochs):
        args, optimizer = train(epoch, net, criterion, optimizer, trainloader,
                                args)
        test(epoch, net, criterion, optimizer, testloader, args)

        ################################################################
        ################## Updating the learning rate ##################
        ################################################################

        if args.decreasing_cycle_lr:
            args, net, optimizer = decreasing_cycle_lr_update(
                epoch, args, net, optimizer)
        elif args.use_cycle_lr_list:
            args, net, optimizer = list_cycle_lr_update(
                epoch, args, net, optimizer)
        else:
            args, net, optimizer = update_lr(epoch, args, net, optimizer)

    if args.save_training_info:
        args.trainfile.close()
        args.testfile.close()

    if args.make_plots:
        make_loss_and_accuracy_plots(args)

    return args, optimizer, net
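
The optimizer above is constructed by eval-ing a string assembled from config values. A safer equivalent sketch using getattr; it assumes args.optimizer names a class in torch.optim (e.g. "SGD"), whereas the original resolves whatever name the string contains:

import torch.optim as optim


def build_optimizer(net, args):
    # Resolve the optimizer class by name, e.g. args.optimizer = "SGD" or "Adam".
    opt_cls = getattr(optim, args.optimizer)
    kwargs = {"lr": args.lr}
    if getattr(args, "lr_momentum", None):
        kwargs["momentum"] = args.lr_momentum  # SGD-style optimizers only
    return opt_cls(net.parameters(), **kwargs)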