Example #1
def main(opt):
    # Build the dataset wrapper and report its statistics
    dataset = data.Dataset(dataset=opt.dataset, pool_size=opt.pool_size, sample_size=opt.sample_size)
    dataset.show_inf()
    feature_size, att_size = dataset.feature_size, dataset.att_size
    # Instantiate the discriminator/generator pair on the GPU and run the training loop
    discriminator = model.Discriminator(feature_size, att_size, opt.t1).cuda()
    generator = model.Generator(feature_size, att_size, opt.t2).cuda()
    train2.train(discriminator, generator, dataset, d_lr=opt.d_lr, g_lr=opt.g_lr,
                 batch_size=opt.batch_size, alpha=opt.alpha, epochs=opt.epochs)
Example #2
def main(_):
    # Print FLAGS values
    pprint(FLAGS.flag_values_dict())

    # Define GPU configuration
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
    gpu_config = tf.ConfigProto()
    gpu_config.gpu_options.allow_growth = True

    # Define model name
    if not FLAGS.phase:
        setup_list = [
            f"ngf_{FLAGS.ngf}", f"ndf_{FLAGS.ndf}", f"nz_{FLAGS.nz}",
            f"lrD_{FLAGS.lrD}", f"lrG_{FLAGS.lrG}", f"var_{FLAGS.var}",
            f"phase_{FLAGS.phase}", f"da_{FLAGS.da}", f"clip_{FLAGS.clip}"
        ]
    else:
        setup_list = [
            f"ngf_{FLAGS.ngf}", f"ndf_{FLAGS.ndf}", f"nz_{FLAGS.nz}",
            f"lrD_{FLAGS.lrD}", f"lrG_{FLAGS.lrG}", f"var_{FLAGS.var}",
            f"phase_{FLAGS.phase}", f"da_{FLAGS.da}", f"clip_{FLAGS.clip}",
            f"nhl_{FLAGS.nhl}", f"nhw_{FLAGS.nhw}"
        ]

    model_name = '_'.join(setup_list)
    print(f"Model name: {model_name}")

    M = model(FLAGS, gpu_config)
    M.sess.run(tf.global_variables_initializer())

    if FLAGS.phase:
        # Previously learned autoencoder model name
        setup_list = [
            f"ngf_{FLAGS.ngf}", f"ndf_{FLAGS.ndf}", f"nz_{FLAGS.nz}",
            f"lrD_{FLAGS.lrD}", f"lrG_{FLAGS.lrG}", f"var_{FLAGS.var}",
            f"phase_0", f"da_{FLAGS.da}", f"clip_{FLAGS.clip}"
        ]
        lgan_name = '_'.join(setup_list)
        # just for now
        # lgan_name = 'ngf_64_ndf_64_nz_64_lrD_5e-05_lrG_0.001_dg_1_aug_0_lw_20.0_ow_0.01_var_3.0_phase_0_nosig'
        # lgan_name = 'ngf_64_ndf_64_nz_16_lw_20.0_ow_0.01_var_3.0_phase_0'
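        # Collect the LGAN generator's trainable variables and restore them from the latest checkpoint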
        var_lgan = tf.get_collection('trainable_variables', 'lgan/gen')
        path = tf.train.latest_checkpoint(
            os.path.join(FLAGS.ckptdir, lgan_name))
        tf.train.Saver(var_lgan).restore(M.sess, path)
        print(colored(f"LGAN model is restored from {path}", "blue"))

    saver = tf.train.Saver()

    # Train the main model
    train(M, FLAGS, saver=saver, model_name=model_name)
Example #3
def run(args):
    run_args = AttrDict()
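    # Default hyperparameters for the human-to-cartoon translation run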
    args_dict = {
        'dis_learning_rate': 0.0002,
        'gen_learning_rate': 0.0002,
        'batch_size': 1,
        'num_epochs': 10,
        'human_root_dir': "../trainHuman/",
        'cartoon_root_dir': "../trainCartoon/",
        'act_fn_gen': 'relu',
        'act_fn_dis': 'lrelu',
        'norm_type': 'instance',
        'num_res': 3,
        'dropout': False,
        'lambda_cycle': 10,
        'gray': False,
        'Conv2T': False
    }
    if args.train:
        print("---Training on 10 images for 10 epochs to show that the training loop works---")
    else:
        print("---Evaluating the specified trained model now---")
    folder_path = ""
    isVAE = False
    if args.Conv2T:
        args_dict['Conv2T'] = True
        args_dict['norm_type'] = 'batch'
        folder_path = "../conv2T"
    elif args.RegConv:
        args_dict['norm_type'] = 'batch'
        folder_path = "../RegConv"
    elif args.VAE:
        isVAE = True
        folder_path = "../VAE"
    elif args.Gray:
        pass  # grayscale variant keeps the default settings
    run_args.update(args_dict)
    if args.train:
        train2.train(run_args, device)
    else:
        evaluate.evaluate(folder_path, run_args, isVAE=isVAE)
Example #4
HN1, HN2 = 20, 4
EPOCHS = 100
BATCH_SIZE = 50
LR = 0.0007

# Instantiate the network and prepare data
avg_mse = 1
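# Keep re-initializing and retraining until the average test MSE drops below 0.9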
while avg_mse > 0.9:
    net = Net(HN1, HN2)
    training_inputs = training_data[:, 0:4]
    training_labels = training_data[:, 4:]
    test_inputs = testing_data[:, 0:4]
    test_labels = testing_data[:, 4:]

    # Train and test the network
    train(net, training_inputs, training_labels, EPOCHS, LR, BATCH_SIZE)
    avg_mse, predictions_online, predictions_offline = test(
        test_inputs, test_labels, net)
    print(avg_mse)

predictions_online_inverse_transform = scaler_test.inverse_transform(
    predictions_online)
predictions_offline_inverse_transform = scaler_test.inverse_transform(
    predictions_offline)

online = pd.DataFrame(predictions_online_inverse_transform)
offline = pd.DataFrame(predictions_offline_inverse_transform)
avg_mse = pd.DataFrame([avg_mse, 0])

online.to_excel(
    'Data3/Optimised_Networks/manual_online3 {x}_{y}-{z}_{a}_{b}_{c}.xlsx'.
Example #5
HN1 = 15
EPOCHS = 30
BATCH_SIZE = 8
HL = 1  # assumed from the results filename ('..._1_15_30' = HL_HN1_EPOCHS); HL is defined elsewhere in the original script
# Learning rates to sweep
LR = [0.0001, 0.0002, 0.0003, 0.0004, 0.0005, 0.0006, 0.0007, 0.0008, 0.0009,
      0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009,
      0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09,
      0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
MODELS = {}

rnn = RNN(3, 5, 12, HN1, HL)
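# Snapshot the freshly initialized weights so every learning-rate trial starts from the same state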
init_state = copy.deepcopy(rnn.state_dict())
for lr in LR:
    rnn.load_state_dict(init_state)
    training_inputs = training_data[:, 0:5]
    training_labels = training_data[:, 5:]
    test_inputs = testing_data[:, 0:5]
    test_labels = testing_data[:, 5:]

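    # Reshape the flat matrices into sequences: 606 training windows, 2 test windows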
    training_inputs = np.split(training_inputs, 606)
    training_labels = np.split(training_labels, 606)
    test_inputs = np.split(test_inputs, 2)
    test_labels = np.split(test_labels, 2)

    train(rnn, training_inputs, training_labels, EPOCHS, lr, BATCH_SIZE)
    avg_mse = test(test_inputs, test_labels, rnn)

    MODELS['{a}_{x}_{z}_{b}'.format(a=HL, x=HN1, z=EPOCHS, b=lr)] = np.array(avg_mse)

with open('Data2/Search/manual_search_results_{x}HL_lr_GLMAX1_1_15_30.csv'.format(x=HL), 'w') as f:
    for key, value in MODELS.items():
        f.write("%s: %s\n" % (key, value))

print(MODELS)
LR = 0.0009
# BATCH_SIZE must be a list for the sweep below; the original values are not shown
# here, so these are assumed (they mirror the later batch-size sweep in this page)
BATCH_SIZE = [5, 10, 15, 20, 30, 40, 50, 100, 200, 300, 400, 500]
MODELS = {}

rnn = RNN(3, 5, 12, HN1, HL)
init_state = copy.deepcopy(rnn.state_dict())
for bs in BATCH_SIZE:
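    # Restore the common starting weights so each batch size is compared fairly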
    rnn.load_state_dict(init_state)
    training_inputs = training_data[:, 0:5]
    training_labels = training_data[:, 5:]
    test_inputs = testing_data[:, 0:5]
    test_labels = testing_data[:, 5:]

    training_inputs = np.split(training_inputs, 606)
    training_labels = np.split(training_labels, 606)
    test_inputs = np.split(test_inputs, 2)
    test_labels = np.split(test_labels, 2)

    train(rnn, training_inputs, training_labels, EPOCHS, LR, bs)
    avg_mse = test(test_inputs, test_labels, rnn)

    MODELS['{a}_{x}_{z}_{b}_{c}'.format(a=HL, x=HN1, z=EPOCHS, b=LR, c=bs)] = avg_mse

with open('Data2/Search/manual_search_results_{x}HL_bs_GLMAX1_1_15_30.csv'.format(x=HL), 'w') as f:
    for key, value in MODELS.items():
        f.write("%s: %s\n" % (key, value))

print(MODELS)
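# Re-shuffle the training data before the next manual sweep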
np.random.shuffle(training_data)

# Manual Search Training Loop
HL = 2
HN1 = 4
HN2 = 8
EPOCHS = 152
BATCH_SIZE = [5, 10, 15, 20, 30, 40, 50, 100, 200, 300, 400, 500]
LR = 0.006
MODELS = {}

net = Net(HN1, HN2)
init_state = copy.deepcopy(net.state_dict())
for bs in BATCH_SIZE:
    net.load_state_dict(init_state)
    training_inputs = training_data[:, 0:5]
    training_labels = training_data[:, 5:]
    test_inputs = testing_data[:, 0:5]
    test_labels = testing_data[:, 5:]
    
    train(net, training_inputs, training_labels, EPOCHS, LR, bs)
    avg_mse = test(test_inputs, test_labels, net)

    MODELS['{a}_{x}-{y}_{z}_{b}_{c}'.format(a=HL, x=HN1, y=HN2, z=EPOCHS, b=LR, c=bs)] = avg_mse

with open('Data3/Search/manual_search_results_{x}HL_bsTIMETEST.csv'.format(x=HL), 'w') as f:
    for key, value in MODELS.items():
        f.write("%s: %s\n" % (key, value))

print(MODELS)
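    # Wrap the index matrices in LongTensor Variables (pre-0.4 PyTorch autograd API)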
    train_topic_var = Variable(torch.LongTensor(topic_index))
    train_text_var = Variable(torch.LongTensor(text_index))
    train_label_var = Variable(torch.LongTensor(label_index))

    # dev_topic_var = Variable(torch.LongTensor(dev_topic_index))
    # dev_text_var = Variable(torch.LongTensor(dev_text_index))
    # dev_label_var = Variable(torch.LongTensor(dev_label_index))

    test_topic_var = Variable(torch.LongTensor(test_topic_index))
    test_text_var = Variable(torch.LongTensor(test_text_index))
    test_label_var = Variable(torch.LongTensor(test_label_index))
    # print("ssss", test_topic_var)
    # print(test_text_var)
    # print(test_label_var)

    # dev_iter = dataProcessing.create_batches(dev_topic_var, dev_text_var, dev_label_var, params.batch_size)
    # print(dev_iter)
    test_iter = dataProcessing.create_batches(test_topic_var, test_text_var,
                                              test_label_var,
                                              params.batch_size)
    # print(test_iter)
    # print("train_var",train_topic_var)          #2414x6
    # print("train_text_var",train_text_var)      #2414x35
    # print("train_label_var",train_label_var)    #2414x1
    if params.use_lstm:
        model = model_BiLSTM.BiLSTM(params)
        if params.cuda_use:
            model = model.cuda()
        train2.train(train_topic_var, train_text_var, train_label_var, model,
                     label2id, id2label, params, test_iter)