Example #1
def run_train_split_test(method, moea, target):
    moea_name = dictionaries.get_moea_name(moea)
    target_name = dictionaries.get_target_composition(target)
    method_name = dictionaries.get_model_method_name(method)
    folder_name = 'split-train/' + method_name + '/' + moea_name + '/' + target_name
    type = 2
    helpers.test_model(folder_name=folder_name, type=type, predict_model=method)
Example #2
def run_msdp_test(method, moea, target):
    moea_name = dictionaries.get_moea_name(moea)
    target_name = dictionaries.get_target_composition(target)
    method_name = dictionaries.get_model_method_name(method)
    folder_name = 'multi-objective/' + method_name + '/' + moea_name + '/' + target_name
    type = 2
    helpers.test_model(folder_name=folder_name, type=type, predict_model=method)
Example #3
buffer_size = 5000   # capacity of the experience replay buffer

c = 750
c_step = 0
e = 0.01             # likely the small priority constant for prioritized replay
a = 0.6              # likely the prioritization exponent (alpha)
b = 0.4              # likely the importance-sampling exponent (beta)
input_depth = 37     # state size of the Unity Banana environment
hidden0 = 128        # hidden layer widths
hidden1 = 128
hidden2 = 128
output_depth = 4     # action size of the Unity Banana environment
lr = 5e-4            # learning rate; not defined in this snippet, value assumed here

replay = []

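# Build the model, its loss function, and the optimizer from the sizes and learning rate above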
model, loss_fn, optimizer = get_model(input_depth, hidden0, hidden1, hidden2,
                                      output_depth, lr)

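# Restore model weights, optimizer state, and the replay buffer from a saved checkpoint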
filename = 'checkpoint-2000.pt'
model, optimizer, replay = load_model(model, optimizer, filename)

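# Start the Unity Banana environment and select its first (default) brain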
env = UnityEnvironment(file_name="Banana.app")
brain_name = env.brain_names[0]
brain = env.brains[brain_name]

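# Evaluate the loaded agent for the configured number of attempts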
test_actor_env = (model, brain_name, env)
attempts = 100
filename = 'test_scores-{}.png'.format(attempts)

test_model(test_actor_env, attempts, filename, viewableSpeed=False)
Example #4
                     m.Linear(Hidden_Units, Hidden_Units), m.ReLU(),
                     m.Linear(Hidden_Units, Hidden_Units), m.Tanh(),
                     m.Linear(Hidden_Units, Output_Units), m.Tanh())

#Instantiate the optimizer
lr = 0.00095
sgd = m.SGD(params=model.param(), lr=lr)

#Train the model
EPOCHS = 150

model, train_error, validation_error = h.train_model(train_data, train_targets,\
                                        validation_data, validation_targets, model, sgd, nb_epochs = EPOCHS)
'''
#Plot both train and validation errors wrt the number of epochs

fig = plt.figure(figsize=(9, 5))
ax = fig.add_subplot(111)
ax.set_title('Evolution of the training and validation errors w.r.t the epoch number.')
plt.plot(train_error, color='blue')
plt.plot(validation_error, color='red')
plt.legend(['Training Error', 'Validation Error'])
ax.set_xlabel('Epochs')
ax.set_ylabel('% of Error')
#plt.savefig('Error_results.png')
plt.grid()
plt.show()
'''

h.test_model(model, test_data, test_targets)
Example #5
def run_msdp_fpa_l1_linear_nsga2_toZero_test():
    folder_name = 'linear/多目标优化_FPA+L1_nsga2_toZero'  # '多目标优化' = 'multi-objective optimization'
    type = 2
    helpers.test_model(folder_name=folder_name, type=type)
Example #6
def run_msdp_fpa_aae_nonz_linear_nsga2_DE_toZero_test():
    folder_name = 'linear/多目标优化_FPA+AAE+numofnonzero_nsga2_DE_toZero'  # '多目标优化' = 'multi-objective optimization'
    type = 2
    helpers.test_model(folder_name=folder_name, type=type)
Example #7
def run_ssdp_linear_CoDE_test():
    folder_name = 'linear/CoDE'
    type = 1
    helpers.test_model(folder_name=folder_name, type=type)
Example #8
def run_ssdpM_test(method, soea):
    type = 2
    soea_name = dictionaries.get_soea_name(soea)
    method_name = dictionaries.get_model_method_name(method)
    folder_name = 'multi-objective/' + method_name + '/' + soea_name
    helpers.test_model(folder_name=folder_name, type=type, predict_model=method)
Example #9
    final_test_acc = 0.
    for epoch in range(num_epochs):

        # Decay learning rate according to decay schedule
        helpers.adjust_learning_rate(optimizer, epoch, learning_rate_table)
        print("Starting Epoch {}/{}. lr = {}".format(
            epoch, num_epochs, learning_rate_table[epoch]))
        # Train
        train_acc, train_loss, percent_real = training_helpers.train_model(
            net, optimizer, ID_trainloader, 10 - NUM_HOLDOUT_CLASSES)
        print(
            "[{}] Epoch [ {} / {} ]; lr: {} TrainAccuracy: {:.5f} TrainLoss: {:.5f} %-Real: {}"
            .format(ITER, epoch, num_epochs, learning_rate_table[epoch],
                    train_acc, train_loss, percent_real))
        # Test
        test_acc, test_loss = helpers.test_model(net, device, ID_testloader,
                                                 MEAN, STD)
        print(
            "\t[{}] Epoch [ {} / {} ]; TestAccuracy: {:.5f} TestLoss: {:.5f}".
            format(ITER, epoch, num_epochs, test_acc, test_loss))
        final_test_acc = test_acc
    STAT_accuracy.append(final_test_acc)
    net.eval()

    # Optional: Save model checkpoint and move to next iter
    #helpers.save_checkpoint({'test_acc': final_test_acc,'state_dict': net.state_dict()}, False, "{}_K{}_J{}_SEED{}_ITER{}".format(SAVE_CKPT,int(100*K), int(NUM_HOLDOUT_CLASSES),SEED,ITER))

    #################################################################################################################
    # Test ID and OOD data

    ### Hook into Net for Mahalanobis layer
Example #10
                        model.word_embeddings.weight.data.numpy(),
                        'pos_embeddings':
                        model.cpu().pos_embeddings.weight.data.numpy(),
                        'optimizer':
                        optimizer.state_dict(),
                    }, LATEST_CHECKPOINT_RELATIVE_PATH,
                    BEST_CHECKPOINT_RELATIVE_PATH, is_best_model)
                break

        print('Finished testing at {}.'.format(
            time.strftime('%d-%m-%Y, %H:%M:%S')))
        logging.info('Finished testing at {}.'.format(
            time.strftime('%d-%m-%Y, %H:%M:%S')))
    elif args.mode == 'test':
        test_loss, test_arc_scores, test_label_scores = test_model(
            model, loss_function, conllu_sentences[args.language]['test'],
            args.language)
        print(test_loss)
    elif args.mode == 'predict':
        prediction_file = open(RESULTS_RELATIVE_PATH,
                               mode='a',
                               encoding='UTF-8')
        formatted_test_file = open(FORMATTED_TEST_RELATIVE_PATH,
                                   mode='a',
                                   encoding='UTF-8')
        for conllu_sentence in conllu_sentences[args.language]['test']:
            # save formatted version of test file
            formatted_test_file.write(str(conllu_sentence))
            formatted_test_file.flush()
            # predict arc scores and labels
            predicted_arcs, predicted_labels = predict(model, conllu_sentence,
Example #11
parser = argparse.ArgumentParser()
parser.add_argument('file', help='model file')
parser.add_argument('dataset', help='dataset we want to test it on')
parser.add_argument('--mode',
                    default='offsets',
                    choices=['offsets', 'times', 'intervals'])
parser.add_argument('--n_samples', default=500, type=int)
args = parser.parse_args()

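# Load the serialized PyTorch model from the file passed on the command line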
with open(args.file, 'rb') as f:
    model = torch.load(f)

dset = load_dataset(args.dataset)

data = test_model(model, dset, args.n_samples)

distr = {}

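# For each test sample, find the peak of the model output z and express it
# relative to g ('offsets'), as an absolute index ('times'), or relative to s ('intervals')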
for i in range(len(data)):

    dset_idx, x, y, z, _ = data[i]
    r, s, g = dset[dset_idx][2]

    peak = np.argmax(z)
    if args.mode == 'offsets':
        val = peak - g
    elif args.mode == 'times':
        val = peak
    elif args.mode == 'intervals':
        val = peak - s
Example #12
### Create model
input_dim = 2
hidden_width = 25
output_dim = 2

model = HL.Sequential([
    HL.Linear(input_dim, hidden_width),
    HL.ReLu(),
    HL.Linear(hidden_width, hidden_width),
    HL.ReLu(),
    HL.Linear(hidden_width, hidden_width),
    HL.Tanh(),
    HL.Linear(hidden_width, output_dim),
    HL.Tanh()
])

### Train model and log training and validation error
model, train_error_list, test_error_list = HL.train_model(train_inputs,
                                                          train_targets,
                                                          validation_inputs,
                                                          validation_targets,
                                                          model,
                                                          learning_rate=0.0001,
                                                          epochs=300)

### Print final training error
print('train_error {:.02f}%'.format(train_error_list[-1]))

### Test error
HL.test_model(model, test_inputs, test_targets)