Example n. 1
if args.logflag == 'on':
    import log
    etime = time.clock()
    log.write_cnn(N, N_test,
                  batchsize,
                  'CNN: ' + args.net,
                  stime,
                  etime,
                  train_mean_loss,
                  train_ac,
                  test_mean_loss,
                  test_ac,
                  epoch,
                  LOG_FILENAME='log.txt')

if args.plotflag == 'on':
    import plot
    plot.plot_result(train_ac,
                     test_ac,
                     train_mean_loss,
                     test_mean_loss,
                     savename='result_cnn.jpg')

# Save the model and the optimizer
if args.saveflag == 'on':
    serializers.save_hdf5('cifar10_alex.model', model)
    serializers.save_hdf5('cifar10_alex.state', optimizer)
"""
def validation_parallel(model1, model2, data, mean_loss, ac, N, batchsize):
    sum_accuracy, sum_loss = 0, 0
    for i in six.moves.range(0, N, batchsize):
        val_x_batch = np.reshape(
            data['test']['x'][i:i + batchsize],
            (batchsize, 3, model1.insize, model1.insize))  # was the global `model`
        val_y_batch = data['test']['y'][i:i + batchsize]
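
For reference, a minimal sketch of the plot.plot_result helper this script imports, assuming it simply draws the per-epoch accuracy and mean-loss curves and writes them to savename (the real module is not shown in the excerpt):

import matplotlib
matplotlib.use('Agg')  # headless backend so savefig works without a display
import matplotlib.pyplot as plt


def plot_result(train_ac, test_ac, train_mean_loss, test_mean_loss,
                savename='result_cnn.jpg'):
    # One point per epoch for train/test accuracy and mean loss.
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
    ax1.plot(train_ac, label='train')
    ax1.plot(test_ac, label='test')
    ax1.set_xlabel('epoch')
    ax1.set_ylabel('accuracy')
    ax1.legend()
    ax2.plot(train_mean_loss, label='train')
    ax2.plot(test_mean_loss, label='test')
    ax2.set_xlabel('epoch')
    ax2.set_ylabel('mean loss')
    ax2.legend()
    fig.savefig(savename)
    plt.close(fig)
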
Example n. 2
            if done:
                print(f'episode: {episode+1}/{episodes}, score: {total_reward}, steps: {step}, epsilon: {agent.epsilon}')
                save_model(id, agent, best_score, total_reward)
                break

        save_model(id, agent, best_score, total_reward)
        episode_rewards.append(total_reward)
    return episode_rewards


if __name__ == '__main__':

    params = dict()
    params['name'] = None
    params['gamma'] = 0.95
    params['batch_size'] = 500
    params['epsilon'] = 1
    params['epsilon_min'] = 0.01
    params['epsilon_max'] = 1
    params['epsilon_decay'] = 0.02
    params['learning_rate'] = 0.7

    results = dict()
    episodes = 500

    env = Snake()
    sum_of_rewards = train_dqn(episodes, env)
    results[params['name']] = sum_of_rewards

    plot_result(results, direct=True, k=20)
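
A minimal sketch of the plot_result(results, direct=True, k=20) call above, assuming results maps a run name to its list of episode rewards and k is a smoothing window (signature inferred from the call site; behaviour assumed):

import numpy as np
import matplotlib.pyplot as plt


def plot_result(results, direct=False, k=20):
    # results: {run name: list of per-episode total rewards}
    for name, rewards in results.items():
        rewards = np.asarray(rewards, dtype=float)
        if k > 1 and len(rewards) >= k:
            # moving average over a window of k episodes
            rewards = np.convolve(rewards, np.ones(k) / k, mode='valid')
        plt.plot(rewards, label=str(name))
    plt.xlabel('episode')
    plt.ylabel('total reward')
    plt.legend()
    if direct:
        plt.show()  # show interactively
    else:
        plt.savefig('rewards.png')
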
Example n. 3
from Chromosome import Chromosome
from ES import process_genes, get_best_gene
from plot import plot_result
from file_handler import read_from_file
import numpy as np
input_dots = read_from_file("./Dataset/Dataset1.csv")
chromosome = Chromosome(500, -.1, .1)
#.903 .51
fitness_array = []
while sorted(chromosome.evaluate(input_dots), reverse=True)[0] < 10:
    chromosome, fitness_array = process_genes(chromosome, input_dots, 10**-1,
                                              39, fitness_array)
print(sorted(chromosome.evaluate(input_dots), reverse=True)[0])
a, b = get_best_gene(chromosome, input_dots)
print(a, "  ", b)
plot_result(input_dots, a, b, fitness_array)


def find_loss(a, b, input_dots):
    z_array = []
    loss_count = 0
    for dot in input_dots:
        add_new = True
        zprim = dot[0] * a + dot[1] * b
        for z in z_array:
            if np.abs(zprim - z) < .01:
                add_new = False
                loss_count += 1
                break
        if add_new:
            z_array.append(zprim)
    # assumed: return the collision count (the excerpt ends without a return)
    return loss_count
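
A quick check of find_loss (completed above with an assumed return loss_count): two dots whose projections z' = a*x + b*y land within 0.01 of each other count as one collision. The coefficients echo the .903 / .51 comment in the script:

dots = [(0.0, 0.0), (1.0, 1.0), (1.0, 1.001)]
# projections: 0.0, 1.413, 1.41351 -> the last two collide
print(find_loss(0.903, 0.51, dots))  # 1
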
Example n. 4
        t = chainer.Variable(xp.asarray(val_y_batch), volatile='on')
        loss = model(x, t)

        sum_loss += float(loss.data) * len(t.data)
        sum_accuracy += float(model.accuracy.data) * len(t.data)

    print('test  mean loss={}, accuracy={}'.format(
        sum_loss / N_test, sum_accuracy / N_test))
    test_mean_loss.append(sum_loss / N_test)
    test_ac.append(sum_accuracy / N_test)


if args.logflag == 'on':
    import log
    etime = time.clock()
    log.write_cnn(N, N_test, batchsize, 'CNN: ' + args.net, stime, etime,
                  train_mean_loss, train_ac, test_mean_loss, test_ac, epoch,
                  LOG_FILENAME='log.txt')


if args.plotflag == 'on':
    import plot
    plot.plot_result(train_ac, test_ac, train_mean_loss, test_mean_loss,
                     savename='result_cnn.jpg')


# Save the model and the optimizer
if args.saveflag == 'on':
    serializers.save_hdf5('cifar10_alex.model', model)
    serializers.save_hdf5('cifar10_alex.state', optimizer)
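
A minimal sketch of the log.write_cnn helper assumed by Examples 1 and 4: append a one-line run summary to LOG_FILENAME (the real module is not shown in the excerpts, so the format is hypothetical):

def write_cnn(N, N_test, batchsize, model_name, stime, etime,
              train_mean_loss, train_ac, test_mean_loss, test_ac, epoch,
              LOG_FILENAME='log.txt'):
    # Append one summary line per run: sizes, timing and final metrics.
    with open(LOG_FILENAME, 'a') as f:
        f.write('{} | train {} / test {} | batchsize {} | {} epochs | '
                '{:.1f}s | train loss {:.4f} acc {:.4f} | '
                'test loss {:.4f} acc {:.4f}\n'.format(
                    model_name, N, N_test, batchsize, epoch, etime - stime,
                    train_mean_loss[-1], train_ac[-1],
                    test_mean_loss[-1], test_ac[-1]))
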
Example n. 5
def train(gpu, method, epoch, batchsize, n_unit, conv_layers, dataset, smiles,
          M, n_split, split_idx, order):
    n = len(dataset)
    assert len(order) == n
    left_idx = (n // n_split) * split_idx
    is_right_most_split = (n_split == split_idx + 1)
    if is_right_most_split:
        test_order = order[left_idx:]
        train_order = order[:left_idx]
    else:
        right_idx = (n // n_split) * (split_idx + 1)
        test_order = order[left_idx:right_idx]
        train_order = np.concatenate([order[:left_idx], order[right_idx:]])

    new_order = np.concatenate([train_order, test_order])
    n_train = len(train_order)

    # Standard Scaler for labels
    ss = StandardScaler()
    labels = dataset.get_datasets()[-1]
    train_label = labels[new_order[:n_train]]
    ss = ss.fit(train_label)  # fit on the training split only
    labels = ss.transform(dataset.get_datasets()[-1])
    dataset = NumpyTupleDataset(*(dataset.get_datasets()[:-1] + (labels, )))

    dataset_train = SubDataset(dataset, 0, n_train, new_order)
    dataset_test = SubDataset(dataset, n_train, n, new_order)

    # Network
    model = predictor.build_predictor(method,
                                      n_unit,
                                      conv_layers,
                                      1,
                                      dropout_ratio=0.25,
                                      n_layers=1)

    train_iter = I.SerialIterator(dataset_train, batchsize)
    val_iter = I.SerialIterator(dataset_test,
                                batchsize,
                                repeat=False,
                                shuffle=False)

    def scaled_abs_error(x0, x1):
        if isinstance(x0, Variable):
            x0 = cuda.to_cpu(x0.data)
        if isinstance(x1, Variable):
            x1 = cuda.to_cpu(x1.data)
        scaled_x0 = ss.inverse_transform(cuda.to_cpu(x0))
        scaled_x1 = ss.inverse_transform(cuda.to_cpu(x1))
        diff = scaled_x0 - scaled_x1
        return np.mean(np.absolute(diff), axis=0)[0]

    regressor = Regressor(model,
                          lossfun=F.mean_squared_error,
                          metrics_fun={'abs_error': scaled_abs_error},
                          device=gpu)

    optimizer = O.Adam(alpha=0.0005)
    optimizer.setup(regressor)

    updater = training.StandardUpdater(train_iter,
                                       optimizer,
                                       device=gpu,
                                       converter=concat_mols)

    dir_path = get_dir_path(batchsize, n_unit, conv_layers, M, method)
    dir_path = os.path.join(dir_path, str(split_idx) + "-" + str(n_split))
    os.makedirs(dir_path, exist_ok=True)
    print('creating ', dir_path)
    np.save(os.path.join(dir_path, "test_idx"), np.array(test_order))

    trainer = training.Trainer(updater, (epoch, 'epoch'), out=dir_path)
    trainer.extend(
        E.Evaluator(val_iter, regressor, device=gpu, converter=concat_mols))
    trainer.extend(E.LogReport())
    trainer.extend(
        E.PrintReport([
            'epoch', 'main/loss', 'main/abs_error', 'validation/main/loss',
            'validation/main/abs_error', 'elapsed_time'
        ]))
    trainer.extend(E.ProgressBar())
    trainer.run()

    # --- Plot regression evaluation result ---
    dataset_test = SubDataset(dataset, n_train, n, new_order)
    batch_all = concat_mols(dataset_test, device=gpu)
    serializers.save_npz(os.path.join(dir_path, "model.npz"), model)
    result = model(batch_all[0], batch_all[1])
    result = ss.inverse_transform(cuda.to_cpu(result.data))
    answer = ss.inverse_transform(cuda.to_cpu(batch_all[2]))
    plot_result(result,
                answer,
                save_filepath=os.path.join(dir_path, "result.png"))

    # --- Plot regression evaluation result end ---
    np.save(os.path.join(dir_path, "output.npy"), result)
    np.save(os.path.join(dir_path, "answer.npy"), answer)
    smiles_part = np.array(smiles)[test_order]
    np.save(os.path.join(dir_path, "smiles.npy"), smiles_part)

    # calculate saliency and save it.
    save_result(dataset, model, dir_path, M)
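
The index arithmetic at the top of train is plain K-fold splitting; a self-contained sketch (toy sizes assumed) showing that the right-most split absorbs the remainder when n is not divisible by n_split:

import numpy as np

n, n_split = 10, 3
order = np.random.permutation(n)
for split_idx in range(n_split):
    left_idx = (n // n_split) * split_idx
    if n_split == split_idx + 1:  # right-most split keeps the tail
        test_order = order[left_idx:]
    else:
        right_idx = (n // n_split) * (split_idx + 1)
        test_order = order[left_idx:right_idx]
    print(split_idx, len(test_order))  # 3, 3, 4: remainder in the last fold
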
Example n. 6
        # accumulate this batch's results; np.stack takes an axis as its
        # second argument, so np.stack(y_test_pred, out) was a bug
        # (assumed intent: concatenation)
        y_test_pred = np.concatenate([y_test_pred, out])
        y_tests = np.concatenate([y_tests, ttt])
    test_inputs.append(inputs)

############ EVAL ################

#y_test_pred = model(X_test)

# invert predictions
#y_train_pred = scaler.inverse_transform(y_train_pred)
#y_train = scaler.inverse_transform(y_train)
#y_test_pred = scaler.inverse_transform(y_test_pred)
#y_test = scaler.inverse_transform(y_test)

# calculate root mean squared error
#trainScore = math.sqrt(mean_squared_error(y_train[:,0], y_train_pred[:,0]))
#print('Train Score: %.2f RMSE' % (trainScore))
#testScore = math.sqrt(mean_squared_error(y_test[:,0], y_test_pred[:,0]))
#print('Test Score: %.2f RMSE' % (testScore))

case_num = list(range(0, 10))
for c in case_num:
    print("Case:", c)
    print("GT and pred:", y_test[c], y_test_pred[c])

    original = scaler.inverse_transform(test_inputs[c])
    original = [y for x in original for y in x]
    #print("Window:", original)

    plot_result(original, y_test[c], y_test_pred[c], c)
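
A minimal sketch of the plot_result(original, y_test[c], y_test_pred[c], c) call, assuming it draws the inverse-scaled input window and marks the ground truth against the prediction one step past it (signature inferred from the call site):

import matplotlib.pyplot as plt


def plot_result(window, y_true, y_pred, case_idx):
    plt.figure()
    plt.plot(range(len(window)), window, label='input window')
    # true vs. predicted value at the step after the window
    plt.scatter([len(window)], [float(y_true)], marker='o', label='ground truth')
    plt.scatter([len(window)], [float(y_pred)], marker='x', label='prediction')
    plt.xlabel('time step')
    plt.legend()
    plt.savefig('case_{}.png'.format(case_idx))
    plt.close()
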