Example 1
from tqdm import tqdm  # progress bar for the training loop

# ConvNet and get_data are assumed to be defined elsewhere in this project.
def experiment(threshold,
               iterations,
               train_loss,
               n_conv,
               optimizer,
               batch_size=1,
               batch_norm=False,
               learning_rate=1e-3,
               summary_dir=None):
    model = ConvNet(filters=4,
                    n_conv=n_conv,
                    train_loss=train_loss,
                    batch_norm=batch_norm,
                    optimizer=optimizer,
                    learning_rate=learning_rate,
                    summary_dir=summary_dir)
    print('train_loss:', train_loss.value, 'optimizer:', optimizer.value,
          'n_conv:', n_conv, 'batch_norm:', batch_norm, 'batch_size:',
          batch_size, 'learning_rate:', learning_rate)
    ret = dict()
    val_input_batch, val_output_batch = get_data(threshold, 100, verbose=True)
    best_accuracy = (0.0, 0)
    for i in tqdm(range(iterations)):
        input_batch, output_batch = get_data(threshold, batch_size)
        out = model.train(input_batch, output_batch)
        if i == 0:
            # Initialize a history list for every metric returned by train().
            for k in out:
                ret[k] = []
            ret['accuracy'] = []
        for k, v in out.items():
            ret[k].append(v)

        # Evaluate on the held-out validation batch every 250 iterations.
        if i % 250 == 0:
            accuracy = model.accuracy(val_input_batch, val_output_batch)
            if accuracy > best_accuracy[0]:
                best_accuracy = (accuracy, i)
            ret['accuracy'].append((i, accuracy))
            #print('[%d] accuracy: %.3g' % (i, accuracy))
    print('Best accuracy %.3g at iteration %d.' % best_accuracy)
    return ret
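For context, a hypothetical call might look like the sketch below. The Loss and Opt enums are assumptions inferred from the .value accesses in experiment(); the names, members, and settings are illustrative only, not part of the original code.

from enum import Enum

# Hypothetical enums; the real project presumably defines its own.
class Loss(Enum):
    CROSS_ENTROPY = 'cross_entropy'

class Opt(Enum):
    ADAM = 'adam'

history = experiment(threshold=0.5,
                     iterations=5000,
                     train_loss=Loss.CROSS_ENTROPY,
                     n_conv=2,
                     optimizer=Opt.ADAM,
                     batch_size=32,
                     batch_norm=True)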
Example 2
import numpy as np

# ConvNet, SGD, and the (x_train, t_train, x_test, t_test) arrays are assumed
# to be defined/loaded elsewhere.
train_loss_list = []
train_acc_list = []
test_acc_list = []

iters_num = 10000
batch_size = 100
train_size = x_train.shape[0]
iter_per_epoch = max(train_size // batch_size, 1)  # integer division avoids a float modulo below

net = ConvNet()
optim = SGD(net.params, lr=0.1, momentum=0.9)
# optim = AdaGrad(net.params)
# optim = Adam(net.params)

for i in range(iters_num):
    # Sample a random mini-batch.
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]

    # Compute gradients and apply one optimizer update.
    grad = net.gradient(x_batch, t_batch)
    net.params = optim.update(net.params, grad)

    loss = net.loss(x_batch, t_batch)
    train_loss_list.append(loss)

    # Evaluate on the full train/test sets once per epoch.
    if i % iter_per_epoch == 0:
        train_acc = net.accuracy(x_train, t_train)
        test_acc = net.accuracy(x_test, t_test)
        train_acc_list.append(train_acc)
        test_acc_list.append(test_acc)
        print("train acc, test acc | " + str(train_acc) + ", " + str(test_acc))