Example #1
def train(adj, input_feats, target_labels, config, weights=None):
    num_vertices = adj.shape[0]
    label_kind = np.max(target_labels) + 1
    feat_dim = input_feats.shape[-1]
    layer_config = (feat_dim, config['hidden_dim'], label_kind)

    model, loss = GCN("GCN", adj, weights, layer_config)
    # model, loss = MLP("MLP", weights, layer_config)

    # Construct masks for training and testing
    train_size = int(num_vertices * config['train_portion'])
    train_mask = np.zeros(target_labels.shape, dtype=bool)
    train_mask[:train_size] = True
    np.random.shuffle(train_mask)
    test_mask = ~train_mask

    for epoch in range(config['max_epoch']):
        LOG_INFO('Training @ %d epoch...' % (epoch))
        train_net(model, loss, config, input_feats,
                  target_labels, train_mask, label_kind)

        if (epoch + 1) % config['test_epoch'] == 0:
            LOG_INFO('Testing @ %d epoch...' % (epoch))
            test_net(model, loss, input_feats,
                     target_labels, test_mask, label_kind)

        if (epoch + 1) % 50 == 0:
            config['learning_rate'] *= 0.5
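
# Added illustration (not part of the original snippet): the boolean
# train_mask / test_mask built above are intended to be consumed by indexing.
# masked_accuracy below is a hypothetical helper showing that pattern;
# train_net / test_net internals are not shown in this snippet.
import numpy as np

def masked_accuracy(logits, labels, mask):
    # Predict a class per node, then average correctness over the masked nodes only.
    preds = np.argmax(logits, axis=-1)
    return float(np.mean(preds[mask] == labels[mask]))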
Example #2
def main():
    test_loss_list = []
    test_acc_list = []
    train_loss_list = []
    train_acc_list = []

    args = get_parser()

    model, config, loss = get_model(args)

    
    starttime = datetime.datetime.now()    
    for epoch in range(args.max_epoch):
        LOG_INFO('Training @ %d epoch...' % (epoch))
        train_loss, train_acc = train_net(model, loss, config, train_data, train_label, config['batch_size'], config['disp_freq'])
        train_loss_list.extend(train_loss)
        train_acc_list.extend(train_acc)
        if epoch > 5:
            config['learning_rate'] = config['learning_rate'] * args.learning_rate_decay
        if epoch % config['test_epoch'] == 0:
            LOG_INFO('Testing @ %d epoch...' % (epoch))
            test_loss, test_acc = test_net(model, loss, test_data, test_label, config['batch_size'])
            test_loss_list.append(test_loss)
            test_acc_list.append(test_acc)
    endtime = datetime.datetime.now()
    print("total training time:", (endtime - starttime).seconds)

    save(args, train_loss_list, train_acc_list, test_loss_list, test_acc_list)
Example #3
def train_and_save(model, loss, train_data, test_data, train_label,
                   test_label):
    best_test_loss = -1
    update_round_before = 0
    train_loss_list = []
    train_acc_list = []
    test_loss_list = []
    test_acc_list = []
    epoch_final = config['max_epoch']  # plotted x-range; shortened below if early stopping triggers
    for epoch in range(config['max_epoch']):
        LOG_INFO('Training @ %d epoch...' % (epoch))
        train_loss_now, train_acc_now = train_net(model, loss, config,
                                                  train_data, train_label,
                                                  config['batch_size'],
                                                  config['disp_freq'])
        train_loss_list.append(train_loss_now)
        train_acc_list.append(train_acc_now)
        if epoch % config['test_epoch'] == 0:
            LOG_INFO('Testing @ %d epoch...' % (epoch))
            test_loss_now, test_acc_now = test_net(model, loss, test_data,
                                                   test_label,
                                                   config['batch_size'])
            test_loss_list.append(test_loss_now)
            test_acc_list.append(test_acc_now)
            if best_test_loss == -1:
                update_round_before = 0
                best_test_loss = test_loss_now
            elif test_loss_now <= best_test_loss:
                update_round_before = 0
                best_test_loss = test_loss_now
            else:
                update_round_before += 1
                if update_round_before >= 5:
                    epoch_final = epoch + 1
                    break
    save_dir = os.path.join('result', config['name'])
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    result_dict = {
        "train_loss": train_loss_list,
        "train_acc": train_acc_list,
        "test_loss": test_loss_list,
        "test_acc": test_acc_list
    }
    with open(os.path.join(save_dir, "result.json"), 'w') as f:
        json.dump(result_dict, f)
    x = range(epoch_final)
    plt.cla()
    plt.plot(x, train_loss_list, label="train loss")
    plt.plot(x, test_loss_list, label="test loss")
    plt.legend()
    plt.savefig(os.path.join(save_dir, "loss.png"))
    plt.cla()
    plt.plot(x, train_acc_list, label="train acc")
    plt.plot(x, test_acc_list, label="test acc")
    plt.legend()
    plt.savefig(os.path.join(save_dir, "acc.png"))
Example #4
def run(net_func, save_loss_path, save_acc_path, result_dir="result/"):
    model, config = net_func()
    loss_, acc_ = [], []  # note: `loss` used below is expected to be a module-level global (net_func only returns model and config)

    for epoch in range(config['max_epoch']):
        LOG_INFO('Training @ %d epoch...' % (epoch))
        a, b = train_net(model, loss, config, train_data, train_label,
                         config['batch_size'], config['disp_freq'])
        loss_ += a
        acc_ += b
        if epoch % config['test_epoch'] == 0:
            LOG_INFO('Testing @ %d epoch...' % (epoch))
            test_net(model, loss, test_data, test_label, config['batch_size'])
    test_net(model, loss, test_data, test_label, config['batch_size'])

    if not os.path.exists(result_dir):
        os.mkdir(result_dir)
    np.save(result_dir + save_loss_path, loss_)
    np.save(result_dir + save_acc_path, acc_)
def train(model, loss, train_data, test_data, train_label, test_label):
    best_test_loss = -1
    update_round_before = 0
    for epoch in range(config['max_epoch']):
        LOG_INFO('Training @ %d epoch...' % (epoch))
        train_loss_now, train_acc_now = train_net(model, loss, config,
                                                  train_data, train_label,
                                                  config['batch_size'],
                                                  config['disp_freq'])
        if epoch % config['test_epoch'] == 0:
            LOG_INFO('Testing @ %d epoch...' % (epoch))
            test_loss_now, test_acc_now = test_net(model, loss, test_data,
                                                   test_label,
                                                   config['batch_size'])
            if best_test_loss == -1:
                update_round_before = 0
                best_test_loss = test_loss_now
            elif test_loss_now <= best_test_loss:
                update_round_before = 0
                best_test_loss = test_loss_now
            else:
                update_round_before += 1
                if update_round_before >= 5:
                    break
Example #6
draw_graph = True
loss_vals_train = []
loss_vals_test = []

for epoch in range(config['max_epoch']):
    LOG_INFO('Training @ %d epoch...' % (epoch))
    loss_vals = train_net(model, loss, config, train_data, train_label,
                          config['batch_size'], config['disp_freq'],
                          draw_graph)
    loss_vals_train = list(np.concatenate((loss_vals_train, loss_vals)))

    draw_graph = False

    LOG_INFO('Testing @ %d epoch...' % (epoch))
    loss_mean, acc_mean = test_net(model, loss, test_data, test_label,
                                   config['batch_size'])
    loss_vals_test.append(loss_mean)

# Plot graphs

fig = plt.figure()
plt.yticks(np.arange(0, 1, 0.1))  # Must change accordingly
plt.plot(range(1, len(loss_vals_train) + 1), loss_vals_train)
plt.xlabel('Visualised Iterations')
plt.ylabel('Loss')
fig.savefig('plots/train_' + usedNN + '_' + usedActivation + '_' + usedLoss +
            '.png')

fig = plt.figure()
plt.plot(range(1, config['max_epoch'] + 1), loss_vals_test)
Example #7
start = datetime.now()
display_start = str(start).split(' ')[1][:-3]
log_list = []
current_iter_count = 0


# Optionally restore pretrained parameters before training
if use_parameters:
    parameters = np.load('parameters.npz')
    model.layer_list[0].W = parameters['conv1_w']
    model.layer_list[0].b = parameters['conv1_b']
    model.layer_list[3].W = parameters['conv2_w']
    model.layer_list[3].b = parameters['conv2_b']
    model.layer_list[-1].W = parameters['fc3_w']
    model.layer_list[-1].b = parameters['fc3_b']
    acc_value = test_net(model, loss, test_data, test_label, config['batch_size'])
    print('acc = ' + str(acc_value))


record_inputs, record_labels = next(data_iterator(train_data, train_label, config['batch_size']))
indices = []
labels_set = set()
for i in range(len(record_labels)):
    if record_labels[i] not in labels_set and len(indices) < img_record_num:
        labels_set.add(record_labels[i])
        indices.append(i)
    elif len(indices) == img_record_num:
        break


for epoch in range(config['max_epoch']):
Example #8
config = {
    'learning_rate': 1e-2,
    'weight_decay': 1e-4,
    'momentum': 0.9,
    'batch_size': 100,
    'max_epoch': 300,
    'disp_freq': 50,
    'test_epoch': 200
}

loss_iters = []
acc_iters = []

train_loss = []
train_acc = []
for epoch in range(config['max_epoch']):
    LOG_INFO('Training @ %d epoch...' % (epoch))
    train_net(model, loss, config, train_data, train_label,
              config['batch_size'], config['disp_freq'], train_loss, train_acc)

    if epoch % config['test_epoch'] == 0:
        LOG_INFO('Testing @ %d epoch...' % (epoch))
        test_net(model, loss, test_data, test_label, config['batch_size'],
                 loss_iters, acc_iters)

print(loss_iters)
print(acc_iters)
print(train_loss)
print(train_acc)
}
loss_ = []
acc_ = []
test_loss = np.zeros(2) + 100
for epoch in range(config['max_epoch']):
    vis = getvis(model, test_data, test_label)
    for i in range(4):
        vis_square(vis[i], i)
    vis_square(
        model.layer_list[0].W.reshape(-1, model.layer_list[0].W.shape[2],
                                      model.layer_list[0].W.shape[3]), -1)
    LOG_INFO('Training @ %d epoch...' % (epoch))
    a, b = train_net(model, loss, config, train_data, train_label,
                     config['batch_size'], config['disp_freq'], 600000)
    loss_.append(a)
    acc_.append(b)
    if epoch % config['test_epoch'] == 0:
        LOG_INFO('Testing @ %d epoch...' % (epoch))
        test_ = test_net(model, loss, test_data, test_label, 100)
        if epoch % 10 == 0 and epoch > 0:
            # if test_loss.min() < test_ and abs(test_loss.max() - test_loss.min()) / test_loss.min() < 0.01 or epoch % 7 == 0:
            config['learning_rate'] = max(config['learning_rate'] / 2, 1e-6)
            print('lr: ', config['learning_rate'])
        test_loss[1:] = test_loss[:-1].copy()
        test_loss[0] = test_
vis = getvis(model, test_data, test_label)
for i in range(4):
    vis_square(vis[i], i)
np.save('loss', loss_)
np.save('acc', acc_)
Example #10
def start(settings):
    start_time = time.time()
    model = settings['model']
    loss = settings['loss']
    config = settings['config']
    stop_time = config['stop_time']
    if stop_time > 0:
        valid_data = train_data[50000:]
        valid_label = train_label[50000:]
        new_train_data = train_data[:50000]
        new_train_label = train_label[:50000]
    highest_acc = 0
    times = 0
    best_model = None
    acc_list = list()
    loss_list = list()
    epoch_list = list()
    time_list = list()

    # test before training
    if stop_time <= 0:
        acc, m_loss = test_net(model, loss, test_data, test_label,
                               config['batch_size'])
        acc_list.append(acc)
        loss_list.append(m_loss)
        epoch_list.append(0)
        time_list.append(time.time() - start_time)
    for epoch in range(config['max_epoch']):
        LOG_INFO('Training @ %d epoch...' % epoch)
        if stop_time > 0:
            train_net(model, loss, config, new_train_data, new_train_label,
                      config['batch_size'], config['disp_freq'])
        else:
            train_net(model, loss, config, train_data, train_label,
                      config['batch_size'], config['disp_freq'])

        if (epoch + 1) % config['test_epoch'] == 0 or epoch < 10:
            LOG_INFO('Testing @ %d epoch...' % epoch)
            if stop_time > 0:
                acc, m_loss = test_net(model, loss, valid_data, valid_label,
                                       config['batch_size'])
            else:
                acc, m_loss = test_net(model, loss, test_data, test_label,
                                       config['batch_size'])
            acc_list.append(acc)
            loss_list.append(m_loss)
            epoch_list.append(epoch + 1)
            time_list.append(time.time() - start_time)
            if stop_time > 0:
                if highest_acc <= acc:
                    highest_acc = acc
                    times = 0
                    best_model = copy_model(model)
                else:
                    times += 1
                    if times >= config['stop_time']:
                        break

    if stop_time > 0:
        model = best_model
    final_acc, final_loss = test_net(model, loss, test_data, test_label,
                                     config['batch_size'])
    end_time = time.time()
    LOG_INFO("Final acc %.4f" % final_acc)
    LOG_INFO("Time used: %d s" % (end_time - start_time))
    x = np.array(epoch_list)
    ya = np.array(acc_list)
    yl = np.array(loss_list)
    t = np.array(time_list)
    return [final_acc, end_time - start_time, x, ya, yl, t]
Example #11
train_loss_list = []
train_acc_list = []
test_loss_list = []
test_acc_list = []
plot_visual = []

start_time = time.time()

for epoch in range(config['max_epoch']):
    LOG_INFO('Training @ %d epoch...' % (epoch))
    train_loss, train_acc = train_net(model, loss, config, train_data,
                                      train_label, config['batch_size'],
                                      config['disp_freq'])
    # save loss & accuracy data for training epoch
    train_loss_list.append(train_loss)
    train_acc_list.append(train_acc)

    LOG_INFO('Testing @ %d epoch...' % (epoch))
    test_loss, test_acc, visual = test_net(model, loss, test_data, test_label,
                                           config['batch_size'], epoch,
                                           config['layer_vis'])
    # save loss & accuracy data for test epoch
    test_loss_list.append(test_loss)
    test_acc_list.append(test_acc)
    # plot visual during testing of the last epoch
    if epoch == (config['max_epoch'] - 1):
        plot_visual = visual[0:25, :, :]

end_time = time.time()

plot_loss_acc(config['max_epoch'],
              train_loss_list,
              test_loss_list,
              model.name,
              loss.name,
Example #12
loss_list = []
acc_list = []

start = time.time()

for epoch in range(config['max_epoch']):
    LOG_INFO('Training @ %d epoch...' % (epoch))
    loss_epoch, acc_epoch = train_net(model, loss, config, train_data,
                                      train_label, config['batch_size'],
                                      config['disp_freq'])

    loss_list += loss_epoch
    acc_list += acc_epoch

    if epoch % config['test_epoch'] == 0:
        LOG_INFO('Testing @ %d epoch...' % (epoch))
        test_loss, test_acc = test_net(model, loss, test_data, test_label,
                                       config['batch_size'])

last_loss, last_acc = test_net(model, loss, test_data, test_label,
                               config['batch_size'])

elapsed = time.time() - start

print("time elapsed {:.0f}, loss {:.4f}, acc {:.4f}".format(
    elapsed, last_loss, last_acc))

# save results
num_iter = len(loss_list)
with SummaryWriter('result_{}'.format(output_dir)) as tb_writer:
    for step in range(num_iter):
        tb_writer.add_scalar('train/loss', loss_list[step], global_step=step)
        tb_writer.add_scalar('train/acc', acc_list[step], global_step=step)
Example #13
# defining the training configuration...
config = {
    'learning_rate': 0.0000650,
    'weight_decay': 0.8820,
    'momentum': 0.6709,
    'batch_size': 100,
    'max_epoch': 1,
    'disp_freq': 100
}

# training...
train_losses = []
train_acc = []
train_counter = []

for epoch in range(config['max_epoch']):
    LOG_INFO('Training @ %d epoch...' % (epoch))
    train_net(model, loss, config, train_data, train_label,
              config['batch_size'], config['disp_freq'], epoch, train_losses,
              train_acc, train_counter)
# plotting...
plot(0, train_losses, train_acc, train_counter)

# testing...
LOG_INFO('Testing...')
file = open('loss_info/loss.txt', 'a')
file.write('\n====================')
info = [1]
test_net(model, loss, test_data, test_label, 10000, info)
file.close()
Example #14
}

start_time = datetime.now()
writer = SummaryWriter(comment=f'-{args.layers}_{args.loss}_{args.activation}')

# step for tensorboard
train_step, test_step = 0, 0

for epoch in range(config['max_epoch']):
    LOG_INFO('Training @ %d epoch...' % (epoch))
    losses, accuracies = train_net(model, loss, config, train_data,
                                   train_label, config['batch_size'],
                                   config['disp_freq'])
    for loss_val, accuracy in zip(losses, accuracies):
        writer.add_scalar("Loss/train", loss_val, train_step)
        writer.add_scalar("Accuracy/train", accuracy, train_step)
        train_step += 1

    if epoch % config['test_epoch'] == 0:
        LOG_INFO('Testing @ %d epoch...' % (epoch))
        loss_val, accuracy = test_net(model, loss, test_data, test_label,
                                      config['batch_size'])
        writer.add_scalar("Loss/test", loss_val, test_step)
        writer.add_scalar("Accuracy/test", accuracy, test_step)
        test_step += 1

end_time = datetime.now()
writer.add_text("time", f"{(end_time - start_time).total_seconds():.1f} s")
writer.flush()
writer.close()
Example #15
}

loss_plot_list = []
acc_plot_list = []
iter_plot_list = []
acc_test_plot_list = []

for epoch in range(config['max_epoch']):
    LOG_INFO('Training @ %d epoch...' % (epoch))
    train_net(model, loss, config, train_data, train_label,
              config['batch_size'], config['disp_freq'], loss_plot_list,
              acc_plot_list, iter_plot_list)

    if epoch % config['test_epoch'] == 0:
        LOG_INFO('Testing @ %d epoch...' % (epoch))
        test_net(model, loss, test_data, test_label, config['batch_size'],
                 acc_test_plot_list)

with open("acc_" + filename + ".txt", 'w') as json_file:
    json_file.write(json.dumps(acc_test_plot_list, indent=4))
with open("loss_" + filename + ".txt", 'w') as json_file:
    json_file.write(json.dumps(loss_plot_list, indent=4))

#
# for i in range(10):
#     for input, label in data_iterator(train_data, train_label, 10000):
#         number = label == i
#         label = label[number][:100]
#         input = input[number][:100]
#         target = onehot_encoding(label, 10)
#         output = model.layer_list[4].forward(input)
#         vis_square(output[:,0,:,:])
Example #16
# You should adjust these hyperparameters.
# NOTE: one iteration means the model runs forward and backward over one batch of samples.
#       one epoch means the model has gone through all the training samples.
#       'disp_freq' controls how often (in iterations) training information is displayed
#       within an epoch; a short illustration follows the config below.

config = {
    'learning_rate': 0.01,
    'weight_decay': 0,
    'momentum': 0.7,
    'batch_size': 100,
    'max_epoch': 300,
    'disp_freq': 5,
    'test_epoch': 2
}
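
# Added illustration (not part of the original snippet): how 'batch_size' and
# 'disp_freq' interact, assuming a 60000-sample training set and the usual
# convention that progress is printed once every 'disp_freq' iterations.
import math
num_train_samples = 60000  # assumed dataset size; adjust to the real data
iters_per_epoch = math.ceil(num_train_samples / config['batch_size'])  # 600 iterations per epoch here
log_lines_per_epoch = iters_per_epoch // config['disp_freq']  # roughly 120 progress lines per epoch
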
lo_list = []
ac_list = []
categories = [1, 2, 3, 4]
for epoch in range(config['max_epoch']):
    LOG_INFO('Training @ %d epoch...' % (epoch))
    train_net(model, loss, config, train_data, train_label, config['batch_size'], config['disp_freq'])

    if epoch % config['test_epoch'] == 0:
        LOG_INFO('Testing @ %d epoch...' % (epoch))
        lo, ac = test_net(model, loss, test_data, test_label, config['batch_size'])
        lo_list.append(lo)
        ac_list.append(ac)


show4category(model, test_data, test_label, categories)
show(lo_list, ac_list, config['max_epoch'], config['test_epoch'])
Example #17
config = {
    'learning_rate': 0.12,
    'weight_decay': 0,
    'momentum': 0.78,
    'batch_size': 40,
    'max_epoch': 30,
    'disp_freq': 10,
    'test_epoch': 1
}


train_log_data = np.array(['']).astype('S')
test_log_data = np.array(['']).astype('S')
for epoch in range(config['max_epoch']):
    LOG_INFO('Training @ %d epoch...' % (epoch))
    log, output_1 = train_net(model, loss, config, train_data, train_label, config['batch_size'], config['disp_freq'])
    train_log_data = np.concatenate((train_log_data, log))
    if epoch % config['test_epoch'] == 0:
        LOG_INFO('Testing @ %d epoch...' % (epoch))
        log_, output_2 = test_net(model, loss, test_data, test_label, config['batch_size'])
        test_log_data = np.concatenate((test_log_data, log_))

np.savetxt('train_log', train_log_data, fmt='%s')
np.savetxt('test_log', test_log_data, fmt='%s')
with h5py.File('image', 'a') as f:
    f.create_dataset('train_output', data=output_1)
    f.create_dataset('test_output', data=output_2)