Example #1
def train_model(model, sess, writer, x, num_epoch, batch_size=100, lr=0.001):
    # Number of full mini-batches per epoch; any remainder is dropped.
    iteration_per_epoch = x.shape[0] // batch_size
    for epoch in range(num_epoch):
        total_loss = 0
        # Reshuffle the training data at the start of every epoch.
        x = shuffle_data(x)
        for i in range(iteration_per_epoch):
            x_batch = x[i*batch_size:(i+1)*batch_size, :]
            batch_loss = model.partial_train(sess, writer, x_batch, lr)
            total_loss += batch_loss
        # Report the mean batch loss for the epoch.
        total_loss /= iteration_per_epoch
        print('Epoch = {0}, loss = {1}.'.format(epoch, total_loss))
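Every example here calls a shuffle_data helper that is not shown. A minimal sketch of what the call sites imply, assuming NumPy arrays that must all be permuted with the same indices (Example #6 uses a variant that also returns the permutation, which this sketch omits):

import numpy as np

def shuffle_data(*arrays):
    # One shared permutation keeps features and labels aligned.
    perm = np.random.permutation(arrays[0].shape[0])
    shuffled = tuple(a[perm] for a in arrays)
    # Match both call styles: x = shuffle_data(x) and
    # x, y = shuffle_data(x, y).
    return shuffled[0] if len(shuffled) == 1 else shuffled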
Example #2
def train_model(config_name, model_name, num_epoch=100, batch_size=16, lr=0.0001):
    x_train, y_train, x_test, y_test = make_cifar10_dataset()

    margs = config(config_name)
    num_block = margs['num_block']
    num_layer_per_block = margs['num_layer_per_block']
    num_filter = margs['num_filter']
    fc_dim = margs['fc_dim']
    latent_dim = margs['latent_dim']
    if model_name == 'ConvAe':
        model = ConvAe(num_block, num_layer_per_block, num_filter, fc_dim=fc_dim, latent_dim=latent_dim)
    elif model_name == 'ConvVae':
        model = ConvVae(num_block, num_layer_per_block, num_filter, fc_dim=fc_dim, latent_dim=latent_dim)
    else:
        print('No model named {0}.'.format(model_name))
        return

    with tf.Session() as sess:
        # Saver for checkpoints; writer for TensorBoard summaries.
        saver = tf.train.Saver()
        writer = tf.summary.FileWriter('graph', sess.graph)

        sess.run(tf.global_variables_initializer())

        iteration_per_epoch = x_train.shape[0] // batch_size
        for epoch in range(num_epoch):
            # Reassign the result: shuffle_data returns the shuffled
            # array rather than shuffling in place.
            x_train = shuffle_data(x_train)
            total_loss = 0
            for i in range(iteration_per_epoch):
                x_batch = x_train[i*batch_size:(i+1)*batch_size, :, :, :]
                loss, _, summary = sess.run([model.loss, model.optimizer, model.summary], feed_dict={model.x: x_batch, model.lr: lr})
                total_loss += loss
                writer.add_summary(summary, model.global_step.eval(sess))
            total_loss /= iteration_per_epoch
            print('Epoch = {0}, loss = {1}.'.format(epoch, total_loss))

        # Persist the trained weights for later restoring.
        saver.save(sess, 'model/model.ckpt')
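The checkpoint written above is never restored in this example. A minimal restore sketch for inference, assuming the same graph has been rebuilt first; the latent tensor model.z is a guess, since the example only shows model.x, model.lr, model.loss, model.optimizer, and model.summary:

import tensorflow as tf

def encode_images(model, x):
    with tf.Session() as sess:
        saver = tf.train.Saver()
        # Load the weights written by train_model above.
        saver.restore(sess, 'model/model.ckpt')
        # model.z (latent codes) is an assumed attribute name.
        return sess.run(model.z, feed_dict={model.x: x})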
Example #3
def train_model(model,
                x,
                x2,
                y,
                y2,
                sess,
                writer,
                num_epoch,
                flog,
                batch_size=100,
                lr=0.05):
    iteration_per_epoch = x.shape[0] // batch_size
    print('{0:8s}\t{1:8s}\t{2:8s}\t{3:8s}\t{4:8s}'.format(
        'Epoch', 'Lr', 'Loss', 'Acc1', 'Acc2'))
    flog.write('{0:8s}\t{1:8s}\t{2:8s}\t{3:8s}\t{4:8s}\n'.format(
        'Epoch', 'Lr', 'Loss', 'Acc1', 'Acc2'))
    for epoch in range(num_epoch):
        total_loss = 0
        x, y = shuffle_data(x, y)
        for i in range(iteration_per_epoch):
            batch_x = x[i * batch_size:(i + 1) * batch_size, :, :, :]
            batch_y = y[i * batch_size:(i + 1) * batch_size, :]
            # The final flag gates summary logging to every 10th batch.
            batch_loss = model.partial_train(batch_x, batch_y, sess, writer,
                                             lr, i % 10 == 0)
            total_loss += batch_loss
        total_loss /= iteration_per_epoch
        # Quick accuracy estimates on small fixed slices; full-set
        # evaluation happens every 30 epochs below.
        train_accuracy = model.test(x[0:100, :, :, :], y[0:100, :], sess)
        test_accuracy = model.test(x2[0:500, :, :, :], y2[0:500, :], sess)
        print('{0:8d}\t{1:8.4f}\t{2:8.4f}\t{3:8.4f}\t{4:8.4f}'.format(
            epoch, lr, total_loss, train_accuracy, test_accuracy))
        flog.write('{0:8d}\t{1:8.4f}\t{2:8.4f}\t{3:8.4f}\t{4:8.4f}\n'.format(
            epoch, lr, total_loss, train_accuracy, test_accuracy))
        # Every 30 epochs, halve the learning rate and evaluate on the
        # full train and test sets.
        if epoch % 30 == 29:
            lr *= 0.5
            train_accuracy = test_model(model, x, y, sess)
            test_accuracy = test_model(model, x2, y2, sess)
            print('{0:8s}\t{1:8s}\t{2:8s}\t{3:8.4f}\t{4:8.4f}'.format(
                '--', '--', '--', train_accuracy, test_accuracy))
            flog.write('{0:8s}\t{1:8s}\t{2:8s}\t{3:8.4f}\t{4:8.4f}\n'.format(
                '--', '--', '--', train_accuracy, test_accuracy))
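The test_model helper used every 30 epochs is not shown. A plausible sketch, assuming it batches model.test over the full arrays and averages the per-batch accuracies (the batch size is a guess):

def test_model(model, x, y, sess, batch_size=100):
    # Average per-batch accuracy over every full batch in the set.
    num_batch = x.shape[0] // batch_size
    total_accuracy = 0.0
    for i in range(num_batch):
        total_accuracy += model.test(x[i * batch_size:(i + 1) * batch_size],
                                     y[i * batch_size:(i + 1) * batch_size],
                                     sess)
    return total_accuracy / num_batch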
Example #4
def train_model(model,
                sess,
                writer,
                x,
                y,
                num_epoch,
                batch_size=100,
                lr=0.001):
    iteration_per_epoch = x.shape[0] // batch_size
    for epoch in range(num_epoch):
        x, y = shuffle_data(x, y)
        total_loss = 0
        for i in range(iteration_per_epoch):
            x_batch = x[i * batch_size:(i + 1) * batch_size, :]
            y_batch = y[i * batch_size:(i + 1) * batch_size, :]
            batch_loss = model.partial_train(x_batch, y_batch, lr, sess,
                                             writer)
            total_loss += batch_loss
        total_loss /= iteration_per_epoch
        # Rough accuracy check on the first batch only.
        accuracy = model.test_batch(x[0:batch_size, :], y[0:batch_size, :],
                                    sess)
        print('Epoch = {0}, loss = {1}, accuracy = {2}.'.format(
            epoch, total_loss, accuracy))
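Examples #1, #3, #4, and #5 delegate the optimization step to model.partial_train, while Example #2 inlines the equivalent sess.run call. That suggests a method roughly like the sketch below; the tensor attributes (loss, optimizer, summary, global_step, x, y, lr) are borrowed from Example #2, and the argument order follows this example's call site:

def partial_train(self, x_batch, y_batch, lr, sess, writer):
    # One optimizer step, mirroring the inline sess.run in Example #2.
    loss, _, summary = sess.run(
        [self.loss, self.optimizer, self.summary],
        feed_dict={self.x: x_batch, self.y: y_batch, self.lr: lr})
    writer.add_summary(summary, self.global_step.eval(sess))
    return loss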
Example #5
def train_model(model,
                sess,
                writer,
                x,
                num_epoch,
                lr,
                batch_size=32,
                policy_type=constant_beta):
    iteration_per_epoch = x.shape[0] // batch_size
    total_iter = 0
    for epoch in range(num_epoch):
        x = shuffle_data(x)
        # Weight for this epoch from the chosen annealing policy.
        beta = policy_type(epoch, num_epoch)
        total_loss = 0
        for i in range(iteration_per_epoch):
            x_batch = x[i * batch_size:(i + 1) * batch_size, :, :, :]
            total_iter += 1
            # The flag requests summary logging every 10th iteration.
            batch_loss = model.partial_train(x_batch, lr, sess, writer,
                                             total_iter % 10 == 0, beta)
            total_loss += batch_loss
        total_loss /= iteration_per_epoch
        print('Epoch = {0}, beta = {1}, lr = {2}, loss = {3}.'.format(
            epoch, beta, lr, total_loss))
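The default policy constant_beta is not shown. Two plausible policies matching the (epoch, num_epoch) signature at the call site; the linear warm-up is a common schedule when beta weights a KL term, as in a VAE, though that use is an assumption here:

def constant_beta(epoch, num_epoch):
    # Fixed weight throughout training.
    return 1.0

def linear_beta(epoch, num_epoch):
    # Ramp from 0 up to 1 over the course of training.
    return epoch / float(max(num_epoch - 1, 1))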
Example #6
def process(options):
    # fix the random seed for reproducibility
    np.random.seed(options.seed)

    # load the data
    trainX, trainY, valX, valY, testX, testY = load_dataset(options.data_path)
    trainX = np.transpose(trainX, [0, 2, 1])
    valX = np.transpose(valX, [0, 2, 1])
    testX = np.transpose(testX, [0, 2, 1])

    # get meta data
    if options.get_data_shapes:
        options.timesteps = trainX.shape[2]
        options.channels = trainX.shape[1]
        options.classes = len(np.unique(trainY))
        print('Found %s timesteps with %s channel(s) and %s classes' %
              (options.timesteps, options.channels, options.classes))

    # shuffle the data
    if options.shuffle:
        trainX, trainY, valX, valY, testX, testY, perm_list = shuffle_data(
            trainX, trainY, valX, valY, testX, testY)
        np.save(options.train_dir + "/shuffle.npy", perm_list)

    # shrink data
    if options.shrink_data < 100:
        train_shrink = int(len(trainY) * options.shrink_data / 100)
        val_shrink = int(len(valY) * options.shrink_data / 100)
        test_shrink = int(len(testY) * options.shrink_data / 100)

        trainX = trainX[:train_shrink]
        valX = valX[:val_shrink]
        testX = testX[:test_shrink]
        trainY = trainY[:train_shrink]
        valY = valY[:val_shrink]
        testY = testY[:test_shrink]

    # create mislabels
    if options.mislabel:
        trainY, trainY_correct, mis_idx = create_mislabel(
            options.mislabel_perc, trainY)
        np.save(options.train_dir + "/mislabel.npy", mis_idx)

    # create mislabels for validation
    if options.mislabel_val:
        valY, valY_correct, mis_idx = create_mislabel(options.mislabel_perc,
                                                      valY)
        np.save(options.train_dir + "/mislabel_val.npy", mis_idx)

    # remove based on importance value
    if options.remove_low or options.remove_high:
        if options.mislabel:
            trainX, trainY, remove_list, corrected = remove_importance(
                options, trainX, trainY, mis_idx)
            np.save(options.train_dir + "/removed_correction.npy", corrected)
        else:
            trainX, trainY, remove_list, _ = remove_importance(
                options, trainX, trainY, None)
        np.save(options.train_dir + "/removement_list.npy", remove_list)

    # manual correct data
    if options.manual_correction > 0 and options.mislabel:
        correction_list = create_correction_set(options, trainY)
        np.save(options.train_dir + "/correction_list.npy", correction_list)
        trainY, corrected = correct_mislabel(trainY, trainY_correct,
                                             correction_list)
        np.save(options.train_dir + "/manual_correction.npy", corrected)

    print('Train set:', str(trainX.shape))
    print('Val set:', str(valX.shape))
    print('Test set:', str(testX.shape))
    print('Classes:', str(options.classes))

    # Initialize the Train object
    net = Train(options.timesteps, options.channels, options.classes)
    net.build_graph()
    # Start the training session
    if options.train:
        net.train(options, trainX, trainY, valX, valY)
    # testing
    if options.test:
        perform_testing(options, net, trainX, trainY, valX, valY, testX, testY)
    # collect train loss
    if options.collect_train_loss:
        get_train_sample_losses(net, options, trainX, trainY)
    # influences calc
    if options.influences:
        influence_feeder, inspector, sess = set_up_influence_feeder(
            options, net, trainX, trainY, valX, valY, testX, testY)
        if options.compute_score:
            if options.each_class:
                compute_influence_each(options, influence_feeder, inspector,
                                       sess)
            else:
                influence_scores = compute_influence_example(
                    options, influence_feeder, inspector, sess)
                if options.show_influence:
                    show_most_influencing(influence_feeder.train_data,
                                          influence_feeder.train_label,
                                          influence_scores,
                                          options.show_number)
    # influence show
    if options.show_influence and not options.each_class:
        influence_scores = np.load(options.influence_file, allow_pickle=True)
        show_most_influencing(trainX, trainY, influence_scores,
                              options.show_number)
    # representer influence
    if options.compute_representer_influence:
        net.test(options, trainX, trainY)
        perform_representer_influence(options)
    # compare labels
    if options.predict:
        setX = trainX
        setY = trainY
        if options.predict_set == 1:
            setX = valX
            setY = valY
        if options.predict_set == 2:
            setX = testX
            setY = testY
        prediction_array, _, _, acc = net.test(
            options, setX[options.predict_start:options.predict_end],
            setY[options.predict_start:options.predict_end])
        compare_labels(setY[options.predict_start:options.predict_end],
                       prediction_array)
        print('Final accuracy: %s' % acc)
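create_mislabel is not shown; from its call sites it corrupts a given percentage of labels and returns the corrupted labels, the originals, and the flipped indices. A minimal sketch under those assumptions (1-D integer labels, and a percentage on the same 0-100 scale as shrink_data):

import numpy as np

def create_mislabel(mislabel_perc, y):
    # Flip mislabel_perc percent of the labels to a different class,
    # keeping the originals around for later correction.
    y_correct = y.copy()
    y = y.copy()
    classes = np.unique(y)
    num_flip = int(len(y) * mislabel_perc / 100)
    mis_idx = np.random.choice(len(y), num_flip, replace=False)
    for i in mis_idx:
        y[i] = np.random.choice(classes[classes != y[i]])
    return y, y_correct, mis_idx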