Example #1
import NN, data_loader, perceptron

# training_data, test_data = data_loader.load_circle_data()
training_data, test_data = data_loader.load_mnist_data()

# domain = 'circles'
domain = 'mnist'
batch_size = 10
learning_rate = 0.1
activation_function = 'tanh'
hidden_layer_width = 5
data_dim = len(training_data[0][0])

# Train a fresh network, then report accuracy on the held-out test set.
net = NN.create_NN(domain, batch_size, learning_rate, activation_function,
                   hidden_layer_width)
print(net.train(training_data))
print(net.evaluate(test_data))

# Same network again, this time recording a learning curve while training.
net = NN.create_NN(domain, batch_size, learning_rate, activation_function,
                   hidden_layer_width)
print(net.train_with_learning_curve(training_data))
print(net.evaluate(test_data))

# Perceptron baseline on the raw input features.
perc = perceptron.Perceptron(data_dim)
print(perc.train(training_data))
print(perc.evaluate(test_data))

perc = perceptron.Perceptron(data_dim)
print(perc.train_with_learning_curve(training_data))
print(perc.evaluate(test_data))
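
The data_loader module itself is not shown. Judging from data_dim = len(training_data[0][0]), each split is a sequence of (features, label) pairs; a minimal stand-in with that shape (illustrative only, with random arrays in place of real MNIST images) could look like this:

import numpy as np

# Illustrative stub: random data standing in for real MNIST images.
def load_mnist_data(n_train=1000, n_test=200, dim=784, n_classes=10):
    def make_split(n):
        xs = np.random.rand(n, dim)
        ys = np.random.randint(n_classes, size=n)
        return [(x, y) for x, y in zip(xs, ys)]
    return make_split(n_train), make_split(n_test)
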
Example #2
class InitDataSet:
    """Holds the hyperparameters used for the MNIST training run below."""

    def __init__(
        self,
        batch_size=64,
        test_batch_size=1000,
        epochs=10,
        lr=0.01,
        momentum=0.5,
        no_cuda=True,
        seed=1,
        log_interval=10,
    ):
        self.batch_size = batch_size
        self.test_batch_size = test_batch_size
        self.epochs = epochs
        self.lr = lr
        self.momentum = momentum
        self.no_cuda = no_cuda
        self.seed = seed
        self.log_interval = log_interval


if __name__ == '__main__':
    init_data = InitDataSet()

    # load_mnist_data, tests and NeuralNetwork come from the surrounding project.
    train_loader, test_loader = load_mnist_data(init_data)
    tests.load_test(train_loader)
    tests.load_test(test_loader)

    nn = NeuralNetwork(init_data)
    nn.train(train_loader)
    # tests.nn_test(nn)
    # nn.train()
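
The load_mnist_data helper is not part of this listing. Given the hyperparameters above (batch_size, test_batch_size, no_cuda and so on match the classic PyTorch MNIST example), one plausible sketch uses torchvision; the ./data path and the normalization constants are assumptions:

import torch
from torchvision import datasets, transforms

def load_mnist_data(init_data):
    # Standard MNIST mean/std normalization (assumed constants).
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('./data', train=True, download=True, transform=transform),
        batch_size=init_data.batch_size, shuffle=True)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('./data', train=False, transform=transform),
        batch_size=init_data.test_batch_size, shuffle=False)
    return train_loader, test_loader
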
Example #3

import NN, data_loader, perceptron
import numpy as np

data_dict = {
    'circles': {
        'training_data': None,
        'test_data': None
    },
    'mnist': {
        'training_data': None,
        'test_data': None
    }
}
data_dict['circles']['training_data'], data_dict['circles']['test_data'] = data_loader.load_circle_data()
data_dict['mnist']['training_data'], data_dict['mnist']['test_data'] = data_loader.load_mnist_data()

# Parameters to tune
domains = ['circles', 'mnist']
batch_sizes = [10, 50, 100]
learning_rates = [0.1, 0.01]
activation_functions = ['relu', 'tanh']
hidden_layer_widths = [10, 50]

for domain in domains:
    print('====================', domain, '====================')
    training_data = data_dict[domain]['training_data']
    np.random.shuffle(training_data)
    # Split the shuffled data into five roughly equal folds for cross-validation.
    folder_length = len(training_data) // 5 + 1
    foldered_training_data = [training_data[x:x+folder_length] for x in range(0, len(training_data), folder_length)]

    max_accuracy = 0
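
The listing breaks off here. Purely as an illustration, and not the original author's code, a cross-validated grid search over the parameter lists above would typically continue along these lines (assuming net.evaluate returns a numeric accuracy):

# Hypothetical continuation, for illustration only.
for batch_size in batch_sizes:
    for learning_rate in learning_rates:
        for activation_function in activation_functions:
            for hidden_layer_width in hidden_layer_widths:
                accuracies = []
                for i in range(5):
                    # Hold out fold i; train on the remaining four folds.
                    held_out = foldered_training_data[i]
                    train_folds = [x for j, fold in enumerate(foldered_training_data)
                                   if j != i for x in fold]
                    net = NN.create_NN(domain, batch_size, learning_rate,
                                       activation_function, hidden_layer_width)
                    net.train(train_folds)
                    accuracies.append(net.evaluate(held_out))
                mean_accuracy = sum(accuracies) / len(accuracies)
                if mean_accuracy > max_accuracy:
                    max_accuracy = mean_accuracy
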
Example #4
import os

import numpy as np

# load_mnist_data, supervised_model_training, vime_self, vime_semi and
# perf_metric are provided by the surrounding project's modules.


def vime_main(label_data_rate, model_sets, label_no, p_m, alpha, K, beta):
    """VIME main function.

    Args:
      - label_data_rate: ratio of labeled data
      - model_sets: supervised model sets
      - label_no: number of labeled data to be used
      - p_m: corruption probability
      - alpha: hyper-parameter to control the two self-supervised losses
      - K: number of augmented data
      - beta: hyper-parameter to control the two semi-supervised losses

    Returns:
      - results: performances of the supervised, VIME-self and VIME-semi models
    """

    # Define outputs
    results = np.zeros([len(model_sets) + 2])

    # Load data
    x_train, y_train, x_unlab, x_test, y_test = load_mnist_data(
        label_data_rate)

    # Use subset of labeled data
    x_train = x_train[:label_no, :]
    y_train = y_train[:label_no, :]

    # Metric
    metric = 'acc'

    # Train supervised models
    for m_it in range(len(model_sets)):
        model_name = model_sets[m_it]
        results[m_it] = supervised_model_training(x_train, y_train, x_test,
                                                  y_test, model_name, metric)

    # Train VIME-Self
    vime_self_parameters = dict()
    vime_self_parameters['batch_size'] = 128
    vime_self_parameters['epochs'] = 10
    vime_self_encoder = vime_self(x_unlab, p_m, alpha, vime_self_parameters)

    # Save encoder
    if not os.path.exists('save_model'):
        os.makedirs('save_model')

    file_name = './save_model/encoder_model.h5'
    vime_self_encoder.save(file_name)

    # Test VIME-Self
    x_train_hat = vime_self_encoder.predict(x_train)
    x_test_hat = vime_self_encoder.predict(x_test)

    model_name = 'mlp'
    results[len(model_sets)] = supervised_model_training(
        x_train_hat, y_train, x_test_hat, y_test, model_name, metric)

    # Train VIME-Semi
    vime_semi_parameters = dict()
    vime_semi_parameters['hidden_dim'] = 100
    vime_semi_parameters['batch_size'] = 128
    vime_semi_parameters['iterations'] = 1000
    y_test_hat = vime_semi(x_train, y_train, x_unlab, x_test,
                           vime_semi_parameters, p_m, K, beta, file_name)

    # Test VIME-Semi
    results[len(model_sets) + 1] = perf_metric(metric, y_test, y_test_hat)

    # Print the results for each iteration
    print(np.round(results, 4))

    return results
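
A call site might look like the following; the model names and hyperparameter values are illustrative placeholders, not values taken from this example:

# Illustrative invocation; all values below are placeholders.
model_sets = ['logit', 'xgboost', 'mlp']
results = vime_main(label_data_rate=0.1, model_sets=model_sets, label_no=1000,
                    p_m=0.3, alpha=2.0, K=3, beta=1.0)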