Example #1
# Assumed imports; the defining modules are not shown in the source.
import tensorflow as tf  # TF 1.x API (tf.nn.relu, tf.train.AdamOptimizer)
from mlp import MultilayerPerceptron, hyperparameter_summary  # hypothetical module path

def classify(train_dataset, train_labels, valid_dataset, valid_labels,
             test_dataset, test_labels):
    num_samples, feature_size = train_dataset.shape
    num_labels = train_labels.shape[1]  # labels are one-hot encoded
    batch_size = 80
    keep_prob = 0.90        # dropout keep probability
    beta = 0.0000           # L2 regularization coefficient (disabled here)
    weights = [1.0, 100.0]  # per-class loss weights for the imbalanced classes
    num_epochs = [60]
    init_lrs = [0.001]
    # Candidate architectures for the grid search; alternatives left commented out.
    hidden_layer_sizes = [
        [400],
        # [800, 640], [160, 80], [80, 40],
        # [400, 360, 320],
        # [160, 120, 80], [120, 80, 40],
    ]
    for hidden_layer_size in hidden_layer_sizes:
        for init_lr in init_lrs:
            for num_epoch in num_epochs:
                num_steps = int(num_samples / batch_size * num_epoch)
                # Decay the learning rate roughly once per epoch.
                decay_steps = num_steps // num_epoch
                mp_classifier = MultilayerPerceptron(feature_size,
                                                     hidden_layer_size,
                                                     num_labels,
                                                     init_lr,
                                                     decay_steps,
                                                     beta,
                                                     tf.nn.relu,
                                                     tf.nn.l2_loss,
                                                     weights,
                                                     tf.train.AdamOptimizer,
                                                     name='GAN-MLP-UNSW')
                mp_classifier.train_with_labels(train_dataset, train_labels,
                                                batch_size, num_steps,
                                                valid_dataset, valid_labels,
                                                test_dataset, test_labels,
                                                keep_prob)
                hyperparameter = {
                    'hidden_layer_size': hidden_layer_size,
                    'init_lr': init_lr,
                    'num_epochs': num_epoch,
                    'num_steps': num_steps,
                    'regularization beta': beta,
                    'optimizer': 'AdamOptimizer',
                    'keep_prob': keep_prob,
                    'act_func': 'RELU',
                    'class_weights': weights,
                    'batch_size': batch_size,
                }
                hyperparameter_summary(mp_classifier.dirname, hyperparameter)
                with open(mp_classifier.dirname + '/test.log') as f:
                    print(f.read())
                mp_classifier.exit()
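
A minimal usage sketch for the classify function above, assuming the UNSW
splits are stored as NumPy arrays with one-hot labels (the file names here
are placeholders, not from the source):

import numpy as np

train_dataset = np.load('unsw_train_X.npy')   # shape (num_samples, feature_size)
train_labels = np.load('unsw_train_y.npy')    # one-hot, shape (num_samples, num_labels)
valid_dataset = np.load('unsw_valid_X.npy')
valid_labels = np.load('unsw_valid_y.npy')
test_dataset = np.load('unsw_test_X.npy')
test_labels = np.load('unsw_test_y.npy')

classify(train_dataset, train_labels, valid_dataset, valid_labels,
         test_dataset, test_labels)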
Example #2
            # Excerpt from inside grid-search loops like those in Example #1;
            # the enclosing code is omitted in the source (see the sketch
            # after this example).
            decay_steps = num_steps // num_epoch
            mp_classifier = MultilayerPerceptron(feature_size,
                                                 hidden_layer_size,
                                                 num_labels, init_lr,
                                                 decay_steps, beta,
                                                 tf.nn.relu,
                                                 tf.nn.l2_loss, weights,
                                                 tf.train.AdamOptimizer,
                                                 name='PureMLP-UNSW2C')
            mp_classifier.train_with_labels(train_dataset, train_labels,
                                            batch_size, num_steps,
                                            valid_dataset, valid_labels,
                                            test_dataset, test_labels,
                                            keep_prob)
            hyperparameter = {'hidden_layer_size': hidden_layer_size,
                              'init_lr': init_lr,
                              'num_epochs': num_epoch,
                              'num_steps': num_steps,
                              'regularization beta': beta,
                              'optimizer': 'AdamOptimizer',
                              'keep_prob': keep_prob,
                              'act_func': 'RELU',
                              'class_weights': weights,
                              'batch_size': batch_size, }
            hyperparameter_summary(mp_classifier.dirname,
                                   hyperparameter)
            with open(mp_classifier.dirname + '/test.log') as f:
                print(f.read())
            mp_classifier.exit()
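
For context, a sketch of the grid-search loops this excerpt appears to sit
inside, modeled on Example #1 (the surrounding code is omitted in the
source, so names like hidden_layer_sizes are assumptions):

batch_size = 80
for hidden_layer_size in hidden_layer_sizes:
    for init_lr in init_lrs:
        for num_epoch in num_epochs:
            num_steps = train_dataset.shape[0] // batch_size * num_epoch
            # ... excerpt above: build, train, and log one PureMLP-UNSW2C run ...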
Example #3
# Excerpt: the constructor call that produces `rbm` is truncated in the
# source; only its trailing keyword arguments appear below. The fragment
# also assumes `import numpy as np`.
                                 trans_func=tf.nn.sigmoid,
                                 num_labels=2,
                                 name=encoder_name)
rbm.train_with_labels(train_dataset, train_labels, int(num_steps),
                      valid_dataset, rbm_lr)
test_loss = rbm.calc_reconstruct_loss(test_dataset)
print("Testset reconstruction error: %f" % test_loss)
hyperparameter = {
    '#hidden units': num_hidden_rbm,
    'init_lr': rbm_lr,
    'num_epochs': num_epoch,
    'num_steps': num_steps,
    'act_func': 'sigmoid',
    'batch_size': batch_size,
}
hyperparameter_summary(rbm.dirname, hyperparameter)

# Uniform random draws used to sample the RBM's stochastic hidden units
# during encoding (see the sketch after this example).
hrand = np.random.random((train_dataset.shape[0], num_hidden_rbm))
rbm_train_dataset = rbm.encode_dataset(train_dataset, hrand)
print('Encoded training set', rbm_train_dataset.shape)
hrand = np.random.random((valid_dataset.shape[0], num_hidden_rbm))
rbm_valid_dataset = rbm.encode_dataset(valid_dataset, hrand)
print('Encoded valid set', rbm_valid_dataset.shape)
hrand = np.random.random((test_dataset.shape[0], num_hidden_rbm))
rbm_test_dataset = rbm.encode_dataset(test_dataset, hrand)
print('Encoded test set', rbm_test_dataset.shape)
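
The hrand matrices suggest stochastic hidden-unit sampling. A self-contained
sketch of what an encoding step of this kind typically computes for a
Bernoulli RBM (the parameter names W and hbias and the sampling rule are
assumptions, not this class's actual internals):

import numpy as np

def encode_dataset_sketch(dataset, hrand, W, hbias):
    # Hidden-unit activation probabilities p(h = 1 | v) = sigmoid(v W + b).
    probs = 1.0 / (1.0 + np.exp(-(dataset @ W + hbias)))
    # Sample binary hidden states by thresholding against the uniform draws.
    return (hrand < probs).astype(np.float32)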

# Persist the RBM-encoded datasets for downstream use.
maybe_npsave('trainset.rbm', rbm_train_dataset, True)
maybe_npsave('validset.rbm', rbm_valid_dataset, True)
maybe_npsave('testset.rbm', rbm_test_dataset, True)
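
Assuming maybe_npsave writes standard .npy files under these names, the
encoded features can later be reloaded and fed to a classifier such as the
classify function of Example #1 (the file names here are assumptions):

import numpy as np

rbm_train_dataset = np.load('trainset.rbm.npy')
rbm_valid_dataset = np.load('validset.rbm.npy')
rbm_test_dataset = np.load('testset.rbm.npy')

classify(rbm_train_dataset, train_labels,
         rbm_valid_dataset, valid_labels,
         rbm_test_dataset, test_labels)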