Example 1
def main():
    # Read config file
    cfg = util.read_config('config/digit.yaml')

    # Load digit data from dataset
    x_train, x_test, y_train, y_test = load_data(cfg['dataset'])
    x_train, y_train = util.shuffle_data(x_train, y_train)
    x_test, y_test = util.shuffle_data(x_test, y_test)

    # Default model name as loaded from file, overwritten if training
    model_name = cfg['nn']['model_name']
    model_dir = cfg['nn']['model_dir']

    with tf.Session() as sess:
        if cfg['nn']['train']:
            # Train network on our training data
            print('[ANN] Training new network...')
            model, model_name = train_network(sess, x_train, y_train, cfg)
        else:
            print('[ANN] Testing network {0}...'.format(model_name))
            model = util.load_model(
                os.path.join(model_dir, model_name + "_model"))

        # Test network on our testing data
        results = test_network(sess, model, x_test, y_test, cfg)

        # TODO: Tristan to reimplement analyse results to get confusion matrix and roc curve
        conf_mat = {}
        # conf_mat = util.analyse_results(y_test, results)
        util.store_results(conf_mat, os.path.join(model_dir,
                                                  model_name + "_cm"))
Example 2
def main():
    # Read config file
    cfg = util.read_config('config/mushroom.yaml')

    # Load mushroom data from dataset
    x_train, x_test, y_train, y_test = load_data(cfg['dataset'],
                                                 cfg['test_ratio_offset'])
    x_train, y_train = util.shuffle_data(x_train, y_train)
    x_test, y_test = util.shuffle_data(x_test, y_test)

    # Default model name as loaded from file, overwritten if training
    model_name = cfg['nn']['model_name']
    model_dir = cfg['nn']['model_dir']

    with tf.Session() as sess:
        if cfg['nn']['train']:
            # Train network on our training data
            print('[ANN] Training new network...')
            model, model_name, train_stats = train_network(
                sess, x_train, y_train, cfg)
        else:
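            # Reload previously stored results and plot the saved training/validation error curves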
            loaded_results = util.load_results(
                os.path.join(model_dir, model_name + "_cm"))
            # Set up our continuous plot
            plt.title('Error vs Epoch')
            plt.plot(loaded_results['train_stats']['train_errors'],
                     color='r',
                     label='training')
            plt.plot(loaded_results['train_stats']['valid_errors'],
                     color='b',
                     label='validation')
            plt.xlabel('Epoch')
            plt.ylabel('Error')
            plt.legend()
            plt.grid()
            plt.show()

            print('[ANN] Testing network {0}...'.format(model_name))
            model = util.load_model(
                os.path.join(model_dir, model_name + "_model"))
            train_stats = loaded_results['train_stats']

        # Test network on our testing data
        results = test_network(sess, model, x_test, y_test, cfg)
        conf_mat, sk_fpr, sk_tpr, roc_auc = util.analyse_results(
            y_test, results)
        print('[ANN] ROC Area Under Curve: {0:.2f}'.format(roc_auc))
        plot_roc(sk_fpr, sk_tpr, roc_auc)
        results_to_save = {
            'conf_mat': conf_mat,
            'train_stats': train_stats,
            'roc_auc': float(roc_auc)
        }
        util.store_results(results_to_save,
                           os.path.join(model_dir, model_name + "_cm"))
Example 3
def main():
	cfg = util.read_config('config/digit.yaml')

	x_train, x_test, y_train, y_test = load_data(cfg['dataset'])
	x_train, y_train = util.shuffle_data(x_train, y_train)
	x_test, y_test   = util.shuffle_data(x_test, y_test)

	# Flatten the one-element label vectors into a 1-D array of class labels
	y_test3 = np.array([label[0] for label in y_test])

	model_name = cfg['svm']['model_name']
	model_dir = cfg['svm']['model_dir']

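	# Alternative workflows (single SVM / multi-class without ROC), kept for reference: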
#	svm = train_svm_multi(x_train, y_train, cfg)
#	svm = train_svm(x_train, y_train, cfg)
#	y_test2 = test_svm(svm, x_test)
#	plot_confusion_matrix(y_test3, y_test2)

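	# Train the multi-class SVM and evaluate it on the test set with ROC analysis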
	train_test_svm_multi_roc_ver(x_train, y_train, x_test, y_test3, cfg)
Example 4
def load_data(data_file):
	# Open the data file up front so a missing or unreadable file is reported cleanly
	try:
		csvfile = open(data_file, 'r')
	except IOError:
		print('[ERR] Failed to load data from file \'{0}\''.format(data_file))
		exit()

	with csvfile:
		csvreader = csv.reader(csvfile, delimiter=',')
		x = []
		y = []

		for row in csvreader:
			x_conv = (list(map(float, row[:-1])))
			y_conv = ([float(row[-1])])
			x.append(x_conv)
			y.append(y_conv)
			#print(x_conv)
			#print(y_conv)
		#y=relabel(y)
		x, y = util.shuffle_data(x, y)
		x_train, x_test, y_train, y_test = util.split_data(x, y, 0.9)

		return x_train, x_test, y_train, y_test
Example 5
def load_data(data_file, test_ratio_offset):
    # Open the data file up front so a missing or unreadable file is reported cleanly
    try:
        csvfile = open(data_file, 'r')
    except IOError:
        print('[ERR] Failed to load data from file \'{0}\''.format(data_file))
        exit()

    with csvfile:
        csvreader = csv.reader(csvfile, delimiter=',')
        x = []
        y = []

        for row in csvreader:
            for i in range(0, len(row) - 1):
                if row[1 + i] == '?':
                    row[1 + i] = 'a'

            x_conv = list(map(float, map(ord, row[1:])))
            y_conv = [0. if row[0] == 'p' else 1.]
            # Map any remaining '?' characters (ord 63) to -1.
            x_conv = [-1. if u == 63. else u for u in x_conv]
            x.append(x_conv)
            y.append(y_conv)

        split_ratio = 0.9

        x, y = util.shuffle_data(x, y)
        x_train, x_test, y_train, y_test = util.split_data(x, y, split_ratio)

        # Check that we can create a confusion matrix
        # 	(have at least 1 positive and negative sample in test set)

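        # Keep re-splitting until both classes are sufficiently represented in the test set
        # (assumes util.split_data draws a new random split on each call)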
        while (len([result for result in y_test if result[0] == 0.]) <
               (0.5 - test_ratio_offset) * len(y_test)
               or len([result for result in y_test if result[0] == 1.]) <
               (0.5 - test_ratio_offset) * len(y_test)):
            x_train, x_test, y_train, y_test = util.split_data(
                x, y, split_ratio)

        return x_train, x_test, y_train, y_test
Example 6
def train_network(sess, x, y, cfg):
    # Alias our training config to reduce code
    t_cfg = cfg['nn']

    # Alias config vars to reduce code
    neurons = t_cfg['parameters']['neurons']
    epochs = t_cfg['parameters']['epochs']
    learning_rate = t_cfg['parameters']['learning_rate']
    err_thresh = t_cfg['error_threshold']
    model_dir = t_cfg['model_dir']
    avg_factor = t_cfg['avg_factor']
    save_epoch = t_cfg['save_epoch']
    valid_thresh = t_cfg['valid_threshold']

    print(
        '[ANN] \tTraining parameters: epochs={0}, learning_rate={1:.2f}, neurons={2}'
        .format(epochs, learning_rate, neurons))

    # Create validation set
    x_train, x_valid, y_train, y_valid = util.split_data(x, y, 0.9)
    x_valid, y_valid = util.shuffle_data(x_valid, y_valid)

    # Create placeholders for tensors
    x_ = tf.placeholder(tf.float32, [None, 22], name='x_placeholder')
    y_ = tf.placeholder(tf.float32, [None, 1], name='y_placeholder')

    # Generate new random weights for new network
    weights = {
        'fc1': tf.Variable(tf.random_normal([22, neurons]), name='w_fc1'),
        'fc2': tf.Variable(tf.random_normal([neurons, neurons]), name='w_fc2'),
        'fc3': tf.Variable(tf.random_normal([neurons, 1]), name='w_fc3'),
    }

    # Generate new random biases for new network
    biases = {
        'fc1': tf.Variable(tf.random_normal([neurons]), name='b_fc1'),
        'fc2': tf.Variable(tf.random_normal([neurons]), name='b_fc2'),
        'fc3': tf.Variable(tf.random_normal([1]), name='b_fc3'),
    }

    # Construct our network and return the last layer to output the result
    final_layer = construct_network(x_, weights, biases, neurons)

    # Define error function
    cost_train = tf.reduce_mean(
        tf.losses.mean_squared_error(labels=y_, predictions=final_layer))
    cost_valid = tf.reduce_mean(
        tf.losses.mean_squared_error(labels=y_, predictions=final_layer))

    # Define optimiser and minimise error function task
    optimiser_train = tf.train.GradientDescentOptimizer(
        learning_rate=learning_rate).minimize(cost_train)
    optimiser_valid = tf.train.GradientDescentOptimizer(
        learning_rate=learning_rate).minimize(cost_valid)

    # Initialise global variables of the session
    sess.run(tf.global_variables_initializer())

    # Create error logging storage
    train_errors = []
    valid_errors = []

    # Set up our continuous plot
    fig = plt.figure()
    plt.title('Error vs Epoch')
    plt.plot(train_errors[:epochs], color='r', label='training')
    plt.plot(valid_errors[:epochs], color='b', label='validation')
    plt.xlabel('Epoch')
    plt.ylabel('Error')
    plt.legend()
    plt.grid()
    plt.ion()
    plt.show()

    # Measure training time
    t_start = time.time()

    diff_err = 1.
    vel_err = 0.
    acc_err = 0.
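    # diff_err: gap between validation and training error
    # vel_err:  epoch-to-epoch change in that gap (negative -> divergence)
    # acc_err:  epoch-to-epoch change in vel_err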

    # Generate a new random model name for new network model
    model_name = ''.join(
        random.choice(string.ascii_lowercase + string.digits)
        for _ in range(4))

    for i in range(epochs):
        # Run network on training and validation sets
        _, train_error = sess.run([optimiser_train, cost_train],
                                  feed_dict={
                                      x_: x_train,
                                      y_: y_train
                                  })
        # Evaluate the validation set (without running the optimiser on it)
        valid_error = sess.run(cost_valid,
                               feed_dict={
                                   x_: x_valid,
                                   y_: y_valid
                               })

        # If we're at a save epoch, save!
        if i % save_epoch == 0:
            model = util.save_model(
                sess, weights, biases, neurons, train_errors,
                os.path.join(model_dir, model_name + "_model"))

        # Add new errors to list
        train_errors.append(train_error)
        valid_errors.append(valid_error)

        # If we have at least an averageable amount of samples
        if i > avg_factor:
            avg_train_error = 0
            avg_valid_error = 0
            # Get sum over last n epochs
            for j in range(0, avg_factor):
                avg_train_error += train_errors[i - j]
                avg_valid_error += valid_errors[i - j]
            # Average them
            avg_train_error /= avg_factor
            avg_valid_error /= avg_factor

            # Calculate change in velocity of error difference
            acc_err = vel_err - (diff_err -
                                 abs(avg_valid_error - avg_train_error))

            # Calculate change in error difference (positive -> convergence, negative -> divergence)
            vel_err = diff_err - abs(avg_valid_error - avg_train_error)

            # Calculate error difference between validation and training
            diff_err = abs(avg_valid_error - avg_train_error)
            # print('[ANN] Epoch: {0:4d}, Δerr = {1:7.4f}, 𝛿(Δerr) = {2:7.4f}, 𝛿(𝛿(Δerr)) = {3:7.4f}'.format(i, diff_err, vel_err, acc_err)) # DEBUG

        # Terminate early if we hit the target error, or if training and validation error diverge
        if train_error <= err_thresh or (diff_err > valid_thresh
                                         and vel_err < 0.):
            break

        # Set plot settings
        if i > 0:
            plt.plot(train_errors[:epochs], color='r', label='training')
            plt.plot(valid_errors[:epochs], color='b', label='validation')
            plt.axis([0, i, 0., 1.])
            plt.draw()
            plt.pause(0.001)

    plt.ioff()

    t_elapsed = time.time() - t_start

    # Calculate new simple accuracy from final error
    accuracy = 1 - train_error

    # Save model to file
    model = util.save_model(sess, weights, biases, neurons, train_errors,
                            os.path.join(model_dir, model_name + "_model"))

    print('\n[ANN] Training Completed:')

    # Calculate number of minutes, seconds and milliseconds elapsed
    t_m = t_elapsed / 60
    t_s = t_elapsed % 60
    t_ms = (t_s % 1) * 1000

    print('[ANN]\tModel name: {0}'.format(model_name))
    print('[ANN]\tSimple model accuracy: {0:.3f}%'.format(accuracy * 100))
    print('[ANN]\tTime elapsed: {0:2d}m {1:2d}s {2:3d}ms'.format(
        int(t_m), int(t_s), int(t_ms)))

    return model, model_name, {
        'num_layers': len(weights),
        'layer_width': neurons,
        'learning_rate': learning_rate,
        'time_to_train': t_elapsed,
        'train_errors': [float(i) for i in train_errors],
        'valid_errors': [float(i) for i in valid_errors]
    }
Example 7
    def train(self):
        self.config = tf.ConfigProto()
        self.config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=self.config)
        with self.sess:
            if self.load_model():
                print(' [*] Load SUCCESS!\n')
            else:
                print(' [!] Load Failed...\n')
                self.sess.run(tf.global_variables_initializer())
            train_writer = tf.summary.FileWriter("./logs", self.sess.graph)
            merged = tf.summary.merge_all()
            self.counter = 1
            word2int, int2word, vocab_size, self.training_data = get_hack_data()
            self.go = word2int["<GO>"]
            self.end = word2int["<EOS>"]
            self.pad = word2int["<PAD>"]
            #print(self.training_data.shape)
            k = (len(self.training_data) // self.batch_sizer)
            self.start_time = time.time()
            loss_g_val, loss_d_val = 0, 0
            self.training_data = self.training_data[0:(self.batch_sizer * k)]
            test_counter = 0
            print('Starting the Training....')
            print(self.end)

            for e in range(0, self.epoch):
                epoch_loss = 0.
                self.training_data = shuffle_data(self.training_data)
                mean_epoch_loss = []
                for i in range(0, k):
                    print(i)
                    batch = self.training_data[i * self.batch_sizer:(i + 1) *
                                               self.batch_sizer]
                    length = len(max(batch, key=len))
                    batched_data, l = pad_sequences(batch, word2int)
                    batched_data = np.asarray(batched_data, dtype="int32")
                    _, loss_val, loss_histo = self.sess.run(
                        [self.optim, self.loss, self.summary_loss],
                        feed_dict={
                            self.input: batched_data,
                            self.targets: batched_data,
                            self.max_seq_len: length,
                            self.seq_length: l,
                            self.batch_si: self.batch_sizer,
                            self.go_index: self.go
                        })
                    train_writer.add_summary(loss_histo, self.counter)
                    self.counter = self.counter + 1
                    epoch_loss += loss_val
                    mean_epoch_loss.append(loss_val)
                mean = np.mean(mean_epoch_loss)
                std = np.std(mean_epoch_loss)
                epoch_loss /= k
                print('Epoch training loss mean: ', mean)
                print('Epoch training loss std: ', std)
                print("Loss of Seq2Seq Model: %f" % epoch_loss)
                print("Epoch%d" % (e))

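                # Every epoch: save a checkpoint, then score one random request from the anomaly file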
                if e % 1 == 0:
                    save_path = self.saver.save(
                        self.sess,
                        "C:/Users/Andreas/Desktop/seq2seq - continous/checkpoint/model.ckpt",
                        global_step=self.save_epoch)
                    print("model saved: %s" % save_path)

                    data = get_requests_from_file(
                        "C:/Users/Andreas/Desktop/seq2seq - continous/data/anomaly.txt"
                    )
                    random_number = np.random.randint(0, len(data))

                    data = generate_sentence_int([data[random_number]],
                                                 word2int)
                    batched_test_data, l = pad_sequences(data, word2int)
                    batched_test_data = np.asarray(batched_test_data,
                                                   dtype="int32")
                    ba_si = 1
                    size = l[0]
                    print(batched_test_data)
                    w, test, loss_eval = self.sess.run(
                        [self.probs, self.decoder_output, self.loss],
                        feed_dict={
                            self.input: batched_test_data,
                            self.max_seq_len: size,
                            self.seq_length: l,
                            self.batch_si: ba_si,
                            self.go_index: self.go,
                            self.eos_index: self.end,
                            self.targets: batched_test_data
                        })

                    coefs = np.array([
                        w[j][batched_test_data[0][j]]
                        for j in range(len(batched_test_data))
                    ])
                    print(coefs)
                    coefs = coefs / coefs.max()
                    print(coefs)
                    print(coefs.shape)
                    intsent = np.argmax(test, axis=2)
                    tester = getsentencce(intsent[0], int2word)
                    print(tester)
                    self.save_epoch += 1
                    print("Loss of test_data: %f" % loss_eval)

            print("training finished")
Example 8
def train():
    TIMESTAMP = "{0:%Y-%m-%d-%H-%M/}".format(datetime.now())
    log.log_info('program start')
    data, num_good, num_bad = util.load_train_data(num_data // 2)
    log.log_debug('Data loading completed')

    # resample
    data, length = util.resample(data, 600)
    data = util.reshape(data, length)
    good_data_origin = data[:num_good, :]
    bad_data_origin = data[num_good:, :]

    # extract bad data for test and train
    permutation = list(np.random.permutation(len(bad_data_origin)))
    shuffled_bad_data = bad_data_origin[permutation, :]
    test_bad_data = shuffled_bad_data[:int(num_bad * 0.3), :]
    train_bad_data_origin = shuffled_bad_data[int(num_bad * 0.3):, :]
    # extract corresponding good data for test and train
    permutation = list(np.random.permutation(len(good_data_origin)))
    shuffled_good_data = good_data_origin[permutation, :]
    test_good_data = shuffled_good_data[:len(test_bad_data), :]
    train_good_data = shuffled_good_data[len(test_bad_data):, :]

    assert len(test_bad_data) == len(test_good_data)
    # construct test data
    test_y = np.array([1.] * len(test_good_data) + [0.] * len(test_bad_data), dtype=np.float).reshape(
        (len(test_bad_data) + len(test_good_data), 1))
    test_x = np.vstack((test_good_data, test_bad_data))

    # Combine good and bad data for training
    train_x = np.vstack((train_good_data, train_bad_data_origin))
    train_y = np.array([1.] * len(train_good_data) + [0.] * len(train_bad_data_origin), dtype=np.float).reshape(
        (len(train_bad_data_origin) + len(train_good_data), 1))

    # Expand the number of bad data for training
    train_x, train_y, num_expand = util.expand(train_x, train_y)

    # regularize
    for i in range(len(train_x)):
        train_x[i, :, 0] = util.regularize(train_x[i, :, 0])
        train_x[i, :, 1] = util.regularize(train_x[i, :, 1])
        train_x[i, :, 2] = util.regularize(train_x[i, :, 2])
    for i in range(len(test_x)):
        test_x[i, :, 0] = util.regularize(test_x[i, :, 0])
        test_x[i, :, 1] = util.regularize(test_x[i, :, 1])
        test_x[i, :, 2] = util.regularize(test_x[i, :, 2])

    # Shuffle the training set
    train_x, train_y = util.shuffle_data(train_x, train_y)

    log.log_debug('prepare completed')
    log.log_info('convolution layers: ' + str(conv_layers))
    log.log_info('filters: ' + str(filters))
    log.log_info('full connected layers: ' + str(fc_layers))
    log.log_info('learning rate: %f' % learning_rate)
    log.log_info('keep prob: ' + str(keep_prob))
    log.log_info('the number of expanding bad data: ' + str(num_expand))
    log.log_info('mini batch size: ' + str(mini_batch_size))

    if mini_batch_size != 0:
        assert mini_batch_size <= len(train_x)

    cnn = Cnn(conv_layers, fc_layers, filters, learning_rate)
    (m, n_W0, n_C0) = train_x.shape
    n_y = train_y.shape[1]

    # Construct the computation graph
    cnn.initialize(n_W0, n_C0, n_y)
    cost = cnn.cost()
    optimizer = cnn.get_optimizer(cost)
    predict, accuracy = cnn.predict()

    init = tf.global_variables_initializer()
    saver = tf.train.Saver()

    with tf.Session() as sess:

        # log for tensorboard
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter("resource/tsb/train/" + TIMESTAMP, sess.graph)
        test_writer = tf.summary.FileWriter("resource/tsb/test/" + TIMESTAMP)

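        # Optionally wrap the session in the TensorFlow CLI debugger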
        if enable_debug:
            sess = tf_debug.LocalCLIDebugWrapperSession(sess)

        sess.run(init)

        for i in range(1, num_epochs + 1):
            if mini_batch_size != 0:
                num_mini_batches = int(m / mini_batch_size)
                mini_batches = util.random_mini_batches(train_x, train_y, mini_batch_size)

                cost_value = 0
                for mini_batch in mini_batches:
                    (mini_batch_x, mini_batch_y) = mini_batch
                    _, temp_cost = sess.run([optimizer, cost], feed_dict={cnn.x: mini_batch_x, cnn.y: mini_batch_y,
                                                                          cnn.keep_prob: keep_prob})
                    cost_value += temp_cost
                cost_value /= num_mini_batches
            else:
                _, cost_value = sess.run([optimizer, cost],
                                         feed_dict={cnn.x: train_x, cnn.y: train_y, cnn.keep_prob: keep_prob})

            # disable dropout
            summary_train, train_accuracy = sess.run([merged, accuracy],
                                                     feed_dict={cnn.x: train_x, cnn.y: train_y,
                                                                cnn.keep_prob: 1})
            summary_test, test_accuracy = sess.run([merged, accuracy],
                                                   feed_dict={cnn.x: test_x, cnn.y: test_y, cnn.keep_prob: 1})

            train_writer.add_summary(summary_train, i - 1)
            test_writer.add_summary(summary_test, i - 1)

            if print_detail and (i % 10 == 0 or i == 1):
                info = '\nIteration %d\n' % i + \
                       'Cost: %f\n' % cost_value + \
                       'Train accuracy: %f\n' % train_accuracy + \
                       'Test accuracy: %f' % test_accuracy
                log.log_info(info)

            # stop when test>0.95 and train>0.99
            if test_accuracy >= 0.95 and train_accuracy >= 0.99:
                info = '\nIteration %d\n' % i + \
                       'Cost: %f\n' % cost_value + \
                       'Train accuracy: %f\n' % train_accuracy + \
                       'Test accuracy: %f' % test_accuracy
                log.log_info(info)
                saver.save(sess, "resource/model/" + TIMESTAMP)
                break
            saver.save(sess, "resource/model/" + TIMESTAMP)
        train_writer.close()
        test_writer.close()

    log.log_info('program end')
Example 9
    def train(self):
        self.config = tf.ConfigProto()
        self.config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=self.config)

        with self.sess:
            if self.load_model():
                print(' [*] Load SUCCESS!\n')
            else:
                print(' [!] Load Failed...\n')
                self.sess.run(tf.global_variables_initializer())

            train_writer = tf.summary.FileWriter("./logs", self.sess.graph)
            merged = tf.summary.merge_all()
            self.counter = 1
            self.training_data = load_data_art()
            print(self.training_data.shape)
            k = (len(self.training_data) // self.batch_size)
            self.start_time = time.time()
            loss_g_val, loss_d_val = 0, 0
            self.training_data = self.training_data[0:(self.batch_size * k)]
            test_counter = 0
            for e in range(0, self.epoch):
                epoch_loss_d = 0.
                epoch_loss_g = 0.
                self.training_data = shuffle_data(self.training_data)
                for i in range(0, k):
                    self.batch_z = np.random.uniform(
                        -1, 1, [self.batch_size, self.z_dim])
                    self.batch = self.training_data[i *
                                                    self.batch_size:(i + 1) *
                                                    self.batch_size]
                    self.batch = np.asarray(self.batch)
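                    # Discriminator update on the current real batch and noise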
                    _, loss_d_val, loss_d = self.sess.run(
                        [self.d_optim, self.d_loss, self.summary_d_loss],
                        feed_dict={
                            self.input: self.batch,
                            self.z: self.batch_z
                        })
                    train_writer.add_summary(loss_d, self.counter)
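                    # Generator update using the same noise batch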
                    _, loss_g_val, loss_g = self.sess.run(
                        [self.g_optim, self.g_loss, self.summary_g_loss],
                        feed_dict={
                            self.z: self.batch_z,
                            self.input: self.batch
                        })
                    train_writer.add_summary(loss_g, self.counter)
                    self.counter = self.counter + 1
                    epoch_loss_d += loss_d_val
                    epoch_loss_g += loss_g_val
                epoch_loss_d /= k
                epoch_loss_g /= k
                print("Loss of D: %f" % epoch_loss_d)
                print("Loss of G: %f" % epoch_loss_g)
                print("Epoch%d" % (e))
                if e % 1 == 0:
                    save_path = self.saver.save(
                        self.sess,
                        "C:/Users/Andreas/Desktop/C-GAN/checkpoint/model.ckpt",
                        global_step=self.save_epoch)
                    print("model saved: %s" % save_path)
                    self.gen_noise = np.random.uniform(-1, 1, [1, self.z_dim])
                    fake_art = self.sess.run(
                        [self.Gen], feed_dict={self.z: self.gen_noise})
                    save_image(fake_art, self.name_art, self.save_epoch)
                    self.save_epoch += 1
            print("training finished")
Example 10
trainset_y, validset_y = np.split(trainset_y, [split_pos])

N = trainset_x.shape[0]
N_valid = validset_x.shape[0]

print("====================================")
print("Train set (%d images):" % N)
print(trainset_x.shape)
print("Valid set (%d images):" % N_valid)
print(validset_x.shape)
print("====================================\n\n")

# Sub-sample dataset, if needed for class balance
trainset_X = trainset_x
trainset_Y = trainset_y
(trainset_x, trainset_y) = util.shuffle_data(trainset_X, trainset_Y, K)
N = trainset_x.shape[0]

# Export meta-data
with open('%s/meta.csv' % log_dir, mode='w') as meta:
    meta_writer = csv.writer(meta,
                             delimiter=',',
                             quotechar='"',
                             quoting=csv.QUOTE_MINIMAL)
    meta_writer.writerow([
        'dataset', 'lrate', 'batch_size', 'batch_norm', 'augmentation',
        'optimizer', 'activation', 'initialization', 'filter_size',
        'depth_conv', 'depth_fc', 'width_conv', 'width_fc'
    ])
    meta_writer.writerow([
        data_nmn, learning_rate, batch_size, bn, augment, opt, act, itype,