Example #1
    def train(self, train_set, annotate, batch_size, epoch):
        batches = shuffle(train_set, annotate, self.C, self.S, batch_size,
                          epoch)
        for i, batch in enumerate(batches):
            x_batch, datum = batch
            feed_dict = {
                self.inp: x_batch,
                self.drop: .5,
                self.true_class: datum[0],
                self.confs1: datum[1],
                self.confs2: datum[2],
                self.true_coo: datum[3],
                self.upleft: datum[4],
                self.botright: datum[5],
                self.class_idtf: datum[6],
                self.conid1: datum[7],
                self.conid2: datum[8],
                self.cooid1: datum[9],
                self.cooid2: datum[10],
            }
            _, loss = self.sess.run([self.train_op, self.loss], feed_dict)
            print('step {} - batch {} - loss {}'.format(
                1 + i + self.step, 1 + i, loss))
            if (i + 1) % (self.save_every // batch_size) == 0:
                print('save checkpoint and binaries at step {}'.format(
                    self.step + i + 1))
                self.saver.save(self.sess,
                                'backup/model-{}'.format(self.step + i + 1))
                self.to_constant(inc=i + 1)

        print('save checkpoint and binaries at step {}'.format(self.step + i + 1))
        self.saver.save(self.sess, 'backup/model-{}'.format(self.step + i + 1))
        self.to_constant(inc=i + 1)
Example #2
    def train_model_supervised(self, x, y, num_epochs):
        print("Supervised Training")
        logging.info("Supervised Training")
        for epoch in range(num_epochs):
            x, y = shuffle(x=x, y=y)
            # for i, (images, labels) in enumerate(self.train_loader):
            for i in range(int(len(x) / self.args.batch_size)):
                # Convert torch tensor to Variable

                x_l, y_l, _ = sample_minibatch_deterministically(x, y, batch_i=i, batch_size=self.args.batch_size)

                labels_onehot = torch.zeros([y_l.size(0), self.num_classes])
                labels_onehot.scatter_(1, y_l.long().unsqueeze(1), 1)
                # Forward + Backward + Optimize
                loss, grad_loss = self.optimize_grad_and_net(
                    x_l, y_l.long(), labels_onehot,
                    self.net.grad_optimizer, self.net.optimizer, self.net)

                if (i + 1) % 10 == 0:
                    print('Epoch [%d/%d], Step [%d/%d], Loss: %.6f, Grad Loss: %.8f'
                          % (epoch + 1, self.num_epochs, i + 1, len(x) // self.batch_size,
                             loss.item(), grad_loss.item()))

                    logging.info('Epoch [%d/%d], Step [%d/%d], Loss: %.6f, Grad Loss: %.8f'
                                 % (epoch + 1, self.num_epochs, i + 1, len(x) // self.batch_size,
                                    loss.item(), grad_loss.item()))

            if (epoch + 1) % 10 == 0:
                perf = self.test_model(epoch + 1)
                if perf > self.best_perf:
                    torch.save(self.net.state_dict(), self.model_name + '_model_best.pkl')
                    self.net.train()

        # Save the Model and Stats
        pkl.dump(self.stats, open(self.model_name + '_stats.pkl', 'wb'))
        torch.save(self.net.state_dict(), self.model_name + '_model.pkl')
        if self.plot:
            plot(self.stats, name=self.model_name)
    def train_model_helper(self, x, y, is_supervised=True, weight=0.0):
        # can replace the two lines below with sample_minibatch
        x, y = shuffle(x=x, y=y)
        x_mb, y_mb, _ = sample_minibatch_deterministically(
            x, y, batch_i=1, batch_size=self.args.batch_size)

        labels_onehot = torch.zeros([y_mb.size(0), self.num_classes])
        labels_onehot.scatter_(1, y_mb.long().unsqueeze(1), 1)

        if is_supervised:
            loss, grad_loss = self.optimize_grad_and_net(
                x_mb,
                y_mb.long(),
                self.net.grad_optimizer,
                self.net.optimizer,
                self.net,
                is_supervised=is_supervised)
        else:
            loss, grad_loss = self.optimize_grad_and_net(
                x_mb,
                y_mb.long(),
                self.net.grad_optimizer,
                self.net.optimizer,
                self.net,
                is_supervised=is_supervised,
                weight=weight)

        return loss, grad_loss
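
The PyTorch examples above rely on two small data helpers, shuffle(x=..., y=...) and sample_minibatch_deterministically(x, y, batch_i, batch_size), whose definitions are not shown. A minimal sketch of what they might look like, assuming tensors indexed along the first dimension (these stand-ins only mirror the call signatures used above, not the original project's code):

import torch

def shuffle(x, y):
    # Permute x and y with one shared random order.
    perm = torch.randperm(len(x))
    return x[perm], y[perm]

def sample_minibatch_deterministically(x, y, batch_i, batch_size):
    # Return the batch_i-th consecutive slice of (x, y); the third return
    # value stands in for the index information the callers above discard.
    start = batch_i * batch_size
    end = start + batch_size
    return x[start:end], y[start:end], torch.arange(start, end)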
Example #4
    def train_model_unsupervised(self, x, y, num_epochs):
        print("Unsupervised Training")
        logging.info("Unsupervised Training")
        for epoch in range(num_epochs):
            x, y = shuffle(x=x, y=y)
            for i in range(int(len(x) / self.args.batch_size)):
                x_l, y_l, _ = sample_minibatch_deterministically(x, y, batch_i=i, batch_size=self.args.batch_size)

                labels_onehot = torch.zeros([y_l.size(0), self.num_classes])
                labels_onehot.scatter_(1, y_l.unsqueeze(1).long(), 1)
                out = x_l
                # Forward + Backward + Optimize
                for (optimizer, forward) in zip(self.net.optimizers, self.net.forwards):
                    if self.conditioned:
                        out = self.optimizer_module(optimizer, forward, out, labels_onehot)
                    else:
                        out = self.optimizer_module(optimizer, forward, out)

            if (epoch+1) % 1 == 0:
                perf = self.test_model(epoch+1)
                if perf > self.best_perf:
                    torch.save(self.net.state_dict(), self.model_name+'_model_best.pkl')
                    self.net.train()

        # Save the Model and Stats
        pkl.dump(self.stats, open(self.model_name+'_stats.pkl', 'wb'))
        torch.save(self.net.state_dict(), self.model_name+'_model.pkl')
        if self.plot:
            plot(self.stats, name=self.model_name)
Example #5
def train(net_out, input_pb):
    batches = data.shuffle()
    loss, loss_ph = loss_op(net_out)
    train_op = build_train_op(loss)
    if FLAGS['summary']:
        summary_op, writer = build_summary_op()

    # loss_mva = None;
    profile = list()

    config = tf.ConfigProto()
    # config.gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction = utility)
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True

    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())

        for i, (x_batch, datum) in enumerate(batches):

            if not i:
                print(
                    train_stats.format(FLAGS['lr'], FLAGS['batch'],
                                       FLAGS['epoch'], FLAGS['save']))

            feed_dict = {loss_ph[key]: datum[key] for key in loss_ph}
            feed_dict[input_pb] = x_batch
            # feed_dict.update(self.feed)

            fetches = [train_op, loss]

            if FLAGS['summary']:
                fetches.append(summary_op)

            fetched = sess.run(fetches, feed_dict)
            final_loss = fetched[1]

            # if loss_mva is None: loss_mva = loss
            # loss_mva = .9 * loss_mva + .1 * loss
            step_now = FLAGS['load'] + i + 1

            if FLAGS['summary']:
                writer.add_summary(fetched[2], step_now)

            form = 'step {} - loss {}'
            print(form.format(step_now, final_loss))

            profile += [final_loss]

            ckpt = (i + 1) % FLAGS['save']
            args = [sess, step_now, profile]
            if not ckpt:
                save_ckpt(*args)

        if ckpt:
            save_ckpt(*args)
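
Example #5 delegates checkpointing to a save_ckpt(sess, step, profile) helper that is not shown. A rough sketch under the assumption that it writes a 'backup/model-<step>' checkpoint (the naming used in Example #1) and pickles the running loss profile next to it; this is an illustration, not the original function:

import pickle
import tensorflow as tf

def save_ckpt(sess, step, profile):
    # The graph's variables must already exist when the Saver is built.
    saver = tf.train.Saver()
    path = saver.save(sess, 'backup/model', global_step=step)
    with open('backup/profile-{}.pkl'.format(step), 'wb') as f:
        pickle.dump(profile, f)
    print('checkpoint saved to {}'.format(path))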
Example #6
def run():
    print("Training...")
    gen = int(os.listdir(c.weights_dir + "/current")[0])
    mod = model.Model()
    mod.load_weight(c.weights_dir + "/{0}.pkl".format(gen))

    print("preparing data...")
    obsv, prob, result = data.load()
    count = np.size(obsv, 0)
    count_train = int(count * 0.8)
    obsv, prob, result = data.shuffle(obsv, prob, result)

    train_obsv = obsv[0:count_train]
    train_prob = prob[0:count_train]
    train_result = result[0:count_train]

    test_obsv = obsv[count_train:count]
    test_prob = prob[count_train:count]
    test_result = result[count_train:count]

    mod.test(test_obsv, test_prob, test_result)

    for d_gen in range(c.train_count):
        for epoch in range(c.train_epoch):
            print("epoch {0} of {1}".format(epoch + 1, c.train_epoch))

            tr_obsv, tr_prob, tr_result = data.augment(train_obsv, train_prob,
                                                       train_result)
            tr_obsv, tr_prob, tr_result = data.shuffle(tr_obsv, tr_prob,
                                                       tr_result)

            mod.train(tr_obsv, tr_prob, tr_result)
            # mod.train(train_obsv, train_prob, train_result)
            mod.test(test_obsv, test_prob, test_result)

            mod.save_weight(c.weights_dir + "/{0}.pkl".format(gen + 1))
            with open(c.weights_dir + "/eval/queue/{0}".format(gen + 1),
                      mode="w"):
                pass
        os.rename(c.weights_dir + "/current/{0}".format(gen),
                  c.weights_dir + "/current/{0}".format(gen + 1))
        gen = gen + 1
    print("Train Finished!")
Example #7
def run():
    gen = int(os.listdir(C.WEIGHTS_DIRECTORY + "/current")[0])
    mod = model.Model()
    mod.load_weight(C.WEIGHTS_DIRECTORY + "/{0}.pkl".format(gen))

    print("preparing data...")
    obsv, prob, result = data.load()
    count = np.size(obsv, 0)
    count_train = int(count * 0.8)
    obsv, prob, result = data.shuffle(obsv, prob, result)

    train_obsv = obsv[0:count_train]
    train_prob = prob[0:count_train]
    train_result = result[0:count_train]

    test_obsv = obsv[count_train:count]
    test_prob = prob[count_train:count]
    test_result = result[count_train:count]
    
    mod.test(test_obsv, test_prob, test_result)

    for d_gen in range(C.WEIGHT_COUNT):
        for epoch in range(C.WEIGHT_EPOCH):
            print("epoch {0} of {1}".format(epoch + 1, C.WEIGHT_EPOCH))
            
            tr_obsv, tr_prob, tr_result = data.augment(train_obsv, train_prob, train_result)
            tr_obsv, tr_prob, tr_result = data.shuffle(tr_obsv, tr_prob, tr_result)

            mod.train(tr_obsv, tr_prob, tr_result)
            # mod.train(train_obsv, train_prob, train_result)
            mod.test(test_obsv, test_prob, test_result)

            mod.save_weight(C.WEIGHTS_DIRECTORY + "/{0}.pkl".format(gen + 1))
            with open(C.WEIGHTS_DIRECTORY + "/eval/queue/{0}".format(gen + 1), mode="w"):
                pass
        os.rename(C.WEIGHTS_DIRECTORY + "/current/{0}".format(gen), C.WEIGHTS_DIRECTORY + "/current/{0}".format(gen + 1))
        gen = gen + 1
Example #8
    def train_model_supervised(self, x, y, num_epochs):
        for epoch in range(num_epochs):
            x, y = shuffle(x=x, y=y)
            for i in range(int(len(x) / self.args.batch_size)):

                x_l, y_l, _ = sample_minibatch_deterministically(
                    x, y, batch_i=i, batch_size=self.args.batch_size)

                labels_onehot = torch.zeros([y_l.size(0), self.num_classes])
                labels_onehot.scatter_(1, y_l.long().unsqueeze(1), 1)
                out = x_l

                for (optimizer, forward) in zip(self.net.optimizers,
                                                self.net.forwards):
                    if self.conditioned:
                        out = self.optimizer_module(optimizer, forward, out,
                                                    labels_onehot)
                    else:
                        out = self.optimizer_module(optimizer, forward, out)
                # synthetic model
                # Forward + Backward + Optimize
                loss, grad_loss = self.optimizer_dni_module(
                    x_l, y_l.long(), labels_onehot, self.net.grad_optimizer,
                    self.net.optimizer, self.net)

                if (i + 1) % 10 == 0:
                    print(
                        'Epoch [%d/%d], Step [%d/%d], Loss: %.6f, Grad Loss: %.8f'
                        % (epoch + 1, self.num_epochs, i + 1, len(x) //
                           self.batch_size, loss.item(), grad_loss.item()))

                    logging.info(
                        'Epoch [%d/%d], Step [%d/%d], Loss: %.6f, Grad Loss: %.8f'
                        % (epoch + 1, self.num_epochs, i + 1, len(x) //
                           self.batch_size, loss.item(), grad_loss.item()))

            if (epoch + 1) % 10 == 0:
                perf = self.test_model(epoch + 1)
                if perf > self.best_perf:
                    torch.save(self.net.state_dict(),
                               self.model_name + '_model_best.pkl')
                    self.net.train()

        # Save the Model and Stats
        pkl.dump(self.stats, open(self.model_name + '_stats.pkl', 'wb'))
        torch.save(self.net.state_dict(), self.model_name + '_model.pkl')
        if self.plot:
            plot(self.stats, name=self.model_name)
Example #9
def yield_batch(file, training=True):
    """
  Yields batches of (data, label) as pair of Tensors.
  """
    data, label = data_module.load_point_cloud(file)
    data, label = data_module.shuffle(data, label)
    if training:
        data_module.random_rotate_point_cloud(data)
        data_module.jitter_point_cloud(data)
    num_batches = data.shape[0] // BATCH
    for batch in range(num_batches):
        start_idx = batch * BATCH
        end_idx = (batch + 1) * BATCH
        data_batch = torch.tensor(data[start_idx:end_idx, :, :]).to(DEVICE)
        label_batch = torch.tensor(label[start_idx:end_idx, :],
                                   dtype=torch.long).to(DEVICE)
        yield data_batch, label_batch
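
A training loop consuming yield_batch() could look roughly like the sketch below; the model, optimizer, and train_files arguments are placeholders for illustration, and the loss assumes per-point integer labels as suggested by the label slicing above:

import torch.nn.functional as F

def train_one_epoch(model, optimizer, train_files):
    # Assumes model maps a (BATCH, N, 3) point cloud batch to
    # (BATCH, N, num_classes) logits.
    model.train()
    for file in train_files:
        for data_batch, label_batch in yield_batch(file, training=True):
            optimizer.zero_grad()
            logits = model(data_batch)
            # cross_entropy expects the class dimension second
            loss = F.cross_entropy(logits.permute(0, 2, 1), label_batch)
            loss.backward()
            optimizer.step()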
Example #10
    assert args.data_file, "Error: you must provide a text file to split"

    file_name = args.data_file.split('/')[-1]
    base_name = file_name.split('.')[0]  # name without extension

    # files to write to
    f_train = codecs.open(base_name + ".train.utf8", 'w', 'utf-8')
    f_test = codecs.open(base_name + ".test.utf8", 'w', 'utf-8')
    f_validation = codecs.open(base_name + ".validation.utf8", 'w', 'utf-8')

    # extract the data
    data.dataFromFiles(args.data_file,
                       getData=True,
                       getTestSet=False,
                       getValidationSet=False)
    data.shuffle()

    n_test = data.n_instances // 10

    # 1/10 for the test set
    X, Y, _, __ = data.getBatch(n_test)
    s = '\n'.join(X)
    f_test.write(s)
    print("Test set created: " + base_name + ".test.utf8")

    # 1/10 for the validation set
    X, Y, _, __ = data.getBatch(n_test)
    s = '\n'.join(X)
    f_validation.write(s)
    print("Validation set created: " + base_name + ".validation.utf8")
Example #11
    def train(self, epochs, train_batch_size, rate):
        '''
        Train the network.
        :param epochs: number of training epochs
        :param train_batch_size: mini-batch size
        :param rate: learning rate
        :return:
        '''
        with self.graph.as_default():
            # data part
            train_batch_samples = tf.placeholder(dtype=tf.float32,
                                                 shape=(train_batch_size, 28,
                                                        28, 1))
            train_batch_labels = tf.placeholder(dtype=tf.float32,
                                                shape=(train_batch_size, 10))

            train_batch_logits = self.forward(train_batch_samples)
            # train_loss
            # through softmax_...._logits,we get tensor of shape(batchs,)
            # through reduce_mean(),we get a "number"
            train_batch_loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(
                    labels=train_batch_labels, logits=train_batch_logits))

            #optimizer
            train_batch_optimizer = tf.train.AdamOptimizer(
                learning_rate=rate).minimize(train_batch_loss)
            # train_batch_optimizer=tf.train.GradientDescentOptimizer(learning_rate=rate).minimize(train_batch_loss)

            #prediction
            train_batch_prediction = tf.nn.softmax(train_batch_logits)

        #run
        with self.session as sess:
            tf.global_variables_initializer().run()
            print("----------Training Start----------")

            #epoch
            epoch = 1
            while epoch < epochs:
                print("epoch:", epoch)
                samples, labels = data.shuffle()

                #mini_batch
                for i in range(0, data.train_data_size, train_batch_size):
                    _, loss, prediction = sess.run(
                        fetches=[
                            train_batch_optimizer, train_batch_loss,
                            train_batch_prediction
                        ],
                        feed_dict={
                            train_batch_samples:
                            samples[i:i + train_batch_size],
                            train_batch_labels: labels[i:i + train_batch_size]
                        })

                    print("mini_batch", i, "~", i + train_batch_size, "of",
                          epoch, "epochs")
                    print("loss:", loss)
                    print(
                        "accuracy:",
                        self.accuracy(prediction,
                                      labels[i:i + train_batch_size]), "/",
                        train_batch_size)
                epoch += 1
Example #12
input_c = 3

batch_size = 8
# the paper's default is 135 epochs
epoch = 1
S = 7
B = 2
C = 20

labels = [
    'person', 'bird', 'cat', 'cow', 'dog', 'horse', 'sheep', 'aeroplane',
    'bicycle', 'boat', 'bus', 'car', 'motorbike', 'train', 'bottle', 'chair',
    'dining table', 'potted plant', 'sofa', 'tv/monitor'
]

DataSet = shuffle(ann_path, img_path, labels, batch_size, epoch)

x = tf.placeholder(tf.float32,
                   shape=[None, input_h, input_w, input_c],
                   name='input')
_probs = tf.placeholder(tf.float32, shape=[None, S * S, C], name='probs')
_confs = tf.placeholder(tf.float32, shape=[None, S * S, B], name='confs')
_coord = tf.placeholder(tf.float32, shape=[None, S * S, B, 4], name='coord')
_proid = tf.placeholder(tf.float32, shape=[None, S * S, C], name='proid')
_areas = tf.placeholder(tf.float32, shape=[None, S * S, B], name='areas')
_upleft = tf.placeholder(tf.float32, shape=[None, S * S, B, 2], name='upleft')
_botright = tf.placeholder(tf.float32,
                           shape=[None, S * S, B, 2],
                           name='botright')

model = YOLO()
Example #13
    def train(self, epochs, train_batch_size, rate):
        '''
        Train the network.
        :param epochs: number of training epochs
        :param train_batch_size: mini-batch size
        :param rate: learning rate
        :return:
        '''
        # define graph
        with self.graph.as_default():
            # data part
            with tf.name_scope("input"):
                train_batch_samples = tf.placeholder(
                    dtype=tf.float32, shape=(train_batch_size, 784),
                    name="train_batch_samples")
                train_batch_labels = tf.placeholder(
                    dtype=tf.float32, shape=(train_batch_size, 10),
                    name="train_batch_labels")
            with tf.name_scope("train_logits"):
                train_batch_logits = self.forward(train_batch_samples)
            # train_loss
            # softmax_cross_entropy_with_logits returns a tensor of shape (batch,);
            # reduce_mean() collapses it to a scalar
            with tf.name_scope("loss"):
                train_batch_loss = tf.reduce_mean(
                    tf.nn.softmax_cross_entropy_with_logits(train_batch_logits,
                                                            train_batch_labels))
                tf.scalar_summary(tags="train_batch_loss", values=train_batch_loss)
            # optimizer
            with tf.name_scope("optimizer"):
                train_batch_optimizer = tf.train.GradientDescentOptimizer(
                    learning_rate=rate).minimize(train_batch_loss)

            # prediction
            with tf.name_scope("prediction"):
                train_batch_prediction = tf.nn.softmax(train_batch_logits)

            # summary
            summary = tf.merge_all_summaries()

        # visualization
        writer = tf.train.SummaryWriter(logdir="./log", graph=self.graph)

        # run
        with self.session as sess:
            tf.initialize_all_variables().run()
            print("----------Training Start----------")

            # epoch
            epoch = 1
            while epoch < epochs:
                print("epoch:", epoch)
                samples, labels = data.shuffle()

                # mini_batch
                for i in range(0, data.train_data_size, train_batch_size):
                    _, loss, prediction, summaries = sess.run(
                        fetches=[train_batch_optimizer, train_batch_loss,
                                 train_batch_prediction, summary],
                        feed_dict={
                            train_batch_samples: samples[i:i + train_batch_size],
                            train_batch_labels: labels[i:i + train_batch_size]
                        })

                    print("mini_batch", i, "~", i + train_batch_size, "of",
                          epoch, "epochs")
                    print("loss:", loss)
                    print("accuracy:",
                          self.accuracy(prediction, labels[i:i + train_batch_size]),
                          "/", train_batch_size)

                    # add summary
                    writer.add_summary(summary=summaries, global_step=i)
                epoch += 1
Example #14
    def train(self, trainInput, testInput, trainTarget, testTarget, \
        reg_lambda=0.0, learning_rate=1e-4, dropout=0.0, batch_size=32, epochs=50, \
        restore_model=False, save_model=True, save_freq=5):

        self.loss_op = self.loss + reg_lambda * self.regularizer

        #self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
        self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        self.update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.train_op = self.optimizer.minimize(self.loss_op)
        self.train_op = tf.group([self.train_op, self.update_ops])

        print("Training...")

        total_num_input = trainInput.shape[0]
        steps = math.ceil(total_num_input / batch_size)

        train_loss_history, test_loss_history, \
        train_accuracy_history, test_accuracy_history = [], [], [], []

        with tf.Session() as sess:
            tf.gfile.MakeDirs(self.save_folder)
            if restore_model:
                lastckpt = tf.train.latest_checkpoint(self.save_folder)
                print("Restoring from {}".format(lastckpt))
                self.saver = tf.train.import_meta_graph(lastckpt + '.meta')
                self.saver.restore(sess, lastckpt)
            else:
                sess.run(tf.global_variables_initializer())

            for e in range(1, epochs + 1):
                X, Y = shuffle(trainInput, trainTarget)

                print("Training phase:")
                for i in tqdm(range(0, steps)):
                    X_batch = X[i * batch_size:(i + 1) * batch_size, :]
                    Y_batch = Y[i * batch_size:(i + 1) * batch_size, :]
                    n_batch = X_batch.shape[0]
                    _, = sess.run(
                        [self.train_op],
                        feed_dict={
                            self.X: X_batch,
                            self.Y: Y_batch,
                            self.keep_prob: 1 - dropout,
                            self.training: True
                        })

                # Need to run in non-training mode for batch norm and dropout
                print("Test phase:")
                train_loss, train_accuracy, train_prediction = self._test(
                    sess, trainInput, trainTarget, batch_size=batch_size)
                test_loss, test_accuracy, test_prediction = self._test(
                    sess, testInput, testTarget, batch_size=batch_size)

                train_auc = roc_auc_score(trainTarget, train_prediction)
                test_auc = roc_auc_score(testTarget, test_prediction)

                train_loss_history.append(train_loss)
                train_accuracy_history.append(train_accuracy)
                test_loss_history.append(test_loss)
                test_accuracy_history.append(test_accuracy)

                print('Epoch %3d ==> Train Loss: %.4f, Train AUC: %.4f, Test Loss: %.4f, Test AUC: %.4f' % \
                    (e, train_loss_history[-1], train_auc, test_loss_history[-1], test_auc))

                if save_model and (e % save_freq == 0):
                    self.saver.save(sess,
                                    self.save_folder + 'model.ckpt',
                                    global_step=e)

            if save_model:
                self.saver.save(sess,
                                self.save_folder + 'model.ckpt',
                                global_step=epochs)

        loss_history = {"train": train_loss_history, "test": test_loss_history}
        accuracy_history = {
            "train": train_accuracy_history,
            "test": test_accuracy_history
        }
        return loss_history, accuracy_history
Example #15
'''
use this file to run some quick tests
'''
from __future__ import print_function, division
import data
import numpy as np
import tensorflow as tf
samples, labels = data.shuffle()
print("shape of samples:", samples.shape)
print("shape of labels:", labels.shape)

data.showpic(samples, labels, 10)
Example #16
TRAIN_FOLDER = './data_part1/train' # folder with training images
TEST_FOLDER = './data_part1/test'   # folder with testing images
SPLIT_RATE = 0.90        # split rate for training and validation sets

IMAGE_HEIGHT = 64  # height of the image
IMAGE_WIDTH = 64   # width of the image
NUM_CHANNELS = 1   # number of channels of the image

# ---------------------------------------------------------------------------------------------------------- #
# Description:                                                                                               #
#         Load the training set, shuffle its images and then split them in training and validation subsets.  #
#         After that, load the testing set.                                                                  #
# ---------------------------------------------------------------------------------------------------------- #
X_train, y_train, classes_train = load_multiclass_dataset(TRAIN_FOLDER, IMAGE_HEIGHT, IMAGE_WIDTH, NUM_CHANNELS)
X_train = X_train / 255.  # optionally: .reshape(-1, IMAGE_HEIGHT*IMAGE_WIDTH*NUM_CHANNELS) / 255.
X_train, y_train = shuffle(X_train, y_train, seed=42)
#X_train, y_train, X_val, y_val = split(X_train, y_train, SPLIT_RATE)

#print(X_train.shape, y_train.shape, X_val.shape, y_val.shape)

# ---------------------------------------------------------------------------------------------------------- #
# Description:                                                                                               #
#         Create a training graph that receives a batch of images and their respective labels and run a      #
#         training iteration or an inference job. Train the last FC layer using fine_tuning_op or the entire #
#         network using full_backprop_op. A weight decay of 1e-4 is used for full_backprop_op only.          #
# ---------------------------------------------------------------------------------------------------------- #
graph = tf.Graph()
with graph.as_default():
	X = tf.placeholder(tf.float32, shape = (None, IMAGE_HEIGHT, IMAGE_WIDTH, NUM_CHANNELS))
	learning_rate = tf.placeholder(tf.float32)
	is_training = tf.placeholder(tf.bool)
Example #17
parser.add_argument('--ni', type=int, help='Number of Iterations')
parser.add_argument('--opt', type=str, help='OPTimizer')
parser.add_argument('--ptt', nargs='+', type=int, help='ParTiTion')
parser.add_argument('--tb', action='store_true', help='TensorBoard')
parser.add_argument('--w', type=float, help='Weight')
parser.add_argument('--wd', type=float, help='Weight Decay')
args = parser.parse_args()

x, y = {'adult'    : data.load_adult,
        'cifar10'  : data.load_multi_cifar10,
        'cifar100' : data.load_multi_cifar100,
        'covtype'  : data.load_covtype,
        'kddcup08' : data.load_kddcup08,
        'letter'   : data.load_multi_letter,
        'mnist'    : data.load_multi_mnist}[args.ds]()
x, y = data.shuffle(x, y)
[[train_xx, train_yy],
 [val_xx,   val_yy],
 [test_xx,  test_yy]] = data.partition(x, y, args.ptt)
train_x, val_x, test_x = th.cat(train_xx), th.cat(val_xx), th.cat(test_xx)
train_y, val_y, test_y = th.cat(train_yy), th.cat(val_yy), th.cat(test_yy)
train_x, val_x, test_x = data.normalize([train_x, val_x, test_x])
train_xx = th.split(train_x, [len(x) for x in train_xx])
train_datasets = [D.TensorDataset(x) for x in train_xx]
train_loader = D.DataLoader(D.TensorDataset(train_x, train_y), args.bsi)
val_loader = D.DataLoader(D.TensorDataset(val_x, val_y), args.bsi)
test_loader = D.DataLoader(D.TensorDataset(test_x, test_y), args.bsi)
pclass_list = [len(y) / len(train_y) for y in train_yy]

n_classes = len(train_yy)
if len(args.bst) == n_classes:
Example #18
def prepare_data(X, y):
    X, y = shuffle(X, y)
    X = scale(X)

    return X, y, KFold(y.size, n_folds=5)
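
Example #18 reads like a scikit-learn preprocessing helper; assuming that is the source, the missing imports would be roughly the ones below. Note that KFold(n, n_folds=...) is the pre-0.18 sklearn.cross_validation signature; newer releases use sklearn.model_selection.KFold(n_splits=...) and iterate with kf.split(X).

# Probable imports for Example #18, assuming an older scikit-learn release.
from sklearn.utils import shuffle
from sklearn.preprocessing import scale
from sklearn.cross_validation import KFold  # pre-0.18 API

# Typical usage of the returned folds:
# X_s, y_s, folds = prepare_data(X, y)
# for train_idx, test_idx in folds:
#     train on X_s[train_idx], evaluate on X_s[test_idx]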