Example #1
    def train(self):
        """
        Runs a training loop on the model networks.
        """
        for i in range(self.num_steps):
            if c.ADVERSARIAL:
                # update discriminator
                batch = get_train_batch()
                print('Training discriminator...')
                self.d_model.train_step(batch, self.g_model)

            # update generator
            batch = get_train_batch()
            print('Training generator...')
            self.global_step = self.g_model.train_step(
                batch, discriminator=(self.d_model if c.ADVERSARIAL else None))

            # save the models
            if self.global_step % c.MODEL_SAVE_FREQ == 0:
                print('-' * 30)
                print('Saving models...')
                self.saver.save(self.sess,
                                c.MODEL_SAVE_DIR + 'model.ckpt',
                                global_step=self.global_step)
                print('Saved models!')
                print('-' * 30)

            # test generator model
            if self.global_step % c.TEST_FREQ == 0:
                self.test()
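These loops all call a get_train_batch() helper that returns one minibatch of training clips. A minimal sketch of such a helper, assuming the clips are preloaded into a NumPy array (all names and shapes here are hypothetical, not from the source):

import numpy as np

# hypothetical stand-in for the preloaded training data
TRAIN_CLIPS = np.random.rand(1000, 32, 32, 3).astype(np.float32)

def get_train_batch(batch_size=8):
    """Sample one random minibatch of clips (sketch only)."""
    indices = np.random.choice(len(TRAIN_CLIPS), batch_size, replace=False)
    return TRAIN_CLIPS[indices]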
Example #2
    def train_standard(self):

        for i in range(self.num_train_steps):

            # If using the adversarial loss, update the discriminator first
            if c.ADVERSARIAL:
                batch = get_train_batch()
                self.GAN_model.discriminator_train_step(batch)

            # Update the generator
            batch = get_train_batch()
            self.global_step = self.GAN_model.generator_train_step(batch)

            self.GAN_model.save_summaries()
            self.GAN_model.increment_global_step()

            if self.global_step % c.MODEL_SAVE_FREQ == 0:
                print('Saving Model')
                self.saver.save(self.sess,
                                c.MODEL_SAVE_DIR + 'model.ckpt',
                                global_step=self.global_step)
                print('Model Save Success')

            if self.global_step % c.TEST_FREQ == 0:
                print('Testing Model')
                self.test(self.global_step)
                print('Testing Completed')
Example #3
    def train(self):
        """
        Runs a training loop on the model networks.
        """

        np.random.shuffle(c.TEST_EXAMPLES)
        np.random.shuffle(c.TRAIN_EXAMPLES)

        examples_count = 0
        num_epoch = 0
        print('EPOCH - ' + str(num_epoch))
        for i in range(self.num_steps):

            print('Processing {} / {}'.format(examples_count + c.BATCH_SIZE,
                                              c.NUM_CLIPS))

            if c.ADVERSARIAL:
                for k in range(c.Critic_cycles):
                    # update discriminator
                    batch = get_train_batch(examples_count, c)
                    print('Training discriminator, step: ' + str(k))
                    self.d_model.train_step(batch, self.g_model)

            # update generator
            batch = get_train_batch(examples_count, c)

            examples_count += c.BATCH_SIZE

            print('Training generator...')
            self.global_step = self.g_model.train_step(
                batch, discriminator=(self.d_model if c.ADVERSARIAL else None))

            print('Processed {} / {}'.format(examples_count, c.NUM_CLIPS))

            # test a batch after each full pass through the data ('epoch')
            if examples_count >= c.NUM_CLIPS - c.BATCH_SIZE:
                np.random.shuffle(c.TRAIN_EXAMPLES)
                examples_count = 0
                self.test(c.TEST_BATCH_SIZE, full=True)
                num_epoch += 1
                print('EPOCH - ' + str(num_epoch))

            # save the models
            if self.global_step % c.MODEL_SAVE_FREQ == 0:
                print('-' * 30)
                print('Saving models...')
                self.saver.save(self.sess,
                                c.MODEL_SAVE_DIR + 'model.ckpt',
                                global_step=self.global_step)
                print('Saved models!')
                print('-' * 30)
Example #4
 def __init__(self,
              input_shape,
              num_classes,
              is_training=True,
              use_test_queue=False):
     self.input_shape = input_shape
     self.name = "resnet"
     self.graph = tf.Graph()
     with self.graph.as_default():
         if is_training:
             self.X, self.labels = get_train_batch(
                 cfg.dataset,
                 cfg.batch_size,
                 cfg.num_threads,
                 samples_per_epoch=cfg.samples_per_epoch)
             self.inference(self.X, num_classes)
             self.loss()
             self._summary()
             self.global_step = tf.Variable(0,
                                            name='global_step',
                                            trainable=False)
             self.optimizer = tf.train.AdamOptimizer(epsilon=0.1)
             self.train_op = self.optimizer.minimize(
                 self.total_loss, global_step=self.global_step)
         else:
             if use_test_queue:
                 self.X, self.labels = get_test_batch(
                     cfg.dataset, cfg.test_batch_size, cfg.num_threads)
             else:
                 self.X = tf.placeholder(tf.float32, shape=self.input_shape)
                 self.labels = tf.placeholder(tf.int32,
                                              shape=(self.input_shape[0], ))
             self.inference(self.X, num_classes, keep_prob=1.0)
             self.loss()
             self._summary()
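Example #4 builds everything inside its own tf.Graph, so a caller opens a session on model.graph and runs train_op in a loop. A minimal driver sketch, assuming the enclosing class is named ResNet and that TF1 queue runners feed self.X (both assumptions):

import tensorflow as tf

model = ResNet(input_shape=(128, 32, 32, 3), num_classes=10)  # hypothetical name/shape
with tf.Session(graph=model.graph) as sess:
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    for _ in range(1000):  # placeholder step count
        _, step = sess.run([model.train_op, model.global_step])
    coord.request_stop()
    coord.join(threads)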
Example #5
 def __init__(self, input_shape, num_classes, is_training=True, use_test_queue=False):
     self.input_shape = input_shape
     self.name = "capsnet"
     self.graph = tf.Graph()
     with self.graph.as_default():
         if is_training:
             self.X, self.labels = get_train_batch(cfg.dataset, cfg.batch_size, cfg.num_threads, samples_per_epoch=cfg.samples_per_epoch)
             self.Y = tf.one_hot(self.labels, depth=num_classes, axis=1, dtype=tf.float32)
             self.inference(num_classes)
             self._loss()
             self._summary()
             self.global_step = tf.Variable(0, name='global_step', trainable=False)
             self.optimizer = tf.train.AdamOptimizer()
             self.train_op = self.optimizer.minimize(self.total_loss, global_step=self.global_step)
         else:
             if use_test_queue:
                 self.X, self.labels = get_test_batch(cfg.dataset, cfg.test_batch_size, cfg.num_threads)
                 self.Y = tf.one_hot(self.labels, depth=num_classes, axis=1, dtype=tf.float32)
                 self.inference(num_classes, is_training=False)
                 self._loss()
                 self._summary()
             else:
                 self.X = tf.placeholder(tf.float32, shape=self.input_shape)
                 self.labels = tf.placeholder(tf.int32, shape=(self.input_shape[0],))
                 self.Y = tf.one_hot(self.labels, depth=num_classes, axis=1, dtype=tf.float32) 
                 self.inference(num_classes, is_training=False)
                 self._loss()
                 errors = tf.not_equal(tf.to_int32(self.labels), self.predictions)
                 self.error_rate = tf.reduce_mean(tf.cast(errors, tf.float32))
Example #6
def main(_):
    if not os.path.exists(args.ckpt_dir):
        os.makedirs(args.ckpt_dir)
    if not os.path.exists(args.train_dir):
        os.makedirs(args.train_dir)
    if not os.path.exists(args.test_dir):
        os.makedirs(args.test_dir)
    if not os.path.exists(args.feature_dir):
        os.makedirs(args.feature_dir)

    IMG_W = 32
    IMG_H = 32
    CAPACITY = 256

    if args.phase == 'train':

        image_list, label_list = get_train_files(args.train_dir)
        train_batch, label_batch = get_train_batch(image_list, label_list,
                                                   IMG_W, IMG_H,
                                                   args.batch_size, CAPACITY)
        run_train(image_batch=train_batch,
                  label_batch=label_batch,
                  n_class=args.n_class,
                  batch_size=args.batch_size,
                  checkpoint_dir=args.ckpt_dir,
                  lr=args.lr,
                  MAX_STEP=args.iters)

    elif args.phase == 'test':

        test_list = get_test_files(args.test_dir)
        test_batch = get_test_batch(test_list, IMG_W, IMG_H, args.batch_size,
                                    CAPACITY)
        print(test_batch)
        run_test(test_batch, args.n_class, args.ckpt_dir, args.batch_size)

    elif args.phase == 'feature_extraction':

        image_list, label_list = get_train_files(args.train_dir)
        train_batch, label_batch = get_train_batch(image_list, label_list,
                                                   IMG_W, IMG_H,
                                                   args.batch_size, CAPACITY)
        feature_extraction(train_batch, args.n_class, args.ckpt_dir,
                           args.batch_size)

    else:
        print('{:*^50}'.format('Unknown phase!'))
Example #7
 def enqueue_batches():
     while not coord.should_stop():
         batch_feats, batch_labels = utils.get_train_batch(
             train_file, args.batch_size)
         sess.run(enqueue_op,
                  feed_dict={
                      x: batch_feats,
                      y: batch_labels
                  })
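Example #7's enqueue_batches is a closure over a session, an enqueue op, and placeholders defined outside it. A sketch of that surrounding scaffolding, with shapes and thread count as assumptions:

import threading
import tensorflow as tf

# hypothetical feature/label shapes
x = tf.placeholder(tf.float32, shape=[None, 4096])
y = tf.placeholder(tf.float32, shape=[None, 10])
queue = tf.FIFOQueue(capacity=100, dtypes=[tf.float32, tf.float32])
enqueue_op = queue.enqueue([x, y])
batch_feats, batch_labels = queue.dequeue()

sess = tf.Session()
coord = tf.train.Coordinator()
# enqueue_batches (as defined above) keeps the queue full on background
# threads until coord.request_stop() is called
threads = [threading.Thread(target=enqueue_batches) for _ in range(2)]
for t in threads:
    t.daemon = True
    t.start()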
Example #8
    def train(self):

        for i in range(self.num_steps):

            # update discriminator
            batch = get_train_batch()
            print('Training discriminator...')
            self.d_model.train_step(batch)

            # update generator
            batch = get_train_batch()
            print('Training generator...')
            self.global_step = self.g_model.train_step(batch)

            if self.global_step % c.MODEL_SAVE_FREQ == 0:
                print('-' * 30)
                print('Saving models...')
                self.saver.save(self.sess, os.path.join(c.MODEL_SAVE_DIR, 'model.ckpt'), global_step=self.global_step)
                print('Saved models')
                print('-' * 30)
Example #9
    def train_next_batch(self, batch_size):
        train_indices = np.random.choice(self.num_train,
                                         batch_size,
                                         replace=True)
        train_imgs, train_vessels = utils.get_train_batch(
            self.train_img_raw,
            self.train_gt_raw,
            train_indices.astype(np.int32),
            img_size=self.image_size)
        train_vessels = np.expand_dims(train_vessels, axis=3)

        return train_imgs, train_vessels
Example #10
    def train(self):
        """
        Runs a training loop on the model networks.
        """
        for i in range(self.num_steps):
            if c.ADVERSARIAL:
                # update discriminator
                batch = get_train_batch(c.BATCH_SIZE, num_rec_out=1)
                print('Training discriminator...')
                self.buff.append(
                    self.d_model.train_step(batch, self.buff, self.g_model))
                # keep only the 100 most recent buffer entries
                if len(self.buff) > 100:
                    del self.buff[:-100]

            # update generator
            batch = get_train_batch(c.BATCH_SIZE, num_rec_out=1)
            print('Training generator...')
            self.global_step = self.g_model.train_step(
                batch, discriminator=(self.d_model if c.ADVERSARIAL else None))

            # save the models
            if self.global_step % c.MODEL_SAVE_FREQ == 0:
                print('-' * 30)
                print('Saving models...')
                self.saver.save(self.sess,
                                c.MODEL_SAVE_DIR + 'model.ckpt',
                                global_step=self.global_step)
                print('Saved models!')
                print('-' * 30)

            # test generator model
            if self.global_step % c.TEST_FREQ == 0:
                self.test()
Example #11
    def train_babysit(self):

        print(
            "The generator and discriminator will be balanced during training")

        for i in range(self.num_train_steps):

            if self.global_step % c.SUMMARY_FREQ == 0:
                print("Discriminator accuracy moving average : ",
                      self.GAN_model.discrim_acc_MA)

            # Update the discriminator only while its accuracy is not too high
            if self.GAN_model.discrim_acc_MA < c.UPPER:
                batch = get_train_batch()
                self.global_step = self.GAN_model.discriminator_train_step(
                    batch)

            # Update the generator only while the discriminator's accuracy
            # is not too low
            if self.GAN_model.discrim_acc_MA > c.LOWER:
                batch = get_train_batch()
                self.global_step = self.GAN_model.generator_train_step(batch)

            self.GAN_model.save_summaries()
            self.GAN_model.increment_global_step()

            if self.global_step % c.MODEL_SAVE_FREQ == 0:
                print('Saving Model')
                self.saver.save(self.sess,
                                c.MODEL_SAVE_DIR + 'model.ckpt',
                                global_step=self.global_step)
                print('Model Save Success')

            if self.global_step % c.TEST_FREQ == 0:
                print('Testing Model')
                self.test(self.global_step)
                print('Testing Completed')
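discrim_acc_MA reads as a moving average of the discriminator's accuracy, which is what the c.UPPER and c.LOWER bounds are compared against. A sketch of how such an average is typically maintained (the smoothing factor is an assumption, not from the source):

def update_moving_average(ma, batch_accuracy, alpha=0.9):
    # exponential moving average; alpha is a hypothetical smoothing factor
    return alpha * ma + (1 - alpha) * batch_accuracy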
Example #12
    def train(self):
        """
        Runs a training loop on the model networks.
        """
        delta = 45.6  # empirically chosen threshold on the windowed average
        adv_windowed_list = []
        for i in range(self.num_steps):
            if c.ADVERSARIAL:
                # update discriminator
                batch = get_train_batch()
                print('Training discriminator...')
                self.d_model.train_step(batch, self.g_model)

            # update generator
            batch = get_train_batch()
            print('Training generator...')
            if len(adv_windowed_list) > (c.WINDOW_SIZE - 1):
                adv_windowed_list.pop(0)

            if not adv_windowed_list:
                self.global_step, adv_windowed_list = self.g_model.train_step(
                    batch,
                    adv_windowed_list,
                    discriminator=(self.d_model if c.ADVERSARIAL else None))

            start = True
            if (sum(adv_windowed_list) / len(adv_windowed_list) <=
                    delta / c.WINDOW_SIZE):
                self.global_step, adv_windowed_list = self.g_model.train_step(
                    batch,
                    adv_windowed_list,
                    discriminator=(self.d_model if c.ADVERSARIAL else None))

            # keep training the generator (at most 10 extra steps) while the
            # windowed average stays above the threshold
            counter = 0
            while (sum(adv_windowed_list) / len(adv_windowed_list) >
                   delta / c.WINDOW_SIZE):
                print(sum(adv_windowed_list) / len(adv_windowed_list),
                      delta / c.WINDOW_SIZE)
                if not start:
                    adv_windowed_list.pop()
                self.global_step, adv_windowed_list = self.g_model.train_step(
                    batch,
                    adv_windowed_list,
                    discriminator=(self.d_model if c.ADVERSARIAL else None))
                counter += 1
                start = False
                print(counter)
                if counter >= 10:
                    break

            print('Out of loop')

            # save the models
            if self.global_step % c.MODEL_SAVE_FREQ == 0:
                print('-' * 30)
                print('Saving models...')
                self.saver.save(self.sess,
                                c.MODEL_SAVE_DIR + 'model.ckpt',
                                global_step=self.global_step)
                print('Saved models!')
                print('-' * 30)

            # test generator model
            if self.global_step % c.TEST_FREQ == 0:
                self.test()

        # final save and test once training completes
        print('-' * 30)
        print('Saving models...')
        self.saver.save(self.sess,
                        c.MODEL_SAVE_DIR + 'model.ckpt',
                        global_step=self.global_step)
        print('Saved models!')
        print('-' * 30)
        self.test()
Example #13
def main(args):

    # loading configurations
    with open(args.config) as f:
        config = yaml.safe_load(f)["configuration"]

    # set up workspace
    work_space = config["workspace"]
    tf_board = config["tf_board"]
    setup_workpath(work_space)
    name = config["Name"]

    # Construct or load embeddings
    print("Initializing embeddings ...")
    vocab_size = config["embeddings"]["vocab_size"]
    embed_size = config["embeddings"]["embed_size"]

    vocab_file = '%s/data/%s-%s' % (work_space, "vocab", vocab_size)
    print("\tDone.")

    # Build the model and compute losses
    (enc_num_layers, enc_num_units, enc_cell_type, enc_bidir, attn_num_units,
     dec_num_layers, dec_num_units, dec_cell_type, state_pass, infer_max_iter,
     l2_regularize, learning_rate) = get_model_config(config)

    (train_s_file, train_t_file, dev_s_file, dev_t_file, max_length,
     gpu_fraction, gpu_id, checkpoint_every, max_checkpoints, print_every,
     train_steps, is_beam_search, batch_size,
     beam_size) = get_training_config(config)

    print("Building model architecture ...")
    train_model = Seq2SeqModel(mode='train',
                               model_name=name,
                               vocab_size=vocab_size,
                               embedding_size=embed_size,
                               enc_num_layers=enc_num_layers,
                               enc_num_units=enc_num_units,
                               enc_cell_type=enc_cell_type,
                               enc_bidir=enc_bidir,
                               attn_num_units=attn_num_units,
                               dec_num_layers=dec_num_layers,
                               dec_num_units=dec_num_units,
                               dec_cell_type=dec_cell_type,
                               batch_size=batch_size,
                               beam_search=is_beam_search,
                               beam_size=beam_size,
                               infer_max_iter=infer_max_iter,
                               l2_regularize=l2_regularize,
                               learning_rate=learning_rate,
                               max_to_keep=max_checkpoints)

    print("\tDone.")

    logdir = '%s/nn_models/' % work_space
    restore_from = '%s/nn_models/' % work_space

    is_overwritten_training = logdir != restore_from  # True when restoring from a different directory

    # Set up session
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction,
                                visible_device_list=gpu_id)
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=False,
                                            gpu_options=gpu_options))
    init = tf.global_variables_initializer()
    sess.run(init)

    # TensorBoard writers
    train_writer = tf.summary.FileWriter(tf_board + 'train/', sess.graph)
    test_writer = tf.summary.FileWriter(tf_board + 'test/', sess.graph)

    try:
        saved_global_step = load(train_model.saver, sess, restore_from)
        if is_overwritten_training or saved_global_step is None:
            # The first training step will be saved_global_step + 1,
            # therefore we put -1 here for new or overwritten trainings.
            saved_global_step = -1

    except Exception:
        print("Something went wrong while restoring checkpoint. "
              "Training is terminated to avoid the overwriting.")
        raise

    # ##### Training #####
    # Load data
    print("Loading data ...")

    # Load vocabularies.
    if os.path.exists(vocab_file):
        vocab_table, reverse_vocab_table = create_vocab_tables(vocab_file)
    else:
        create_vocab_file(train_s_file, train_t_file, dev_s_file, dev_t_file,
                          vocab_file, vocab_size)
        vocab_table, reverse_vocab_table = create_vocab_tables(vocab_file)

    train_set, dev_set = prepare_train_dev_data(train_s_file, train_t_file,
                                                dev_s_file, dev_t_file,
                                                vocab_table, max_length)

    # Training
    last_saved_step = saved_global_step
    num_steps = saved_global_step + train_steps
    losses = []
    steps = []

    print("Start training ...")
    try:
        for step in range(saved_global_step + 1, num_steps):
            start_time = time.time()

            batch = get_train_batch(train_set, max_length, batch_size)

            loss_value = train_model.train(sess, batch)

            losses.append(loss_value)
            duration = (time.time() - start_time)
            if step % print_every == 0 and step != 0:
                # train perplexity
                t_perp = train_model.compute_perplexity(sess, batch)
                add_summary(train_writer, step, 'train perplexity', t_perp)

                # eval perplexity
                dev_str = ""
                if dev_set is not None:
                    eval_batch = get_train_batch(dev_set, max_length,
                                                 batch_size)

                    eval_perp = train_model.compute_perplexity(
                        sess, eval_batch)
                    add_summary(test_writer, step, 'eval perplexity',
                                eval_perp)
                    dev_str += "val_prep: {:.3f}\n".format(eval_perp)

                steps.append(step)
                info = 'step {:d}, loss = {:.6f}, perp: {:.3f}\n{}({:.3f} sec/step)'
                print(info.format(step, loss_value, t_perp, dev_str, duration))

            if step % checkpoint_every == 0:
                save(train_model.saver, sess, logdir, step)
                last_saved_step = step

    except KeyboardInterrupt:
        # Introduce a line break after ^C so save message is on its own line.
        print()

    finally:
        if step > last_saved_step:
            save(train_model.saver, sess, logdir, step)
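The loop above relies on an add_summary helper to log scalars outside the graph. A plausible TF1 implementation, shown here as an assumption rather than the project's actual code:

def add_summary(writer, step, tag, value):
    # write a single scalar value to TensorBoard (sketch)
    summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
    writer.add_summary(summary, step)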
Example #14
 task_losss = []
 for i in range(args.batch_size):
     '''
     Accumulate the inner-loop gradient for one sampled task
     '''
     users = np.random.choice(list(train_user_lists.keys()),
                              args.task_size,
                              replace=False)
     model_tmp = copy.deepcopy(model)
     model_tmp.train()
     optimizer_tmp = torch.optim.Adam(model_tmp.parameters(),
                                      lr=args.inner_lr)
     for inner_batch in range(args.inner_batch):
         k_shot = np.random.randint(args.n_shot) + 1
         pos_items, neg_items, adj_lists_sample = get_train_batch(
             users, train_user_lists, train_item_lists, k_shot,
             num_users, num_items)
         items, labels, adj_lists_sample = get_train_batch_ctr(
             users, train_user_lists, train_item_lists, k_shot,
             neg_lists)
         labels = torch.tensor(labels).cuda()
         optimizer_tmp.zero_grad()
         # loss = model_tmp.loss(users,pos_items,neg_items,adj_lists_sample,few_shot=k_shot)
         loss = model_tmp.loss_ctr(users,
                                   items,
                                   labels,
                                   adj_lists_sample,
                                   few_shot=k_shot)
         # backward() on the loss computes gradients for each trainable parameter
         loss.backward()
         # optimizer.step() combines those gradients with the update rule to
         # actually change the parameter values
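The listing cuts off mid-iteration; in a MAML-style inner loop, the lines that plausibly follow apply the inner update and then record the adapted copy's loss for the outer meta-update. A hedged continuation sketch reusing the example's names (not from the source):

         optimizer_tmp.step()  # apply the inner-loop update to the task copy
     # after adaptation, keep this task's loss for the outer meta-update
     task_losss.append(loss.detach())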