Example #1
import os
from time import gmtime, strftime

import tensorflow as tf

# JMLH, Dataset, train_step, test_step, eval_cls_map and REPO_PATH are assumed
# to come from the surrounding repository.
def train(set_name, bbn_dim, batch_size, max_iter=100000):
    model = JMLH(set_name, bbn_dim)

    data = Dataset(set_name=set_name, batch_size=batch_size)

    opt = tf.keras.optimizers.Adam(1e-4)

    train_iter = iter(data.train_data)
    test_iter = iter(data.test_data)

    time_string = strftime("%a%d%b%Y-%H%M%S", gmtime())
    result_path = os.path.join(REPO_PATH, 'result', set_name + '_JMLH')
    save_path = os.path.join(result_path, 'model', time_string)
    summary_path = os.path.join(result_path, 'log', time_string)
    os.makedirs(result_path, exist_ok=True)
    os.makedirs(save_path, exist_ok=True)

    writer = tf.summary.create_file_writer(summary_path)
    checkpoint = tf.train.Checkpoint(opt=opt, model=model)
    for i in range(max_iter):
        with writer.as_default():
            train_batch = next(train_iter)
            train_code, loss = train_step(model, train_batch, opt)
            train_label = train_batch[2].numpy()
            train_entry = train_batch[0].numpy()
            data.update(train_entry, train_code, train_label, 'train')

            if i == 0:
                model.summary()  # summary() prints itself and returns None

            if (i + 1) % 100 == 0:
                train_hook = eval_cls_map(train_code, train_code, train_label,
                                          train_label)

                tf.summary.scalar('train/loss', loss, step=i)
                tf.summary.scalar('train/hook', train_hook, step=i)

                print('batch {}: loss {}'.format(i, loss))

            if (i + 1) % 2000 == 0:
                print('Testing...')
                test_batch = next(test_iter)
                test_code = test_step(model, test_batch)
                test_label = test_batch[2].numpy()
                test_entry = test_batch[0].numpy()
                data.update(test_entry, test_code, test_label, 'test')
                test_hook = eval_cls_map(test_code,
                                         data.train_code,
                                         test_label,
                                         data.train_label,
                                         at=1000)
                tf.summary.scalar('test/hook', test_hook, step=i)

                save_name = os.path.join(save_path, 'ymmodel' + str(i))
                checkpoint.save(file_prefix=save_name)
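
Every example on this page funnels into eval_cls_map(query_code, base_code, query_label, base_label, ...). Its implementation is not shown here; as a rough reference, the following is a minimal NumPy sketch of what such a hook typically computes (mean average precision over a Hamming ranking), under the assumption of binary codes and one-/multi-hot label rows:

import numpy as np

def map_sketch(query_code, base_code, query_label, base_label, at=None):
    # Hedged stand-in, not the repository's eval_cls_map.
    # Hamming distance between every query code and every base code.
    dist = np.count_nonzero(query_code[:, None, :] != base_code[None, :, :],
                            axis=2)
    # A base item counts as relevant if it shares at least one class.
    relevant = (query_label @ base_label.T) > 0
    average_precisions = []
    for q in range(dist.shape[0]):
        order = np.argsort(dist[q], kind='stable')[:at]
        rel = relevant[q][order]
        hits = np.cumsum(rel)
        if hits[-1] == 0:
            average_precisions.append(0.0)
            continue
        precision_at_k = hits / np.arange(1, rel.size + 1)
        average_precisions.append(np.sum(precision_at_k * rel) / hits[-1])
    return float(np.mean(average_precisions))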
Example #2
    def hook_test(self):
        q = self.test_data.this_batch['batch_code']
        ql = self.test_data.this_batch['batch_label']

        t = self.training_data.code
        tl = self.training_data.label
        return eval_cls_map(q, t, ql, tl, at=1000)
    def train(self, dataset, log_path, save_path):
        init_op = tf.global_variables_initializer()
        self.sess.run(init_op)

        max_epoch = 40
        train_interval, test_interval = dataset.iter_num()

        summary_op = tf.summary.merge(
            tf.get_collection(tf.GraphKeys.SUMMARIES, scope=NAMESCOPE_TRAIN))
        summary_op_eval = tf.summary.merge(
            tf.get_collection(tf.GraphKeys.SUMMARIES, scope=NAMESCOPE_TEST))

        writer = tf.summary.FileWriter(log_path + '/')
        saver = tf.train.Saver()

        for i in range(max_epoch):
            for j in range(train_interval):
                batch_data = dataset.next_batch_train()
                batch_code, summaries, this_loss, _ = self.sess.run(
                    [self.out_tensor, summary_op, self.loss, self.opt],
                    feed_dict={self.img_in: batch_data})
                step = tf.train.global_step(self.sess, self.global_step)
                writer.add_summary(summaries, global_step=step)
                dataset.apply_code(batch_code)
                print('Epoch ' + str(i) + ', Batch ' + str(j) +
                      '(Global Step: ' + str(step) + '): ' + str(this_loss))
                del batch_code, summaries, this_loss
                gc.collect()

            print('Testing...')
            for j in range(test_interval):
                batch_data = dataset.next_batch_test()
                batch_code = self._forward(batch_data)
                dataset.apply_code(batch_code, 1)
                del batch_code, batch_data
                gc.collect()

            mean_average_precision = eval_tools.eval_cls_map(
                dataset.code_test, dataset.code_train, dataset.label_test,
                dataset.label_train)
            step = tf.train.global_step(self.sess, self.global_step)
            eval_to_write = self.sess.run(
                summary_op_eval,
                feed_dict={self.eval_map: np.asarray([mean_average_precision])})
            writer.add_summary(eval_to_write, global_step=step)
            saver.save(self.sess, os.path.join(save_path, 'hehemodel'),
                       global_step=step)
            print(str(mean_average_precision))
            dataset.reshuffle()
            del eval_to_write, mean_average_precision
            gc.collect()
    def _eval_loop(self, train_code, train_label, batch_reader, num=5):
        code = None  # placeholders, overwritten on the first iteration
        cls = None
        for i in range(num):
            batch_data = batch_reader(i, mode=1)
            this_out = self.forward(batch_data)
            if i == 0:
                code = np.asarray(this_out)
                cls = batch_data.get('batch_label')
            else:
                code = np.concatenate((code, this_out))
                cls = np.concatenate((cls, batch_data.get('batch_label')))

        mean_average_precision = eval_tools.eval_cls_map(code, train_code, cls, train_label)
        return np.asarray([mean_average_precision])
    def _eval_loop_for_shuffle(self, train_code, train_label, test_feat, test_label, num=5):
        code = None  # placeholder, overwritten on the first iteration
        cls = test_label
        for i in range(num):
            batch_start = i * self.batch_size
            batch_end = batch_start + self.batch_size
            batch_data = dict()
            batch_data['batch_feat'] = test_feat[batch_start:batch_end, :]
            this_out = self.forward(batch_data)
            if i == 0:
                code = np.asarray(this_out)
            else:
                code = np.concatenate((code, this_out))

        mean_average_precision = eval_tools.eval_cls_map(code, train_code, cls, train_label)
        del code, cls
        gc.collect()
        return np.asarray([mean_average_precision])
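
Example #2, unlike the rest, targets the TensorFlow 1.x graph API (Session, global_variables_initializer, Saver, FileWriter). One way to run such code under TensorFlow 2.x, offered as a hedged suggestion rather than something this repository necessarily does, is the compat shim:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restores graph mode, Sessions and the v1 summary API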
Example #6
def train(max_iter=50000):
    model = Model(k=10)
    data = Dataset()
    opt = tf.keras.optimizers.Adam(1e-4)
    train_iter = iter(data.train_data)
    test_iter = iter(data.test_data)

    time_string = strftime("%a%d%b%Y-%H%M%S", gmtime())
    result_path = os.path.join(ROOT_PATH, 'result', 'cifar10')
    save_path = os.path.join(result_path, 'model', time_string)
    summary_path = os.path.join(result_path, 'log', time_string)
    os.makedirs(result_path, exist_ok=True)
    os.makedirs(save_path, exist_ok=True)
    writer = tf.summary.create_file_writer(summary_path)
    checkpoint = tf.train.Checkpoint(opt=opt, model=model)

    for i in range(max_iter):
        with writer.as_default():
            train_batch = next(train_iter)

            train_code, train_loss = train_step(model, train_batch, opt, i)
            train_entry = train_batch[0]
            train_label = train_batch[2]
            data.update(train_entry.numpy(), train_code.numpy(),
                        train_label.numpy(), 'train')
            print('Step {}: loss: {}'.format(i, train_loss.numpy()))

            if i % 500 == 0 and i > 0:
                print('Testing...')
                test_batch = next(test_iter)
                test_entry, test_feat, test_label = test_batch
                test_code = model(test_feat, training=False)
                data.update(test_entry.numpy(), test_code.numpy(),
                            test_label.numpy(), 'test')
                test_map = eval_cls_map(test_code.numpy(),
                                        data.train_code,
                                        test_label.numpy(),
                                        data.train_label,
                                        at=None)

                tf.summary.scalar('map/test', test_map, step=i)
Example #7
def train_step(model: Model, batch_data, opt: tf.optimizers.Optimizer, step):
    feat = batch_data[1]
    label = batch_data[2]
    summary_step = -1 if step % 50 > 0 else step
    with tf.GradientTape() as tape:
        net_out = model(feat, training=True)
        loss = model.loss(feat, net_out, step=summary_step)
    # Compute and apply gradients outside the tape context.
    gradient = tape.gradient(loss, sources=model.trainable_variables)
    opt.apply_gradients(zip(gradient, model.trainable_variables))

    code, prob, fc_cls = net_out['decoder']
    if summary_step >= 0:
        sim_gt = tf.expand_dims(tf.expand_dims(label_relevance(label), 0), -1)
        batch_map = eval_cls_map(code.numpy(), code.numpy(), label.numpy(),
                                 label.numpy())
        tf.summary.image('sim/gt', sim_gt, step=summary_step, max_outputs=1)
        tf.summary.scalar('map/train', batch_map, step=summary_step)

    return code, loss
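
label_relevance is defined elsewhere in the repository. Since its output is logged as a ground-truth similarity image, a plausible sketch, assuming one-/multi-hot label rows, is a pairwise label-match matrix:

import tensorflow as tf

def label_relevance_sketch(label):
    # Hedged guess at the repo's label_relevance: 1.0 where two batch items
    # share at least one class, 0.0 elsewhere.
    shared = tf.matmul(label, label, transpose_b=True)  # (batch, batch)
    return tf.cast(shared > 0, tf.float32)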
Example #8
def load_weights(set_name, bbn_dim, cbn_dim, batch_size, middle_dim, path):
    """
    create model and load it with pre-trained weights stored under the path
	"""
    print("loading weights from {}".format(path))
    model = TBH(set_name, bbn_dim, cbn_dim, middle_dim)
    data = Dataset(set_name=set_name, batch_size=batch_size, shuffle=False)

    actor_opt = tf.keras.optimizers.Adam(1e-4)
    critic_opt = tf.keras.optimizers.Adam(1e-4)

    checkpoint = tf.train.Checkpoint(actor_opt=actor_opt,
                                     critic_opt=critic_opt,
                                     model=model)
    checkpoint.restore(tf.train.latest_checkpoint(path))
    update_codes(model, data, batch_size, set_name)
    
    print("updating codes for the dataset and plot PR code...")
    test_hook,test_precision,pr_curve = eval_cls_map(data.test_code, data.train_code, data.test_label, data.train_label, 1000, True)
    make_PR_plot(path, pr_curve)
    print("The mPA is: {}".format(test_hook))
    print("The mean precision@1000 is: {}".format(test_precision))
Example #9
    def hook_train(self):
        q = self.training_data.this_batch['batch_code']
        l = self.training_data.this_batch['batch_label']

        return eval_cls_map(q, q, l, l)
Example #10
def hook(query, base, label_q, label_b):
    return eval_cls_map(query, base, label_q, label_b)
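
Example #10 is the smallest wrapper on this page and fixes the argument order used throughout: query codes, base codes, query labels, base labels. A smoke test with random, purely hypothetical data:

import numpy as np

query = np.random.randint(0, 2, size=(100, 64))       # 100 queries, 64-bit codes
base = np.random.randint(0, 2, size=(1000, 64))       # 1000 database items
label_q = np.eye(10)[np.random.randint(0, 10, 100)]   # one-hot, 10 classes
label_b = np.eye(10)[np.random.randint(0, 10, 1000)]
print(hook(query, base, label_q, label_b))  # mean average precision in [0, 1]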
Example #11
def train(set_name,
          bbn_dim,
          cbn_dim,
          batch_size,
          middle_dim=1024,
          max_iter=80000):
    model = TBH(set_name, bbn_dim, cbn_dim, middle_dim)

    data = Dataset(set_name=set_name, batch_size=batch_size)

    actor_opt = tf.keras.optimizers.Adam(1e-4)
    critic_opt = tf.keras.optimizers.Adam(1e-4)

    train_iter = iter(data.train_data)
    test_iter = iter(data.test_data)

    time_string = strftime("%a%d%b%Y-%H%M%S", gmtime())
    result_path = os.path.join(REPO_PATH, 'result', set_name)
    save_path = os.path.join(result_path, 'model', time_string)
    summary_path = os.path.join(result_path, 'log', time_string)
    os.makedirs(result_path, exist_ok=True)
    os.makedirs(save_path, exist_ok=True)

    writer = tf.summary.create_file_writer(summary_path)
    checkpoint = tf.train.Checkpoint(actor_opt=actor_opt,
                                     critic_opt=critic_opt,
                                     model=model)

    for i in range(max_iter):
        with writer.as_default():
            train_batch = next(train_iter)
            train_code, actor_loss, critic_loss = train_step(
                model, train_batch, bbn_dim, cbn_dim, batch_size, actor_opt,
                critic_opt)
            train_label = train_batch[2].numpy()
            train_entry = train_batch[0].numpy()
            data.update(train_entry, train_code, train_label, 'train')

            if i == 0:
                model.summary()  # summary() prints itself and returns None

            if (i + 1) % 100 == 0:
                train_hook, train_precision, _ = hook(train_code, train_code,
                                                      train_label, train_label)

                tf.summary.scalar('train/actor', actor_loss, step=i)
                tf.summary.scalar('train/critic', critic_loss, step=i)
                tf.summary.scalar('train/hook', train_hook, step=i)
                tf.summary.scalar('train/precision', train_precision, step=i)

                print('batch {}, actor {}, critic {}, map {}, precision {}'.
                      format(i, actor_loss, critic_loss, train_hook,
                             train_precision))

            if (i + 1) % 2000 == 0:
                print('Testing...')
                test_batch = next(test_iter)
                test_code = test_step(model, test_batch)
                test_label = test_batch[2].numpy()
                test_entry = test_batch[0].numpy()
                data.update(test_entry, test_code, test_label, 'test')
                if (i + 1) < max_iter:
                    test_hook, test_precision, pr_curve = eval_cls_map(
                        test_code, data.train_code, test_label,
                        data.train_label, 1000)
                else:  # max_iter reached: recompute codes for all test data so the PR curve can be plotted
                    data = Dataset(set_name=set_name,
                                   batch_size=batch_size,
                                   shuffle=False)
                    update_codes(model, data, batch_size, set_name)
                    test_hook, test_precision, pr_curve = eval_cls_map(
                        data.test_code, data.train_code, data.test_label,
                        data.train_label, 1000, True)
                    make_PR_plot(summary_path, pr_curve)
                tf.summary.scalar('test/hook', test_hook, step=i)
                tf.summary.scalar('test/precision', test_precision, step=i)

                print('test_map {}, test_precision@1000 {}'.format(
                    test_hook, test_precision))

                save_name = os.path.join(save_path, 'ymmodel' + str(i))
                checkpoint.save(file_prefix=save_name)
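
For completeness, a hedged launch call for Example #11; the dimensions below are hypothetical placeholders, not values taken from the repository:

if __name__ == '__main__':
    # e.g. a 32-bit binary bottleneck and a 512-d continuous bottleneck
    train(set_name='cifar10', bbn_dim=32, cbn_dim=512, batch_size=256)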