Code Example #1
 def test_moredatanumber(self):
     """test for more data"""
     print "----5----"
     m1_score = compute_m1(
         json_predictions_file="data/more_data_number.json",
         reference_file="data/id_to_words.json")
     self.assertEqual(m1_score['error'], 1)
Code Example #2
 def test_rightdata(self):
     """test for right data"""
     print "----1----"
     m1_score = compute_m1(
         json_predictions_file="./data/val_cadidate_captions_json.json",
         reference_file="./data/val_references_json.json")
     self.assertEqual(m1_score['error'], 0)
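
These snippets are methods of a unittest.TestCase subclass. A minimal sketch of the surrounding test module is shown below; the import path of compute_m1 and the class name are assumptions, since the listing does not show them.

# Minimal sketch of the surrounding test module. The import path of
# compute_m1 and the TestComputeM1 class name are assumptions.
import unittest

from compute_m1 import compute_m1  # hypothetical import path


class TestComputeM1(unittest.TestCase):
    """Collects the compute_m1 test cases shown in this listing."""

    def test_rightdata(self):
        """test for right data"""
        m1_score = compute_m1(
            json_predictions_file="./data/val_cadidate_captions_json.json",
            reference_file="./data/val_references_json.json")
        self.assertEqual(m1_score['error'], 0)


if __name__ == '__main__':
    unittest.main()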
Code Example #3
 def test_wrongname(self):
     """test for wrong_name"""
     print "----6----"
     m1_score = compute_m1(json_predictions_file="data/wrong_name.json",
                           reference_file="data/id_to_words.json")
     self.assertEqual(m1_score['error'], 1)
Code Example #4
 def test_keyerror(self):
     """test for key error"""
     print "----3----"
     m1_score = compute_m1(json_predictions_file="data/key_error.json",
                           reference_file="data/id_to_words.json")
     self.assertEqual(m1_score['error'], 1)
Code Example #5
 def test_nulldata(self):
     """test for null data"""
     print "----2----"
     m1_score = compute_m1(json_predictions_file="data/has_null_data.json",
                           reference_file="data/id_to_words.json")
     self.assertEqual(m1_score['error'], 1)
Code Example #6
 def test_lessdatanumber(self):
     """test for less data"""
     m1_score = compute_m1(
         json_predictions_file="data/less_data_number.json",
         reference_file="data/id_to_words.json")
     self.assertEqual(m1_score['error'], 1)
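
Taken together, examples #1-#6 pin down compute_m1's error contract: the returned dict carries 'error' = 0 when the predictions file is well-formed and matches the references, and 'error' = 1 for malformed input (wrong names, null captions, or more/fewer records than the references). A hypothetical sketch of just that contract, assuming both files map image ids to captions as the name id_to_words.json suggests:

# Illustrative sketch of the error contract the tests above exercise --
# not the real scorer, which also computes caption metrics. The assumed
# file layout (a JSON object mapping image id -> caption) is a guess.
import json


def compute_m1_sketch(json_predictions_file, reference_file):
    with open(reference_file) as f:
        references = json.load(f)
    try:
        with open(json_predictions_file) as f:
            predictions = json.load(f)
        # null captions fail (has_null_data.json), and the id sets must
        # match exactly (key_error, more_data_number, less_data_number)
        if any(caption is None for caption in predictions.values()):
            return {'error': 1}
        if set(predictions) != set(references):
            return {'error': 1}
    except (ValueError, AttributeError):
        return {'error': 1}
    return {'error': 0}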
Code Example #7
    def train(self):
        # assumes module-level imports: numpy as np, tensorflow as tf, os,
        # time, plus load_coco_data, decode_captions, captions2json and
        # compute_m1 from the project's utilities

        # validation features are loaded once; the training split is
        # reloaded part by part inside the epoch loop below
        val_features = self.val_data['features']
        n_iters_val = int(
            np.ceil(float(val_features.shape[0]) / self.batch_size))
        # build graphs for training model and sampling captions
        loss = self.model.build_model()

        with tf.variable_scope(tf.get_variable_scope()):
            with tf.name_scope('optimizer'):
                # reuse the variables built by build_model() so the sampler
                # shares the trained weights
                tf.get_variable_scope().reuse_variables()
                _, _, generated_captions = self.model.build_sampler(max_len=20)
                optimizer = self.optimizer(learning_rate=self.learning_rate)
                grads = tf.gradients(loss, tf.trainable_variables())
                grads_and_vars = list(zip(grads, tf.trainable_variables()))
        train_op = optimizer.apply_gradients(grads_and_vars=grads_and_vars)

        # summary ops for the loss, the weights and their gradients
        tf.summary.scalar('batch_loss', loss)
        for var in tf.trainable_variables():
            tf.summary.histogram(var.op.name, var)
        for grad, var in grads_and_vars:
            if grad is not None:  # tf.gradients yields None for unused vars
                tf.summary.histogram(var.op.name + '/gradient', grad)

        summary_op = tf.summary.merge_all()

        print "The number of epoch: %d" % self.n_epochs
        #print "Data size: %d" %n_examples
        print "Batch size: %d" % self.batch_size
        #print "Iterations per epoch: %d" %n_iters_per_epoch

        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            init = tf.global_variables_initializer()
            sess.run(init)
            summary_writer = tf.summary.FileWriter(
                self.log_path, graph=tf.get_default_graph())
            saver = tf.train.Saver(max_to_keep=20)

            if self.pretrained_model is not None:
                print "Start training with pretrained Model.."
                saver.restore(sess, self.pretrained_model)

            prev_loss = -1
            curr_loss = 0
            start_t = time.time()

            for e in range(self.n_epochs):
                # the training split is stored in five parts; load one part
                # at a time
                for part_num in range(5):
                    data = load_coco_data(data_path='./data',
                                          split='train',
                                          batch=part_num)
                    n_examples = data['captions'].shape[0]
                    n_iters_per_epoch = int(
                        np.ceil(float(n_examples) / self.batch_size))
                    features = data['features']
                    captions = data['captions']
                    image_idxs = data['image_idxs']

                    # shuffle captions and their image indices with the same
                    # permutation so the pairs stay aligned
                    rand_idxs = np.random.permutation(n_examples)
                    captions = captions[rand_idxs]
                    image_idxs = image_idxs[rand_idxs]

                    for i in range(n_iters_per_epoch):
                        start = i * self.batch_size
                        end = (i + 1) * self.batch_size
                        captions_batch = captions[start:end]
                        image_idxs_batch = image_idxs[start:end]
                        # several captions can share an image, so features
                        # are gathered by index rather than sliced
                        features_batch = features[image_idxs_batch]
                        feed_dict = {
                            self.model.features: features_batch,
                            self.model.captions: captions_batch
                        }
                        _, l = sess.run([train_op, loss], feed_dict)
                        curr_loss += l

                        # write summary for tensorboard visualization
                        # (note: this step counter restarts with each data
                        # part, so points within an epoch can overlap)
                        if i % 10 == 0:
                            summary = sess.run(summary_op, feed_dict)
                            summary_writer.add_summary(
                                summary, e * n_iters_per_epoch + i)

                        if (i + 1) % self.print_every == 0:
                            print("\nTrain loss at epoch %d & part %d & "
                                  "iteration %d (mini-batch): %.5f" %
                                  (e + 1, part_num + 1, i + 1, l))
                            # show the reference captions for the first image
                            # of the batch next to a freshly sampled caption
                            ground_truths = captions[image_idxs ==
                                                     image_idxs_batch[0]]
                            decoded = decode_captions(ground_truths,
                                                      self.model.idx_to_word)
                            for j, gt in enumerate(decoded):
                                print("Ground truth %d: %s" % (j + 1, gt))
                            gen_caps = sess.run(generated_captions, feed_dict)
                            decoded = decode_captions(gen_caps,
                                                      self.model.idx_to_word)
                            print("Generated caption: %s\n" % decoded[0])

                    # drop the reference to the current data part
                    del data

                print "Previous epoch loss: ", prev_loss
                print "Current epoch loss: ", curr_loss
                print "Elapsed time: ", time.time() - start_t
                prev_loss = curr_loss
                curr_loss = 0

                # sample captions for the whole validation set, write them
                # to JSON, and score them against the references
                if self.print_bleu:
                    # int dtype: the entries are word indices
                    all_gen_cap = np.zeros((val_features.shape[0], 20),
                                           dtype=np.int64)
                    for i in range(n_iters_val):
                        start = i * self.batch_size
                        end = (i + 1) * self.batch_size
                        features_batch = val_features[start:end]
                        feed_dict = {self.model.features: features_batch}
                        gen_cap = sess.run(generated_captions,
                                           feed_dict=feed_dict)
                        all_gen_cap[start:end] = gen_cap

                    all_decoded = decode_captions(all_gen_cap,
                                                  self.model.idx_to_word)
                    captions2json.captions2json(
                        all_decoded, self.val_data['file_names'],
                        './data/val/val_cadidate_captions_json.json')
                    compute_m1(
                        json_predictions_file=
                        './data/val/val_cadidate_captions_json.json',
                        reference_file='./data/val/val_references_json.json')

                # save the model's parameters
                if (e + 1) % self.save_every == 0:
                    saver.save(sess,
                               os.path.join(self.model_path, 'model.ckpt'),
                               global_step=e + 1)
                    print("model-%s saved." % (e + 1))
Code Example #8
 def test_rightdata(self):
     """test for right data"""
     m1_score = compute_m1(
         json_predictions_file="data/id_to_test_caption.json",
         reference_file="data/id_to_words.json")
     self.assertEqual(m1_score['error'], 0)
Code Example #9
 def test_wrongname(self):
     """test for wrong_name"""
     m1_score = compute_m1(json_predictions_file="data/wrong_name.json",
                           reference_file="data/id_to_words.json")
     self.assertEqual(m1_score['error'], 1)
Code Example #10
 def test_moredatanumber(self):
     """test for more data"""
     m1_score = compute_m1(json_predictions_file="data/more_data_number.json",
                           reference_file="data/id_to_words.json")
     self.assertEqual(m1_score['error'], 1)
Code Example #11
 def test_keyerror(self):
     """test for key error"""
     m1_score = compute_m1(json_predictions_file="data/key_error.json",
                           reference_file="data/id_to_words.json")
     self.assertEqual(m1_score['error'], 1)
Code Example #12
 def test_nulldata(self):
     """test for null data"""
     m1_score = compute_m1(json_predictions_file="data/has_null_data.json",
                           reference_file="data/id_to_words.json")
     self.assertEqual(m1_score['error'], 1)