Example #1
def extract_feature(imgList, args):
    """Run each image in imgList through ResNet-v1-152 and return its block4
    (7x7x2048) feature maps and pool5 (2048-d) pooled features."""
    tf.reset_default_graph()

    queue = tf.train.string_input_producer(imgList,
                                           num_epochs=None,
                                           shuffle=False)
    reader = tf.WholeFileReader()

    img_path, img_data = reader.read(queue)
    img = vgg_preprocessing.preprocess_image(
        tf.image.decode_jpeg(contents=img_data, channels=3), args.imgSize,
        args.imgSize)
    img = tf.expand_dims(img, 0)
    with slim.arg_scope(resnet_v1.resnet_arg_scope()):
        net, end_points = resnet_v1.resnet_v1_152(inputs=img,
                                                  is_training=False)
    feat1 = end_points['resnet_v1_152/block4']
    feat2 = end_points['pool5']

    saver = tf.train.Saver()
    init_op = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init_op)
        saver.restore(sess, args.cnnModel)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        feats1 = []
        feats2 = []
        for i in range(len(imgList)):
            f1, f2 = sess.run([feat1, feat2])  # f1: (1, 7, 7, 2048)   f2: (1, 1, 1, 2048)
            feats1.append(f1[0])
            feats2.append(f2[0][0][0])
        coord.request_stop()
        coord.join(threads)
    return feats1, feats2
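
A minimal usage sketch (not part of the original example; the frame paths, image size, and checkpoint location below are placeholder assumptions):

import argparse

# Hypothetical arguments: imgSize and cnnModel are the only attributes the function reads.
args = argparse.Namespace(imgSize=224, cnnModel='./resnet_v1_152.ckpt')
imgList = ['frames/0001.jpg', 'frames/0002.jpg']  # placeholder JPEG paths

block4_feats, pool5_feats = extract_feature(imgList, args)
# block4_feats[i]: (7, 7, 2048) spatial feature map; pool5_feats[i]: (2048,) pooled vector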
Example #2
import os
import tensorflow as tf

from slim.nets import resnet_v1
from slim.preprocessing import vgg_preprocessing
slim = tf.contrib.slim

FRAME_HOME = '../data/ucf101-frames'
FEATURE_HOME = 'ucf101-block4-7-7-2048-features'

img = tf.placeholder(dtype=tf.float32)
pre_img = vgg_preprocessing.preprocess_image(img, 224, 224, is_training=False)
pre_img = tf.expand_dims(pre_img, 0)

with slim.arg_scope(resnet_v1.resnet_arg_scope()):
    _, end_points = resnet_v1.resnet_v1_152(inputs=pre_img, is_training=False)
feature = tf.squeeze(end_points['resnet_v1_152/block4'])

if not os.path.exists('resnet_v1_152.ckpt'):
    os.system(
        'wget http://download.tensorflow.org/models/resnet_v1_152_2016_08_28.tar.gz'
    )
    os.system('tar -xvzf resnet_v1_152_2016_08_28.tar.gz')
    os.system('rm resnet_v1_152_2016_08_28.tar.gz')

if not os.path.isdir(FEATURE_HOME):
    os.mkdir(FEATURE_HOME)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    tf.train.Saver().restore(sess, './resnet_v1_152.ckpt')
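    # --- Hypothetical continuation: the snippet above ends right after the
    # checkpoint restore. A minimal sketch (an assumption, not the original
    # author's code) of how frames under FRAME_HOME could be pushed through
    # the graph and their block4 features written to FEATURE_HOME. The PIL
    # frame loader and the per-video .npy layout are assumptions.
    import numpy as np
    from PIL import Image

    for video in sorted(os.listdir(FRAME_HOME)):
        out_path = os.path.join(FEATURE_HOME, video + '.npy')
        if os.path.exists(out_path):
            continue  # already extracted
        frame_dir = os.path.join(FRAME_HOME, video)
        feats = []
        for frame_name in sorted(os.listdir(frame_dir)):
            frame = np.asarray(
                Image.open(os.path.join(frame_dir, frame_name)).convert('RGB'),
                dtype=np.float32)
            feats.append(sess.run(feature, feed_dict={img: frame}))
        np.save(out_path, np.stack(feats))  # (num_frames, 7, 7, 2048)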
Example #3
    def _add_seq2seq(self):
        """Add the whole sequence-to-sequence model to the graph."""
        hps = self._hps
        vsize = self._vocab.size()  # size of the vocabulary
        # with tf.variable_scope('image_encoder'):
        self.reshaped_pix = tf.reshape(self._side_batch, [-1, 32, 64, 3])
        with slim.arg_scope(resnet_arg_scope()):
            net, end_points = resnet_v1_152(self.reshaped_pix,
                                            is_training=FLAGS.mode == 'train')
            # feat1 = end_points['resnet_v1_152/block4']
        pic_encoded = end_points['global_pool']
        # self.end_points = end_points
        # self.net = net

        with tf.variable_scope('seq2seq'):
            # Some initializers
            self.rand_unif_init = tf.random_uniform_initializer(
                -hps.rand_unif_init_mag, hps.rand_unif_init_mag, seed=123)
            self.trunc_norm_init = tf.truncated_normal_initializer(
                stddev=hps.trunc_norm_init_std)

            # Add embedding matrix (shared by the encoder and decoder inputs)
            with tf.variable_scope('embedding'):
                embedding = tf.get_variable('embedding', [vsize, hps.emb_dim],
                                            dtype=tf.float32,
                                            initializer=self.trunc_norm_init)
                emb_enc_inputs = tf.nn.embedding_lookup(
                    embedding, self._enc_batch
                )  # tensor with shape (batch_size, max_enc_steps, emb_size)
                emb_dec_inputs = [
                    tf.nn.embedding_lookup(embedding, x)
                    for x in tf.unstack(self._dec_batch, axis=1)
                ]  # list length max_dec_steps containing shape (batch_size, emb_size)
            pic_encoded = tf.reshape(
                tf.squeeze(pic_encoded),
                [FLAGS.batch_size, FLAGS.max_side_steps, -1])
            emb_side_inputs = tf.layers.dense(pic_encoded, FLAGS.emb_dim * 2)
            # Add the encoder.
            enc_outputs, fw_st, bw_st = self._add_encoder(
                emb_enc_inputs, self._enc_lens)
            # batch_size * pic_num * emb_dim
            new_emb_side_inputs = tf.reshape(emb_side_inputs, [
                FLAGS.batch_size * int(FLAGS.max_side_steps / 5), 5,
                FLAGS.hidden_dim * 2
            ])
            # (batch_size*pic_num/5) * 5 * emb_dim

            side_states = self._add_side_rnn_encoder(
                new_emb_side_inputs, 5 * tf.ones(
                    (new_emb_side_inputs.get_shape()[0]), dtype=tf.int32))
            self._side_inputs = tf.reshape(
                side_states, [FLAGS.batch_size, -1, FLAGS.hidden_dim * 2])
            self._enc_states = enc_outputs

            # Our encoder is bidirectional and our decoder is unidirectional so we need to reduce the final encoder hidden state to the right size to be the initial decoder hidden state
            self._dec_in_state = self._reduce_states(fw_st, bw_st)
            self._last_state = tf.concat(self._dec_in_state, -1)

            with tf.variable_scope('interaction'):
                change_side_states = tf.transpose(self._side_inputs, [0, 2, 1])
                self._change_side_states = change_side_states
                attn_matrix = tf.matmul(self._enc_states, change_side_states)
                # batch_size * enc_len * side_len
                self._video_aware_enc_states = tf.matmul(
                    attn_matrix, self._side_inputs)
                self._news_aware_side_states = tf.matmul(
                    tf.transpose(attn_matrix, [0, 2, 1]), self._enc_states)
                gate = tf.layers.dense(self._last_state,
                                       1,
                                       activation=tf.nn.sigmoid)
                gate = tf.expand_dims(tf.tile(gate, [1, FLAGS.hidden_dim * 2]),
                                      1)
                ones = np.ones([FLAGS.batch_size, 1, FLAGS.hidden_dim * 2])
                self._enc_states = gate * self._enc_states + (
                    ones - gate) * self._video_aware_enc_states

            # Add the decoder.
            with tf.variable_scope('decoder'):
                decoder_outputs, self._dec_out_state, self.attn_dists, self.p_gens, self.coverage = self._add_decoder(
                    emb_dec_inputs)
                # attn_seg, attn_side = self.pic_attention(emb_side_inputs)
                # self._attn_side = attn_side

            # Add the output projection to obtain the vocabulary distribution
            with tf.variable_scope('output_projection'):
                w = tf.get_variable('w', [hps.hidden_dim, vsize],
                                    dtype=tf.float32,
                                    initializer=self.trunc_norm_init)
                w_t = tf.transpose(w)
                v = tf.get_variable('v', [vsize],
                                    dtype=tf.float32,
                                    initializer=self.trunc_norm_init)
                vocab_scores = []  # vocab_scores is the vocabulary distribution before applying softmax. Each entry on the list corresponds to one decoder step
                for i, output in enumerate(decoder_outputs):
                    if i > 0:
                        tf.get_variable_scope().reuse_variables()
                    vocab_scores.append(tf.nn.xw_plus_b(
                        output, w, v))  # apply the linear layer

                vocab_dists = [
                    tf.nn.softmax(s) for s in vocab_scores
                ]  # The vocabulary distributions. List length max_dec_steps of (batch_size, vsize) arrays. The words are in the order they appear in the vocabulary file.

            # For pointer-generator model, calc final distribution from copy distribution and vocabulary distribution
            if FLAGS.pointer_gen:
                final_dists = self._calc_final_dist(vocab_dists,
                                                    self.attn_dists)
            else:  # final distribution is just vocabulary distribution
                final_dists = vocab_dists

            if hps.mode in ['train', 'eval']:
                # Calculate the loss
                with tf.variable_scope('loss'):
                    if FLAGS.pointer_gen:
                        # Calculate the loss per step
                        # This is fiddly; we use tf.gather_nd to pick out the probabilities of the gold target words
                        loss_per_step = []  # will be list length max_dec_steps containing shape (batch_size)
                        batch_nums = tf.range(0, limit=hps.batch_size)  # shape (batch_size)
                        for dec_step, dist in enumerate(final_dists):
                            targets = self._target_batch[:, dec_step]  # The indices of the target words. shape (batch_size)
                            indices = tf.stack((batch_nums, targets),
                                               axis=1)  # shape (batch_size, 2)
                            gold_probs = tf.gather_nd(
                                dist, indices
                            )  # shape (batch_size). prob of correct words on this step
                            losses = -tf.log(gold_probs + 1e-10)
                            loss_per_step.append(losses)

                        # Apply dec_padding_mask and get loss
                        self._loss = _mask_and_avg(loss_per_step,
                                                   self._dec_padding_mask)

                    else:  # baseline model
                        self._loss = tf.contrib.seq2seq.sequence_loss(
                            tf.stack(vocab_scores, axis=1), self._target_batch,
                            self._dec_padding_mask
                        )  # this applies softmax internally

                    tf.summary.scalar('loss', self._loss)

                    # Calculate coverage loss from the attention distributions
                    if hps.coverage:
                        with tf.variable_scope('coverage_loss'):
                            self._coverage_loss = _coverage_loss(
                                self.attn_dists, self._dec_padding_mask)
                            tf.summary.scalar('coverage_loss',
                                              self._coverage_loss)
                        self._total_loss = self._loss + hps.cov_loss_wt * self._coverage_loss
                        tf.summary.scalar('total_loss', self._total_loss)

                # with tf.variable_scope('pic_loss'):
                #     self._loss_pic = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=attn_side,
                #                                                                        labels=self._dec_pic_target))
                #     # self._loss_unified = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=attn_side,
                #     #                                                                    labels=attn_seg))
                # self._all_loss = self._loss_pic
                # self._all_loss = self._loss

        with tf.variable_scope('side'):
            emb_side_inputs = tf.nn.l2_normalize(emb_side_inputs, dim=-1)

            # self-attention
            side_outputs, sfw_st, sbw_st = self._add_side_encoder(
                self._side_inputs, self._side_lens)
            conditional_vec = tf.expand_dims(self._last_state, 1)
            conditional_weight = tf.layers.dense(
                tf.multiply(conditional_vec, side_outputs), 1)
            self._cond_side_states = tf.multiply(side_outputs,
                                                 conditional_weight)

            s_gate = tf.layers.dense(self._last_state,
                                     1,
                                     activation=tf.nn.sigmoid)
            s_gate = tf.expand_dims(s_gate, 1)
            s_ones = np.ones_like(s_gate)
            self._side_states = s_gate * self._news_aware_side_states + (
                s_ones - s_gate) * self._cond_side_states

            fusion_gate = tf.layers.dense(self._last_state,
                                          1,
                                          activation=tf.nn.sigmoid)
            fusion_gate = tf.expand_dims(
                tf.tile(fusion_gate, [1, FLAGS.hidden_dim * 2]), 1)
            fusion_ones = tf.ones_like(fusion_gate)
            side_states = tf.nn.l2_normalize(tf.reshape(
                tf.tile(tf.expand_dims(self._side_states, 1), [1, 5, 1, 1]),
                [FLAGS.batch_size, -1, FLAGS.hidden_dim * 2]),
                                             dim=-1)
            fusion_side = fusion_gate * emb_side_inputs + (
                fusion_ones - fusion_gate) * side_states

            attn_side = tf.squeeze(
                tf.layers.dense(
                    fusion_side,
                    1,
                    kernel_initializer=tf.contrib.layers.xavier_initializer()))
            attn_side = nn_ops.softmax(attn_side)
            self.attn_side = attn_side

            # last_state = tf.nn.l2_normalize(tf.tile(tf.expand_dims(self._last_state, 1), [1, 10, 1]), dim=-1)
            # emb_side_inputs = tf.nn.l2_normalize(emb_side_inputs, dim=-1)
            # attn_side = tf.squeeze(tf.layers.dense(tf.concat([last_state, emb_side_inputs], -1), 1, activation=tf.nn.sigmoid, kernel_initializer=tf.contrib.layers.xavier_initializer()))
            # self.attn_side = attn_side

            with tf.variable_scope('pic_loss'):
                # self._loss_pic = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=attn_side,
                #                                                                         labels=self._dec_pic_target))
                # self._loss_pic = pairwise_hinge_loss(logits=attn_side, labels=self._dec_pic_target)
                self._loss_pic = pairwise_hinge_loss(logits=attn_side,
                                                     labels=tf.one_hot(
                                                         self._dec_pic_target,
                                                         FLAGS.max_side_steps))
        if hps.mode in ['train', 'eval']:
            self._all_loss = self._loss + self._loss_pic

        if hps.mode == "decode" or hps.mode == 'auto_decode':
            # We run decode beam search mode one decoder step at a time
            assert len(final_dists) == 1  # final_dists is a singleton list containing shape (batch_size, extended_vsize)
            final_dists = final_dists[0]
            topk_probs, self._topk_ids = tf.nn.top_k(
                final_dists, hps.batch_size * 2
            )  # take the k largest probs. note batch_size=beam_size in decode mode
            self._topk_log_probs = tf.log(topk_probs)
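
The per-step loss aggregation above calls a `_mask_and_avg` helper that is not shown in this excerpt. A minimal sketch of the conventional pointer-generator style implementation of such a helper (an assumption; the author's version may differ):

def _mask_and_avg(values, padding_mask):
    """Average a list of per-step losses, ignoring padded decoder positions.

    values: list of length max_dec_steps, each a tensor of shape (batch_size).
    padding_mask: tensor of shape (batch_size, max_dec_steps); 1.0 for real
        target tokens, 0.0 for padding.
    """
    dec_lens = tf.reduce_sum(padding_mask, axis=1)  # (batch_size) real target lengths
    values_per_step = [
        v * padding_mask[:, dec_step] for dec_step, v in enumerate(values)
    ]
    values_per_ex = sum(values_per_step) / dec_lens  # per-example mean loss
    return tf.reduce_mean(values_per_ex)  # scalar: average over the batch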