def text_objseg_region(text_seq_batch, imcrop_batch, spatial_batch, num_vocab,
                       embed_dim, lstm_dim, mlp_hidden_dims, deeplab_dropout,
                       mlp_dropout):

    # Language feature (LSTM hidden state)
    feat_lang = lstm_net.lstm_net(text_seq_batch, num_vocab, embed_dim,
                                  lstm_dim)

    # Local image feature
    feat_vis = deeplab.deeplab_fc8_full_conv(imcrop_batch,
                                             'deeplab',
                                             apply_dropout=deeplab_dropout)
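
    # Flatten the conv feature map into one feature vector per region crop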
    input_dim = 1
    for d in feat_vis.get_shape().as_list()[1:]:
        input_dim *= d
    feat_vis_flatten = tf.reshape(feat_vis, [-1, input_dim])

    # L2-normalize the features (except for spatial_batch)
    # and concatenate them
    feat_all = tf.concat(axis=1,
                         values=[
                             tf.nn.l2_normalize(feat_lang, 1),
                             tf.nn.l2_normalize(feat_vis_flatten, 1),
                             spatial_batch
                         ])

    # MLP classifier over the concatenated features
    with tf.variable_scope('classifier'):
        mlp_l1 = fc_relu('mlp_l1', feat_all, output_dim=mlp_hidden_dims)
        if mlp_dropout: mlp_l1 = drop(mlp_l1, 0.5)
        mlp_l2 = fc('mlp_l2', mlp_l1, output_dim=1)

    return mlp_l2
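
# For reference, a minimal usage sketch for wiring up the scoring graph above.
# The shapes (T = sequence length, N = batch size), the hyperparameter values,
# and the loss are illustrative assumptions, not taken from this snippet.
def build_region_scorer_example():
    T, N = 20, 50
    text_seq_batch = tf.placeholder(tf.int32, [T, N])
    imcrop_batch = tf.placeholder(tf.float32, [N, 224, 224, 3])
    spatial_batch = tf.placeholder(tf.float32, [N, 8])
    label_batch = tf.placeholder(tf.float32, [N, 1])
    scores = text_objseg_region(text_seq_batch, imcrop_batch, spatial_batch,
                                num_vocab=8803, embed_dim=1000, lstm_dim=1000,
                                mlp_hidden_dims=500, deeplab_dropout=True,
                                mlp_dropout=True)
    # Per-region binary classification loss on the raw scores
    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=scores, labels=label_batch))
    return scores, loss
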
def text_objseg_region(text_seq_batch, imcrop_batch, spatial_batch, num_vocab,
                       embed_dim, lstm_dim, mlp_hidden_dims, vgg_dropout,
                       mlp_dropout):

    # Language feature (LSTM hidden state)
    feat_lang = lstm_net.lstm_net(text_seq_batch, num_vocab, embed_dim,
                                  lstm_dim)[0]

    # Local image feature
    feat_vis = vgg_net.vgg_fc8(imcrop_batch,
                               'vgg_local',
                               apply_dropout=vgg_dropout)

    # L2-normalize the features (except for spatial_batch)
    # and concatenate them
    feat_all = tf.concat(axis=1,
                         values=[
                             tf.nn.l2_normalize(feat_lang, 1),
                             tf.nn.l2_normalize(feat_vis, 1), spatial_batch
                         ])

    # MLP classifier over the concatenated features
    with tf.variable_scope('classifier'):
        mlp_l1 = fc_relu('mlp_l1', feat_all, output_dim=mlp_hidden_dims)
        if mlp_dropout: mlp_l1 = drop(mlp_l1, 0.5)
        mlp_l2 = fc('mlp_l2', mlp_l1, output_dim=1)

    return mlp_l2
def text_objseg_full_conv(text_seq_batch, imcrop_batch, num_vocab, embed_dim,
    lstm_dim, mlp_hidden_dims, vgg_dropout, mlp_dropout):

    # Language feature (LSTM hidden state)
    feat_lang = lstm_net.lstm_net(text_seq_batch, num_vocab, embed_dim, lstm_dim)

    # Local image feature
    feat_vis = vgg_net.vgg_fc8_full_conv(imcrop_batch, 'vgg_local',
        apply_dropout=vgg_dropout)

    # Reshape and tile LSTM top
    featmap_H, featmap_W = feat_vis.get_shape().as_list()[1:3]
    N, D_text = feat_lang.get_shape().as_list()
    feat_lang = tf.tile(tf.reshape(feat_lang, [N, 1, 1, D_text]),
        [1, featmap_H, featmap_W, 1])

    # L2-normalize the features (except for spatial_batch)
    # and concatenate them along axis 3 (channel dimension)
    spatial_batch = tf.convert_to_tensor(generate_spatial_batch(N, featmap_H, featmap_W))
    feat_all = tf.concat(axis=3, values=[tf.nn.l2_normalize(feat_lang, 3),
                                         tf.nn.l2_normalize(feat_vis, 3),
                                         spatial_batch])

    # MLP classifier over the concatenated features
    with tf.variable_scope('classifier'):
        mlp_l1 = conv_relu('mlp_l1', feat_all, kernel_size=1, stride=1,
            output_dim=mlp_hidden_dims)
        if mlp_dropout: mlp_l1 = drop(mlp_l1, 0.5)
        mlp_l2 = conv('mlp_l2', mlp_l1, kernel_size=1, stride=1, output_dim=1)

    return mlp_l2
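
# generate_spatial_batch is referenced above but not defined in this file.
# A sketch consistent with the 8-dimensional spatial feature used throughout
# (per-cell box coordinates normalized to [-1, 1] plus inverse map size);
# treat the exact coordinate convention as an assumption.
def generate_spatial_batch_sketch(N, featmap_H, featmap_W):
    spatial_batch_val = np.zeros((N, featmap_H, featmap_W, 8), dtype=np.float32)
    for h in range(featmap_H):
        for w in range(featmap_W):
            xmin = w / float(featmap_W) * 2 - 1
            xmax = (w + 1) / float(featmap_W) * 2 - 1
            xctr = (xmin + xmax) / 2
            ymin = h / float(featmap_H) * 2 - 1
            ymax = (h + 1) / float(featmap_H) * 2 - 1
            yctr = (ymin + ymax) / 2
            spatial_batch_val[:, h, w, :] = \
                [xmin, ymin, xmax, ymax, xctr, yctr,
                 1.0 / featmap_W, 1.0 / featmap_H]
    return spatial_batch_val
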
def text_objseg_full_conv(text_seq_batch, imcrop_batch, num_vocab, embed_dim,
                          lstm_dim, mlp_hidden_dims, deeplab_dropout,
                          mlp_dropout, is_training):

    # Language feature (LSTM hidden state)
    feat_lang = lstm_net.lstm_net(text_seq_batch, num_vocab, embed_dim,
                                  lstm_dim)[0]

    # DeepLab-ResNet-101 backbone
    net = deeplab101.DeepLabResNetModel({'data': imcrop_batch},
                                        is_training=is_training)
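    # 'fc1_voc12' is the final classification layer of the DeepLab-ResNet model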
    feat_vis = net.layers['fc1_voc12']

    # # Local image feature
    # feat_vis = deeplab.deeplab_fc8_full_conv(imcrop_batch, 'deeplab',
    #     apply_dropout=deeplab_dropout)

    # Reshape and tile LSTM top
    featmap_H, featmap_W = feat_vis.get_shape().as_list()[1:3]
    N, D_text = feat_lang.get_shape().as_list()
    feat_lang = tf.tile(tf.reshape(feat_lang, [N, 1, 1, D_text]),
                        [1, featmap_H, featmap_W, 1])

    # L2-normalize the features (except for spatial_batch)
    # and concatenate them along axis 3 (channel dimension)
    spatial_batch = tf.convert_to_tensor(
        generate_spatial_batch(N, featmap_H, featmap_W))
    feat_all = tf.concat(axis=3,
                         values=[
                             tf.nn.l2_normalize(feat_lang, 3),
                             tf.nn.l2_normalize(feat_vis, 3), spatial_batch
                         ])

    # MLP classifier over the concatenated features
    with tf.variable_scope('classifier'):
        mlp_l1 = conv_relu('mlp_l1',
                           feat_all,
                           kernel_size=1,
                           stride=1,
                           output_dim=mlp_hidden_dims)
        if mlp_dropout: mlp_l1 = drop(mlp_l1, 0.5)
        mlp_l2 = conv('mlp_l2', mlp_l1, kernel_size=1, stride=1, output_dim=1)

    return mlp_l2
def text_objseg_region(text_seq_batch, imcrop_batch, spatial_batch, num_vocab,
    embed_dim, lstm_dim, mlp_hidden_dims, vgg_dropout, mlp_dropout):

    # Language feature (LSTM hidden state)
    feat_lang = lstm_net.lstm_net(text_seq_batch, num_vocab, embed_dim, lstm_dim)

    # Local image feature
    feat_vis = vgg_net.vgg_fc8(imcrop_batch, 'vgg_local', apply_dropout=vgg_dropout)

    # L2-normalize the features (except for spatial_batch)
    # and concatenate them
    feat_all = tf.concat(axis=1, values=[tf.nn.l2_normalize(feat_lang, 1),
                                         tf.nn.l2_normalize(feat_vis, 1),
                                         spatial_batch])

    # MLP classifier over the concatenated features
    with tf.variable_scope('classifier'):
        mlp_l1 = fc_relu('mlp_l1', feat_all, output_dim=mlp_hidden_dims)
        if mlp_dropout: mlp_l1 = drop(mlp_l1, 0.5)
        mlp_l2 = fc('mlp_l2', mlp_l1, output_dim=1)

    return mlp_l2
use_nms = False
nms_thresh = 0.3

################################################################################
# Evaluation network
################################################################################

# Inputs
text_seq_batch = tf.placeholder(tf.int32, [T, 1])  # one sentence per batch
imcrop_batch = tf.placeholder(tf.float32, [N, 224, 224, 3])
lstm_top_batch = tf.placeholder(tf.float32, [N, D_text])
fc8_crop_batch = tf.placeholder(tf.float32, [N, D_im])
spatial_batch = tf.placeholder(tf.float32, [N, 8])

# Language feature (LSTM hidden state)
lstm_top = lstm_net.lstm_net(text_seq_batch, num_vocab, embed_dim, lstm_dim)

# Local image feature
fc8_crop = vgg_net.vgg_fc8(imcrop_batch, 'vgg_local', apply_dropout=False)

# L2-normalize the features (except for spatial_batch)
# and concatenate them along axis 1 (feature dimension)
feat_all = tf.concat(axis=1, values=[tf.nn.l2_normalize(lstm_top_batch, 1),
                                     tf.nn.l2_normalize(fc8_crop_batch, 1),
                                     spatial_batch])

# Outputs
# MLP classifier over the concatenated features
with tf.variable_scope('classifier'):
    mlp_l1 = fc_relu('mlp_l1', feat_all, output_dim=mlp_hidden_dims)
    mlp_l2 = fc('mlp_l2', mlp_l1, output_dim=1)
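
# use_nms / nms_thresh above imply proposal suppression, but the suppression
# code itself is not shown. A standard greedy NMS sketch of the kind those
# flags suggest; the [x1, y1, x2, y2] box format is an assumption.
def nms_sketch(boxes, scores_arr, thresh):
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores_arr.argsort()[::-1]  # highest score first
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # Intersection of the kept box with all remaining boxes
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        inter = np.maximum(0.0, xx2 - xx1 + 1) * np.maximum(0.0, yy2 - yy1 + 1)
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        # Drop boxes overlapping the kept box above the threshold
        order = order[np.where(iou <= thresh)[0] + 1]
    return keep
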
def main(args):

    ################################################################################
    # Validate input arguments
    ################################################################################
    assert not (
        args.concat and (not args.multicrop)
    ), "Cannot test concatenated labels on single image crop per batch."
    assert not (args.classes and args.concat
                ), "Cannot test concatenated labels when using image classes"
    assert not (
        args.classes and (not args.multicrop)
    ), "Cannot test on single image per batch when using image classes"

    # Initialize GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = args.GPU_ID

    # Print the evaluation configuration
    print()
    print("Model:", pretrained_model)
    print("All crops per batch - True | First crop per batch - False:",
          args.multicrop)
    print("Concatenated captions - True | Simple captions - False:",
          args.concat)
    print("Image Classes - True | Image Descriptions - False:", args.classes)
    print()

    ################################################################################
    # Evaluation network
    ################################################################################

    # Inputs
    text_seq_batch = tf.placeholder(tf.int32, [T, N])
    imcrop_batch = tf.placeholder(tf.float32, [N, 224, 224, 3])
    lstm_top_batch = tf.placeholder(tf.float32, [N, D_text])
    fc8_crop_batch = tf.placeholder(tf.float32, [N, D_im])

    # Language feature (LSTM hidden state)
    lstm_top = lstm_net.lstm_net(text_seq_batch, num_vocab, embed_dim,
                                 lstm_dim)

    # Local image feature
    fc8_crop = vgg_net.vgg_fc8(imcrop_batch, 'vgg_local', apply_dropout=False)

    # L2-normalize the features and concatenate them
    # along axis 1 (feature dimension)
    feat_all = tf.concat(axis=1,
                         values=[
                             tf.nn.l2_normalize(lstm_top_batch, 1),
                             tf.nn.l2_normalize(fc8_crop_batch, 1)
                         ])

    # Outputs
    # MLP classifier over the concatenated features
    with tf.variable_scope('classifier'):
        mlp_l1 = fc_relu('mlp_l1', feat_all, output_dim=mlp_hidden_dims)
        mlp_l2 = fc('mlp_l2', mlp_l1, output_dim=1)
    scores = mlp_l2

    # Load pretrained model
    snapshot_restorer = tf.train.Saver(None)
    sess = tf.Session()
    snapshot_restorer.restore(sess, pretrained_model)

    ################################################################################
    # Load annotations and bounding box proposals
    ################################################################################

    coco = COCO(query_file)
    coco_captions = COCO(caption_file)
    imgid_list = coco.getImgIds()
    catid_list = coco.getCatIds()

    ################################################################################
    # Load testing data
    ################################################################################

    testing_samples_pos = []
    testing_samples_neg = []
    num_imcrop = len(imgid_list)

    # Gather a testing example per full image.
    for n_imcrop in range(num_imcrop):
        # image
        img_id = imgid_list[n_imcrop]

        # get the descriptions of the image
        caption_ids = coco_captions.getAnnIds(imgIds=img_id)
        captions = [
            x['caption'].strip() for x in coco_captions.loadAnns(caption_ids)
        ]

        if args.concat:
            # positive example: concatenate the first and last captions
            # (a caption is paired with itself if only one is present)
            pos_desc = captions[0] + ' and ' + captions[-1]
            testing_samples_pos.append((img_id, pos_desc, 1))

            # form negative examples by choosing random image
            # that is not the current image, get its descriptions,
            # and choose one at random.
            false_idx = n_imcrop
            while false_idx == n_imcrop:
                false_idx = randint(0, num_imcrop - 1)
            desc_ids = coco_captions.getAnnIds(imgid_list[false_idx])
            desc_idx = randint(0, len(desc_ids) - 1)
            neg_desc1 = coco_captions.loadAnns(
                desc_ids[desc_idx])[0]['caption'].strip()

            false_idx = n_imcrop
            while false_idx == n_imcrop:
                false_idx = randint(0, num_imcrop - 1)
            desc_ids = coco_captions.getAnnIds(imgid_list[false_idx])
            desc_idx = randint(0, len(desc_ids) - 1)
            neg_desc2 = coco_captions.loadAnns(
                desc_ids[desc_idx])[0]['caption'].strip()

            # negative example: append two negative captions
            neg_desc = neg_desc1 + ' and ' + neg_desc2
            testing_samples_neg.append((img_id, neg_desc, 0))

            # negative examples: pair one negative caption with one
            # positive caption, in both orders
            neg_desc = neg_desc1 + ' and ' + captions[0].strip()
            testing_samples_neg.append((img_id, neg_desc, 0))
            neg_desc = captions[0].strip() + ' and ' + neg_desc1
            testing_samples_neg.append((img_id, neg_desc, 0))

        # use the image's class names as captions
        elif args.classes:
            img_catids = coco.getCatIds(imgIds=img_id)
            img_cat_names = [cat['name'] for cat in coco.loadCats(img_catids)]
            for category in img_cat_names:
                testing_samples_pos.append((img_id, category, 1))

                # form one negative example by choosing random category that
                # img is not in
                false_catid = img_catids[0]
                while false_catid in img_catids:
                    false_catid = catid_list[randint(0, len(catid_list) - 1)]
                false_cat_name = coco.loadCats(false_catid)[0]['name']
                testing_samples_neg.append((img_id, false_cat_name, 0))

        else:
            for caption in captions:
                # append one positive sample per description
                testing_samples_pos.append((img_id, caption, 1))

                # form one negative example by choosing random image
                # that is not the current image, get its descriptions,
                # and choose one at random.
                false_idx = n_imcrop
                while false_idx == n_imcrop:
                    false_idx = randint(0, num_imcrop - 1)
                desc_ids = coco_captions.getAnnIds(imgid_list[false_idx])
                desc_idx = randint(0, len(desc_ids) - 1)
                false_cap = coco_captions.loadAnns(
                    desc_ids[desc_idx])[0]['caption'].strip()

                testing_samples_neg.append((img_id, false_cap, 0))

    # Combine samples
    print('#pos=', len(testing_samples_pos))
    print('#neg=', len(testing_samples_neg))

    # TODO: Not exactly sure what your multicrop is testing here? Just removes the
    # positive examples from being tested? How is this useful?
    if args.multicrop:
        testing_samples = testing_samples_pos + testing_samples_neg
    else:
        testing_samples = testing_samples_neg

    print('#total testing examples=', len(testing_samples))
    num_batch = len(testing_samples) // N
    print('total batch number: %d' % num_batch)

    ################################################################################
    # Testing
    ################################################################################

    # Pre-allocate arrays
    imcrop_val = np.zeros((N, 224, 224, 3), dtype=np.float32)
    text_seq_val = np.zeros((T, N), dtype=np.int32)
    lstm_top_val = np.zeros((N, D_text))
    label_val = np.zeros((N, 1), dtype=np.bool)

    correct_predictions = 0
    total_predictions = 0

    # optimization for faster image loading
    last_img_id = -100
    last_imcrop = None

    for n_batch in range(num_batch):
        print('batch %d / %d' % (n_batch + 1, num_batch))
        batch_begin = n_batch * N
        batch_end = (n_batch + 1) * N

        # load and preprocess the last image from the previous batch
        first_img_id = testing_samples[max(batch_begin - 1, 0)][0]
        first_imname = coco.loadImgs(first_img_id)[0]['coco_url']
        first_im = skimage.io.imread(first_imname)
        # skip grayscale images before resizing
        if len(np.shape(first_im)) != 3: continue
        first_imcrop = skimage.img_as_ubyte(
            skimage.transform.resize(first_im, [224, 224]))

        for n_sample in range(batch_begin, batch_end):
            img_id, description, label = testing_samples[n_sample]

            # Preprocess image and caption
            if args.multicrop:
                # Optimization: do not reload image if it is the same as the last one
                if img_id == last_img_id:
                    imcrop = last_imcrop
                else:
                    imname = coco.loadImgs(img_id)[0]['coco_url']
                    im = skimage.io.imread(imname)

                    # ignore grayscale images
                    if len(np.shape(im)) != 3: continue

                    imcrop = skimage.img_as_ubyte(
                        skimage.transform.resize(im, [224, 224]))
                    last_img_id = img_id
                    last_imcrop = imcrop
            else:
                imcrop = first_imcrop
            text_seq = text_processing.preprocess_sentence(
                description, vocab_dict, T)

            # Form batch
            idx = n_sample - batch_begin
            text_seq_val[:, idx] = text_seq
            imcrop_val[idx, ...] = imcrop - vgg_net.channel_mean
            label_val[idx] = label

        # Extract visual feature
        fc8_crop_val = sess.run(fc8_crop, feed_dict={imcrop_batch: imcrop_val})

        # Extract language feature
        lstm_top_val[...] = sess.run(lstm_top,
                                     feed_dict={text_seq_batch: text_seq_val})

        # Compute scores per proposal
        scores_val = sess.run(scores,
                              feed_dict={
                                  lstm_top_batch: lstm_top_val,
                                  fc8_crop_batch: fc8_crop_val
                              })
        scores_val = scores_val[:batch_end - batch_begin, ...].reshape(-1)

        # Evaluate predictions against the binary labels
        for indx in range(len(scores_val)):
            correct_predictions += int((scores_val[indx] > 0) == label_val[indx, 0])
            total_predictions += 1

        print("%d correct predictions out of %d" %
              (correct_predictions, total_predictions))
        print(correct_predictions / total_predictions)

    print('Final results on the whole test set')
    result_str = 'accuracy = %0.4f \n' % (float(correct_predictions) /
                                          total_predictions)
    print(result_str)
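
# Hypothetical entry point matching the attributes main() reads
# (GPU_ID, multicrop, concat, classes); the flag names are assumptions.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--GPU_ID', type=str, default='0')
    parser.add_argument('--multicrop', action='store_true',
                        help='score every crop per batch instead of the first')
    parser.add_argument('--concat', action='store_true',
                        help='test concatenated captions')
    parser.add_argument('--classes', action='store_true',
                        help='use image class names instead of descriptions')
    main(parser.parse_args())
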
def recurrent_multimodal(text_seq_batch, imcrop_batch, num_vocab, embed_dim,
                         lstm_dim, mlp_hidden_dims, feature_vis_dropout,
                         mlp_dropout):

    _, feat_langs, embedded_seq = lstm_net.lstm_net(text_seq_batch, num_vocab,
                                                    embed_dim, lstm_dim)

    # feat_vis = vgg_net.vgg_fc8_full_conv(imcrop_batch, 'vgg_local', apply_dropout=vgg_dropout)
    feat_vis = deeplab.deeplab_fc8_full_conv(imcrop_batch,
                                             'deeplab',
                                             feature_vis_dropout,
                                             output_dim=1000)

    featmap_H, featmap_W = feat_vis.get_shape().as_list()[1:3]

    # Reshape and tile feat_langs, embedded_seq
    T, N, D_text = embedded_seq.get_shape().as_list()
    feat_langs = [
        tf.tile(tf.reshape(feat_lang, [N, 1, 1, D_text]),
                [1, featmap_H, featmap_W, 1]) for feat_lang in feat_langs
    ]

    embedded_seq = [
        tf.tile(tf.reshape(_embedded_seq, (N, 1, 1, embed_dim)),
                [1, featmap_H, featmap_W, 1])
        for _embedded_seq in tf.split(embedded_seq, T, 0)
    ]

    # L2-normalize the features (except for spatial_batch)
    # and concatenate them along axis 3 (channel dimension)
    spatial_batch = tf.convert_to_tensor(
        generate_spatial_batch(N, featmap_H, featmap_W))

    # Concatenate language, visual, and spatial features at each time step
    feat_alls = []
    for i in range(T):
        feat_alls.append(
            tf.concat([
                tf.nn.l2_normalize(feat_langs[i], 3),
                tf.nn.l2_normalize(feat_vis, 3), spatial_batch
            ], 3))
        #feat_alls.append(tf.concat([feat_langs[i], feat_vis, spatial_batch], 3))

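    # Stack the T per-step maps into [N, H, W, T, D], then reorder to [N, T, H, W, D]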
    feat_all = tf.stack(feat_alls, 3)
    feat_all = tf.transpose(feat_all, [0, 3, 1, 2, 4])
    print(feat_all.shape)

    # mLSTM over the sequence of multimodal feature maps
    print(tf.get_variable_scope().reuse)
    mlstm_top = rnn.mlstm_layer('mlstm', feat_all, None, 500)[0]
    print(tf.get_variable_scope().reuse)

    # MLP classifier
    with tf.variable_scope('classifier'):
        mlp_l1 = conv('mlp_l1',
                      mlstm_top,
                      kernel_size=1,
                      stride=1,
                      output_dim=1)
        #if mlp_dropout: mlp_l1 = drop(mlp_l1, 0.5)
        #mlp_l2 = conv('mlp_l2', mlp_l1, kernel_size=1, stride=1, output_dim=1)

    return mlp_l1
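
# A minimal invocation sketch for the recurrent multimodal network above;
# all shapes and hyperparameter values are illustrative assumptions.
def build_recurrent_multimodal_example():
    T, N = 20, 1
    text_seq_batch = tf.placeholder(tf.int32, [T, N])
    imcrop_batch = tf.placeholder(tf.float32, [N, 512, 512, 3])
    score_map = recurrent_multimodal(text_seq_batch, imcrop_batch,
                                     num_vocab=8803, embed_dim=1000,
                                     lstm_dim=1000, mlp_hidden_dims=500,
                                     feature_vis_dropout=False,
                                     mlp_dropout=False)
    # score_map is a low-resolution per-pixel response map over the image
    return score_map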