import math

import MsrDataUtil  # project-local data utilities (not shown in this listing)


def exe_test(sess,
             data,
             batch_size,
             v2i,
             i2v,
             hf1,
             hf2,
             hf3,
             feature_shape1,
             feature_shape2,
             feature_shape3,
             predict_words,
             input_video1,
             input_video2,
             input_video3,
             input_captions,
             y,
             capl=16):
    """Greedy decoding on the test split using three video-feature streams."""
    caption_output = []
    total_data = len(data)
    num_batch = int(math.ceil(total_data * 1.0 / batch_size))

    for batch_idx in xrange(num_batch):
        batch_caption = data[batch_idx *
                             batch_size:min((batch_idx + 1) *
                                            batch_size, total_data)]

        data_v1 = MsrDataUtil.getBatchVideoFeature(batch_caption, hf1,
                                                   feature_shape1)
        data_v2 = MsrDataUtil.getBatchVideoFeature(batch_caption, hf2,
                                                   feature_shape2)
        data_v3 = MsrDataUtil.getBatchVideoFeature(batch_caption, hf3,
                                                   feature_shape3)

        data_c, data_y = MsrDataUtil.getBatchTestCaptionWithSparseLabel(
            batch_caption, v2i, capl=capl)
        [gw] = sess.run(
            [predict_words],
            feed_dict={
                input_video1: data_v1,
                input_video2: data_v2,
                input_video3: data_v3,
                input_captions: data_c,
                y: data_y
            })

        generated_captions = MsrDataUtil.convertCaptionI2V(
            batch_caption, gw, i2v)

        for idx, sen in enumerate(generated_captions):
            print('%s : %s' % (batch_caption[idx].keys()[0], sen))
            caption_output.append({
                'image_id': batch_caption[idx].keys()[0],
                'caption': sen
            })

    js = {}
    js['val_predictions'] = caption_output

    return js
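
A minimal driver sketch for the function above. Everything here is an assumption: the checkpoint path, the feature-file names, `test_data`, the vocabularies, and the graph tensors (`predict_words`, the placeholders) all come from the surrounding training script, which is not part of this listing.

import json

import h5py
import tensorflow as tf

# Hypothetical HDF5 feature files, one per stream (names are placeholders).
hf1 = h5py.File('resnet152_features.h5', 'r')
hf2 = h5py.File('googlenet_features.h5', 'r')
hf3 = h5py.File('c3d_features.h5', 'r')

with tf.Session() as sess:
    tf.train.Saver().restore(sess, 'saved_model/model.ckpt')  # hypothetical checkpoint
    # Feature shapes are illustrative: (timesteps, dim) per stream.
    js = exe_test(sess, test_data, 64, v2i, i2v, hf1, hf2, hf3,
                  (40, 2048), (40, 1024), (40, 4096),
                  predict_words, input_video1, input_video2, input_video3,
                  input_captions, y, capl=16)

with open('val_predictions.json', 'w') as f:
    json.dump(js, f)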
Example #2

import math

import MsrDataUtil  # project-local data utilities (not shown in this listing)


def beam_search_exe_test(sess,
                         data,
                         cate_info,
                         batch_size,
                         v2i,
                         i2v,
                         hf,
                         feature_shape,
                         predict_words,
                         input_video,
                         input_captions,
                         input_categories,
                         y,
                         finished_beam,
                         logprobs_finished_beams,
                         past_logprobs,
                         capl=16):
    """Beam-search decoding on the test split, conditioned on video categories."""
    caption_output = []
    total_data = len(data)
    num_batch = int(math.ceil(total_data * 1.0 / batch_size))

    for batch_idx in xrange(num_batch):
        batch_caption = data[batch_idx *
                             batch_size:min((batch_idx + 1) *
                                            batch_size, total_data)]

        data_v = MsrDataUtil.getBatchVideoFeature(batch_caption, hf,
                                                  feature_shape)
        data_c, data_y = MsrDataUtil.getBatchTestCaptionWithSparseLabel(
            batch_caption, v2i, capl=capl)
        data_cate = MsrDataUtil.getBatchVideoCategoriesInfo(
            batch_caption, cate_info, feature_shape)
        [gw, tw, gp, gl] = sess.run(
            [
                finished_beam, predict_words, logprobs_finished_beams,
                past_logprobs
            ],
            feed_dict={
                input_video: data_v,
                input_captions: data_c,
                input_categories: data_cate,
                y: data_y
            })

        generated_captions = MsrDataUtil.convertCaptionI2V(
            batch_caption, gw, i2v)

        for idx, sen in enumerate(generated_captions):
            print('%s : %s' % (batch_caption[idx].keys()[0], sen))

            caption_output.append({
                'image_id': batch_caption[idx].keys()[0],
                'caption': sen
            })

    js = {}
    js['val_predictions'] = caption_output

    return js
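
For reference, these test drivers all index `batch_caption[idx].keys()[0]`, which implies that `data` is a list of single-key dicts mapping a video id to its reference captions, and that this is Python 2, where `dict.keys()` returns a list. An illustrative, self-contained example with made-up ids:

data = [
    {'video7010': ['a man is singing on stage']},
    {'video7011': ['a cat is playing with a toy']},
]
print(data[0].keys()[0])  # 'video7010' -- works because Python 2 keys() is a list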
Example #3

import math

import numpy as np

import MsrDataUtil  # project-local data utilities (not shown in this listing)


def exe_test(sess, data, batch_size, v2i, i2v, hf1, hf2, feature_shape,
             predict_words, input_video, input_captions, y, finished_beam,
             logprobs_finished_beams, past_logprobs, beam_hidden_state,
             past_symbols_states, finished_beams_states, capl=16):
    """Greedy decoding over concatenated ResNet and C3D frame features."""
    caption_output = []
    total_data = len(data)

    num_batch = int(math.ceil(total_data * 1.0 / batch_size))
    print(num_batch)

    for batch_idx in xrange(num_batch):
        batch_caption = data[batch_idx *
                             batch_size:min((batch_idx + 1) *
                                            batch_size, total_data)]

        data_v1 = MsrDataUtil.getBatchVideoFeature(batch_caption, hf1,
                                                   (feature_shape[0], 2048))
        data_v2 = MsrDataUtil.getBatchC3DVideoFeature(batch_caption, hf2,
                                                      (feature_shape[0], 4096))

        # Optional per-frame L2 normalization of each stream (left disabled):
        # data_v1 = data_v1 / (np.linalg.norm(data_v1, axis=-1, keepdims=True) + sys.float_info.epsilon)
        # data_v2 = data_v2 / (np.linalg.norm(data_v2, axis=-1, keepdims=True) + sys.float_info.epsilon)

        # Fuse the two streams along the channel axis: (timesteps, 2048 + 4096).
        data_v = np.concatenate((data_v1, data_v2), axis=-1)

        data_c, data_y = MsrDataUtil.getBatchTestCaptionWithSparseLabel(
            batch_caption, v2i, capl=capl)
        # Beam-search variant (disabled in favor of plain greedy decoding):
        # [gw, tw, gp, gl, pp, pss, fbs] = sess.run(
        #     [finished_beam, predict_words, logprobs_finished_beams,
        #      past_logprobs, beam_hidden_state, past_symbols_states,
        #      finished_beams_states],
        #     feed_dict={input_video: data_v, input_captions: data_c, y: data_y})
        [tw] = sess.run([predict_words],
                        feed_dict={
                            input_video: data_v,
                            input_captions: data_c,
                            y: data_y
                        })

        generated_captions = MsrDataUtil.convertCaptionI2V(
            batch_caption, tw, i2v)

        for idx, sen in enumerate(generated_captions):
            print('%s : %s' % (batch_caption[idx].keys()[0], sen))
            caption_output.append({
                'image_id': batch_caption[idx].keys()[0],
                'caption': sen
            })

    js = {}
    js['val_predictions'] = caption_output

    return js
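
The fusion step in the function above is a plain channel-wise concatenation of the per-frame ResNet and C3D features. A standalone numpy illustration (the batch size and timestep count are made up; the 2048/4096 dims are the ones hard-coded above):

import numpy as np

batch, timesteps = 2, 40  # hypothetical: 2 clips, 40 sampled frames each
data_v1 = np.random.rand(batch, timesteps, 2048).astype(np.float32)  # appearance (ResNet)
data_v2 = np.random.rand(batch, timesteps, 4096).astype(np.float32)  # motion (C3D)
data_v = np.concatenate((data_v1, data_v2), axis=-1)
print(data_v.shape)  # (2, 40, 6144)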
Example #4

import math

import MsrDataUtil  # project-local data utilities (not shown in this listing)
import MsrFinalDataUtil


def beamsearch_exe_test(sess,
                        data,
                        cate_info,
                        batch_size,
                        v2i,
                        i2v,
                        hf1,
                        hf2,
                        feature_shape1,
                        feature_shape2,
                        predict_words,
                        input_video1,
                        input_video2,
                        input_captions,
                        input_categories,
                        y,
                        finished_beam,
                        logprobs_finished_beams,
                        capl=16):
    """Beam-search decoding over ResNet152 and C3D features."""
    caption_output = []
    total_data = len(data)
    num_batch = int(math.ceil(total_data * 1.0 / batch_size))

    for batch_idx in xrange(num_batch):
        batch_caption = data[batch_idx *
                             batch_size:min((batch_idx + 1) *
                                            batch_size, total_data)]

        data_v1 = MsrFinalDataUtil.getBatchVideoFeature(
            batch_caption, hf1, feature_shape1)
        data_v2 = MsrFinalDataUtil.getBatchC3DVideoFeature(
            batch_caption, hf2, feature_shape2)

        data_c, data_y = MsrDataUtil.getBatchTestCaptionWithSparseLabel(
            batch_caption, v2i, capl=capl)
        data_cate = MsrDataUtil.getBatchVideoCategoriesInfo(
            batch_caption, cate_info, feature_shape1)

        [tw, gw, gp] = sess.run(
            [predict_words, finished_beam, logprobs_finished_beams],
            feed_dict={
                input_video1: data_v1,
                input_video2: data_v2,
                input_captions: data_c,
                input_categories: data_cate,
                y: data_y
            })

        generated_captions = MsrDataUtil.convertCaptionI2V(
            batch_caption, gw, i2v)

        for idx, sen in enumerate(generated_captions):
            print('%s : %s' % (batch_caption[idx].keys()[0], sen))
            caption_output.append({
                'video_id': batch_caption[idx].keys()[0],
                'caption': sen.strip()
            })

    js = {}
    js['result'] = caption_output
    js['version'] = '3'
    js['external_data'] = {
        'used': 'true',
        'details': 'the features of video frames are extracted by ResNet152 and C3D'
    }

    return js
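
The dict returned above already follows a challenge-style submission layout. A minimal sketch for serializing it; the output filename is an assumption, and the `js` literal below just mirrors the structure the function builds, with an illustrative entry:

import json

js = {
    'result': [{'video_id': 'video7010', 'caption': 'a man is singing'}],
    'version': '3',
    'external_data': {
        'used': 'true',
        'details': 'the features of video frames are extracted by ResNet152 and C3D'
    }
}
with open('submission.json', 'w') as f:  # hypothetical output path
    json.dump(js, f, indent=2)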