def demoImage(i):
    """Show bundled demo image number *i*, caption it, and speak each caption.

    Loads ``images/anh<i>.jpg``, displays it with matplotlib, generates
    captions from the precomputed VGG feature for that image, and reads
    each caption aloud via the ``speak`` engine.

    Args:
        i: 1-based demo image number (``vgg_feat`` itself is 0-indexed).
    """
    img = mpimg.imread('images/anh' + str(i) + '.jpg')
    plt.imshow(img)
    plt.axis("off")
    plt.show()
    # Demo images are numbered from 1, while vgg_feat is indexed from 0.
    for caption in generate_new_caption(vgg_feat[i - 1]):
        # [1:] skips the leading token of the sentence -- presumably the
        # start-of-sentence marker; confirm against the model's vocab.
        speak.Speak(utils.listToSentence(caption['sen'][1:]))
def captionImage(image_path):
    """Display an arbitrary image, caption it, and speak each caption.

    Unlike ``demoImage`` this extracts VGG features on the fly from the
    given file rather than using a precomputed feature vector.

    Args:
        image_path: Path to the image file to caption.
    """
    img = mpimg.imread(image_path)
    plt.imshow(img)
    plt.axis("off")
    plt.show()
    img_feat = extract_feautures.get_vgg_feat(image_path)
    for caption in generate_new_caption(img_feat):
        # [1:] skips the leading token -- presumably the start-of-sentence
        # marker; confirm against the model's vocab.
        speak.Speak(utils.listToSentence(caption['sen'][1:]))
def generate_new_caption(img_feat):
    """Restore the trained model and return its top four beam-search captions.

    Rebuilds the graph, restores the checkpoint for the configured model
    name, runs caption generation on ``img_feat``, prints each candidate
    with its score, and returns the candidates.

    Args:
        img_feat: Precomputed image feature vector fed to the decoder.

    Returns:
        List of up to four caption dicts, each with ``'sen'`` (token list)
        and ``'score'`` keys.
    """
    tf.reset_default_graph()
    config = utils.Config()
    model_gen = model.Model(config, 1)
    ckpt = path + '/weight_model/' + model_gen.config.model_name + '/train_model.ckpt'
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, ckpt)
        captions = model_gen.generate_caption(sess, img_feat)[0:4]
        for caption in captions:
            print(utils.listToSentence(caption['sen']), 'score: ', caption['score'])
        return captions
def test_modal(start, end):
    """Visually inspect model output for dataset images ``start`` .. ``end-1``.

    Restores the trained checkpoint once, then for each index in
    ``[start, end)`` displays the dataset image and prints its top four
    beam-search captions with scores.

    Args:
        start: First dataset image index (inclusive).
        end: Last dataset image index (exclusive).
    """
    tf.reset_default_graph()
    config = utils.Config()
    model_test = model.Model(config, 1)
    save_file = path + '/weight_model/' + model_test.config.model_name + '/train_model.ckpt'
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, save_file)
        # range(start, end) replaces the original range(200000)[start:end]:
        # same indices, without the arbitrary 200000 upper bound.
        for i in range(start, end):
            img = mpimg.imread(path + '/data/' + materialwv.dataset + '_Dataset/' + materialwv.getImageLink(i))
            plt.imshow(img)
            plt.axis("off")
            plt.show()
            captions = model_test.generate_caption(sess, materialwv.getImageFeat(i))[0:4]
            for caption in captions:
                print(str(i), utils.listToSentence(caption['sen']), 'score: ', caption['score'])
def gen_test_caption():
    """Caption every test-split image and write one caption per line to disk.

    Restores the trained checkpoint, iterates the test split (the index
    range following the train and validation splits), takes the single
    best beam-search caption per image, strips the first and last tokens
    (presumably start/end-of-sentence markers -- confirm), and appends it
    to ``result_beam<beam_size>.txt`` in ``materialwv.result_path``.
    Prints a progress line (index, elapsed seconds, caption) every 100
    images.
    """
    tf.reset_default_graph()
    config = utils.Config()
    model_test = model.Model(config, 1)
    save_file = path + '/weight_model/' + model_test.config.model_name + '/train_model.ckpt'
    saver = tf.train.Saver()
    # Compute the test-split index range once instead of inline in the loop.
    test_start = utils.total_train_img + utils.total_validate_img
    test_end = test_start + utils.total_test_img
    with tf.Session() as sess:
        saver.restore(sess, save_file)
        # 'with' guarantees the result file is closed even if caption
        # generation raises (the original leaked the handle on error).
        with open(materialwv.result_path + "result_" + "beam" + str(utils.beam_size) + ".txt", "w") as out_file:
            start = time.time()
            for i in range(test_start, test_end):
                captions = model_test.generate_caption(sess, materialwv.getImageFeat(i))[0]['sen']
                caption = utils.listToSentence(captions[1:len(captions) - 1])
                out_file.write(caption + "\n")
                if i % 100 == 0:
                    print(i, "time", str(time.time() - start), caption)
                    start = time.time()