Example #1
def text(imgs):
    # Load the VGG image-encoding model
    encode = ei.model_gen()

    # Load the RNN caption-generation model
    sd = SceneDesc.scenedesc()
    model = sd.create_model(ret_model=True)

    # Load the matching pre-trained weights
    weight = 'RNN_Train_weights/Weights.h5'
    model.load_weights(weight)

    # Directory where our images are stored
    path = "Data/Images/"

    if isinstance(imgs, list):  # if we were given a list of images
        encoded_images = [(img, ei.encodings(encode, path + img))
                          for img in imgs]
        image_captions = [(img,
                           tm.generate_captions(sd,
                                                model,
                                                encoding,
                                                beam_size=3))
                          for img, encoding in encoded_images]

    else:  # if we were given a single image
        image_path = path + imgs
        encoded_image = ei.encodings(encode, image_path)
        image_captions = (imgs,
                          tm.generate_captions(sd,
                                               model,
                                               encoded_image,
                                               beam_size=3))

    print(image_captions)
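
For context, here is a minimal driver showing how this helper might be called, assuming the surrounding module provides the ei/SceneDesc/tm aliases used above and that the listed files exist under Data/Images/; the filenames below are placeholders, not part of the original example.

# Hypothetical usage sketch; filenames are placeholders.
if __name__ == '__main__':
    # Caption a single image
    text('example_1.jpg')

    # Caption several images in one call; text() branches on isinstance(imgs, list)
    text(['example_1.jpg', 'example_2.jpg'])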
Example #2
def test_generate_captions():
	'''
	Test test_mod.generate_captions. Since the pre-computed weights may
	come from any source, only print the generated sentence to stdout
	and check that it is non-empty.
	'''

	encoded_img = ei.encodings(ei.model_gen(), test_img)
	caption = generate_captions(sd, model, encoded_img, beam_size=3)

	def report():
		print('The model generated the caption: ' + caption)
	atexit.register(report)

	assert len(caption) > 0
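
The test above relies on module-level objects (test_img, sd, model) and the atexit import. A sketch of what that setup could look like follows; the import paths and file names are assumptions inferred from the aliases used in these examples, not confirmed by the source.

# Assumed module-level setup for test_generate_captions (names/paths are hypothetical).
import atexit
import encode_image as ei                 # assumed module behind the 'ei' alias
import SceneDesc
from test_mod import generate_captions    # module name taken from the docstring

test_img = 'Data/Images/test_example.jpg'  # placeholder test image
sd = SceneDesc.scenedesc()
model = sd.create_model(ret_model=True)
model.load_weights('Output/Weights.h5')    # pre-computed weights from any source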
Example #3
def text(img):
	t1 = time.time()

	# Load the image-encoding model and the captioning model with its weights
	encode = ei.model_gen()
	weight = 'Output/Weights.h5'
	sd = SceneDesc.scenedesc()
	model = sd.create_model(ret_model=True)
	model.load_weights(weight)

	# Encode the image and generate a caption with beam search
	image_path = img
	encoded_images = ei.encodings(encode, image_path)
	image_captions = tm.generate_captions(sd, model, encoded_images, beam_size=3)

	# Print the caption and read it aloud
	engine = pyttsx.init()
	print(image_captions)
	engine.say(str(image_captions))
	engine.runAndWait()
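
The t1 = time.time() call above is captured but never read; if timing the caption generation was the intent (an assumption about the author's purpose), two extra lines would report it:

# Hypothetical timing report using the otherwise-unused t1.
t2 = time.time()
print('Caption generated in {:.2f} s'.format(t2 - t1))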
Example #4
def text(img):
    t1 = time.time()
    encode = ei.model_gen()
    weight = 'Output/Weights.h5'
    sd = SceneDesc.scenedesc()
    model = sd.create_model(ret_model=True)
    model.load_weights(weight)
    image_path = img
    encoded_images = ei.encodings(encode, image_path)

    image_captions = tm.generate_captions(sd,
                                          model,
                                          encoded_images,
                                          beam_size=3)
    engine = pyttsx.init()
    print('\nCaption Generated for the above Image is=\n', image_captions)
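
Examples #3 and #4 rely on the Python 2-era pyttsx package for the text-to-speech step. On Python 3 the maintained successor is pyttsx3, which keeps the same init/say/runAndWait interface; a minimal sketch of that substitution is shown below, with the caption string as a placeholder.

# Sketch: the same speech step with pyttsx3 instead of pyttsx.
import pyttsx3

engine = pyttsx3.init()
engine.say('caption text generated by the model')  # placeholder caption
engine.runAndWait()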