示例#1
0
    def get_training_batch(self,
                           batch_no,
                           batch_size,
                           image_size,
                           z_dim,
                           caption_vector_length,
                           split,
                           data_dir,
                           data_set,
                           loaded_data=None):
        """Assemble one training batch of real/wrong images and acoustic features.

        Returns (real_images, wrong_images, acoustic_features, z_noise,
        image_files); the first four are numpy arrays, the last a list of
        base filenames (directory and extension stripped).
        """
        image_list = loaded_data["image_list"]
        n_images = len(image_list)
        start = batch_no * batch_size

        real_images, wrong_images = [], []
        acoustic_features, z_noise, image_files = [], [], []
        for offset in range(batch_size):
            idx = (start + offset) % n_images
            # Matching ("real") image for this slot.
            real_images.append(
                image_processing.load_image_array(image_list[idx],
                                                  image_size))
            # Mismatched ("wrong") image drawn uniformly at random.
            rand_idx = random.randint(0, n_images - 1)
            wrong_images.append(
                image_processing.load_image_array(image_list[rand_idx],
                                                  image_size))
            # Base name (no dir, no extension) keys the .npy feature file.
            base = image_list[idx].split('/')[-1].split('.')[0]
            acoustic_features.append(np.load(self.feat_path + base + ".npy"))
            z_noise.append(np.random.uniform(-1, 1, [z_dim]))
            image_files.append(base)

        return (np.array(real_images), np.array(wrong_images),
                np.array(acoustic_features), np.array(z_noise), image_files)
示例#2
0
def get_training_batch(batch_no, batch_size, image_size, z_dim,
                       caption_vector_length, split, data_dir, data_set,
                       loaded_data=None):
    """Build one flowers training batch: real/wrong images, caption
    vectors, latent noise, and the real-image file paths.

    Only handles data_set == 'flowers'; any other dataset falls through
    and returns None, as in the original.
    """
    if data_set != 'flowers':
        return

    real_images = np.zeros((batch_size, 64, 64, 3))
    wrong_images = np.zeros((batch_size, 64, 64, 3))
    captions = np.zeros((batch_size, caption_vector_length))
    image_files = []

    image_list = loaded_data['image_list']
    total = len(image_list)
    start = batch_no * batch_size
    for cnt in range(batch_size):
        idx = (start + cnt) % total
        image_file = join(data_dir, 'flowers/jpg/' + image_list[idx])
        real_images[cnt] = image_processing.load_image_array(image_file,
                                                             image_size)

        # Improve this selection of wrong image
        wrong_idx = random.randint(0, total - 1)
        wrong_file = join(data_dir, 'flowers/jpg/' + image_list[wrong_idx])
        wrong_images[cnt] = image_processing.load_image_array(wrong_file,
                                                              image_size)

        # One of the 5 captions per image, truncated to the vector length.
        cap_choice = random.randint(0, 4)
        captions[cnt] = loaded_data['captions'][image_list[idx]][cap_choice][
            0:caption_vector_length]
        image_files.append(image_file)

    z_noise = np.random.uniform(-1, 1, [batch_size, z_dim])
    return real_images, wrong_images, captions, z_noise, image_files
示例#3
0
File: train.py  Project: xyz8/MLDS2017
def get_training_batch(batch_no, batch_size, image_size, z_dim,
                       caption_vector_length, split, method_dir, imgs_dir,
                       data_set, loaded_data):
    """Assemble one training batch: real images, mismatched images, the
    first caption vector of each image, uniform latent noise, and the
    image file paths."""
    real_images = np.zeros((batch_size, 64, 64, 3))
    wrong_images = np.zeros((batch_size, 64, 64, 3))
    captions = np.zeros((batch_size, caption_vector_length))
    image_files = []

    image_list = loaded_data['image_list']
    total = len(image_list)
    for cnt in range(batch_size):
        idx = (batch_no * batch_size + cnt) % total
        image_file = join(data_set, imgs_dir, image_list[idx])
        real_images[cnt] = image_processing.load_image_array(image_file,
                                                             image_size)

        # Improve this selection of wrong image
        wrong_idx = random.randint(0, total - 1)
        wrong_file = join(data_set, imgs_dir, image_list[wrong_idx])
        wrong_images[cnt] = image_processing.load_image_array(wrong_file,
                                                              image_size)

        # Always caption 0, truncated to the requested vector length.
        captions[cnt] = loaded_data['captions'][image_list[idx]][0][
            :caption_vector_length]
        image_files.append(image_file)

    z_noise = np.random.uniform(-1, 1, [batch_size, z_dim])
    return real_images, wrong_images, captions, z_noise, image_files
示例#4
0
def get_training_batch(batch_no, batch_size, image_size, z_dim,
                       caption_vector_length, image_dir, loaded_data):
    """One training batch where image_list holds (class_name, file_name)
    pairs and images live under image_dir/<class_name>/."""
    real_images = np.zeros((batch_size, 64, 64, 3))
    wrong_images = np.zeros((batch_size, 64, 64, 3))
    captions = np.zeros((batch_size, caption_vector_length))
    image_files = []

    entries = loaded_data['image_list']
    total = len(entries)
    for cnt in range(batch_size):
        idx = (batch_no * batch_size + cnt) % total
        class_name, file_name = entries[idx]
        image_file = join(os.path.join(image_dir, class_name), file_name)
        real_images[cnt] = image_processing.load_image_array(image_file,
                                                             image_size)

        # Improve this selection of wrong image
        wrong_class, wrong_file_name = entries[random.randint(0, total - 1)]
        wrong_file = join(os.path.join(image_dir, wrong_class),
                          wrong_file_name)
        wrong_images[cnt] = image_processing.load_image_array(wrong_file,
                                                              image_size)

        # One of the 5 captions, keyed by the bare file name.
        cap_idx = random.randint(0, 4)
        captions[cnt] = loaded_data['captions'][file_name][cap_idx][
            0:caption_vector_length]
        image_files.append(image_file)

    z_noise = np.random.uniform(-1, 1, [batch_size, z_dim])
    return real_images, wrong_images, captions, z_noise, image_files
示例#5
0
    def get_training_batch(self, batch_no, batch_size, image_size, z_dim,
                           caption_vector_length, split, data_dir, data_set,
                           loaded_data=None):
        """Sample one class-conditional training batch.

        For each slot: a random class ("place or object"), a random real
        image from that class, a wrong image from a *different* class, a
        random acoustic feature vector of that class (with optional small
        Gaussian noise), Gaussian z-noise, and a one-hot class label.

        Fix: the original branched with `if self.add_noise == False ...
        elif self.add_noise == True`; comparing to booleans with `==` meant
        a non-bool value (e.g. None) appended *no* feature at all, silently
        misaligning the batch. Truthiness is used instead.

        Returns arrays (real_images, wrong_images, acoustic_features,
        z_noise, class_labels) plus the list of base filenames.
        """
        real_images = []
        wrong_images = []
        acoustic_features = []
        z_noise = []
        class_labels = []
        image_files = []
        image_list = loaded_data["image_list"]
        for i in range(batch_no * batch_size,
                       batch_no * batch_size + batch_size):
            place_or_object_id = random.randint(0, len(image_list) - 1)
            image_index = random.randint(
                0, len(image_list[place_or_object_id]) - 1)
            real_images.append(image_processing.load_image_array(
                image_list[place_or_object_id][image_index], image_size))

            # Re-roll until the wrong image comes from a different class.
            different_id = random.randint(0, len(image_list) - 1)
            while different_id == place_or_object_id:
                different_id = random.randint(0, len(image_list) - 1)
            wrong_images.append(image_processing.load_image_array(
                image_list[different_id][random.randint(
                    0, len(image_list[different_id]) - 1)], image_size))

            # <path>/<name>_<suffix>.<ext> -> <name> identifies the sample.
            filename = image_list[place_or_object_id][image_index].split(
                '/')[-1].split('.')[0].split('_')[0]
            feat_index = random.randint(
                0, len(self.feat_path_filelist[place_or_object_id]) - 1)
            feat = np.load(
                self.feat_path_list[place_or_object_id] +
                self.feat_path_filelist[place_or_object_id][feat_index])
            if self.add_noise:
                feat = feat + np.random.normal(
                    scale=0.01, size=[caption_vector_length])
            acoustic_features.append(feat)
            z_noise.append(np.random.normal(scale=0.01, size=[z_dim]))
            class_label = np.zeros(self.num_class)
            class_label[place_or_object_id] = 1
            class_labels.append(class_label)
            image_files.append(filename)

        return (np.array(real_images), np.array(wrong_images),
                np.array(acoustic_features), np.array(z_noise),
                np.array(class_labels), image_files)
示例#6
0
def get_training_batch(batch_no, batch_size, image_size, z_dim,
                       caption_vector_length, split, data_dir, data_set,
                       loaded_data=None):
    """Return one training batch for 'mscoco' or 'faces'.

    mscoco: caption vectors and image ids come from per-batch h5 files;
    wrong images are the real images rotated by one position.
    faces: images and captions come from loaded_data; wrong images are
    drawn uniformly at random.
    Any other data_set returns None, as in the original.
    """
    if data_set == 'mscoco':
        with h5py.File(join(data_dir,
                            'tvs/' + split + '_tvs_' + str(batch_no))) as hf:
            caption_vectors = np.array(hf.get('tv'))
            caption_vectors = caption_vectors[:, 0:caption_vector_length]
        with h5py.File(join(data_dir, 'tvs/' + split + '_tv_image_id_' +
                            str(batch_no))) as hf:
            image_ids = np.array(hf.get('tv'))

        real_images = np.zeros((batch_size, 64, 64, 3))
        wrong_images = np.zeros((batch_size, 64, 64, 3))

        image_files = []
        for idx, image_id in enumerate(image_ids):
            image_file = join(data_dir, '%s2014/COCO_%s2014_%.12d.jpg' %
                              (split, split, image_id))
            real_images[idx] = image_processing.load_image_array(image_file,
                                                                 image_size)
            image_files.append(image_file)

        # TODO>> As of Now, wrong images are just shuffled real images.
        for i in range(batch_size):
            wrong_images[i] = real_images[(i + 1) % batch_size]

        z_noise = np.random.uniform(-1, 1, [batch_size, z_dim])

        return (real_images, wrong_images, caption_vectors, z_noise,
                image_files)

    if data_set == 'faces':
        real_images = np.zeros((batch_size, 64, 64, 3))
        wrong_images = np.zeros((batch_size, 64, 64, 3))
        captions = np.zeros((batch_size, caption_vector_length))

        image_list = loaded_data['image_list']
        image_files = []
        for cnt in range(batch_size):
            idx = (batch_no * batch_size + cnt) % len(image_list)
            image_file = join(data_dir, 'faces/jpg/' + image_list[idx])
            real_images[cnt] = image_processing.load_image_array(image_file,
                                                                 image_size)

            # Improve this selection of wrong image
            wrong_file = join(data_dir, 'faces/jpg/' + image_list[
                random.randint(0, len(image_list) - 1)])
            wrong_images[cnt] = image_processing.load_image_array(
                wrong_file, image_size)

            # One of the 5 captions, truncated to the vector length.
            captions[cnt] = loaded_data['captions'][image_list[idx]][
                random.randint(0, 4)][0:caption_vector_length]
            image_files.append(image_file)

        z_noise = np.random.uniform(-1, 1, [batch_size, z_dim])
        return real_images, wrong_images, captions, z_noise, image_files
示例#7
0
def get_training_batch(batch_no, batch_size, image_size, z_dim,
                       caption_vector_length, split, data_dir, data_set,
                       loaded_data=None):
    """Return one training batch for 'mscoco' or 'flowers'.

    mscoco: caption vectors and image ids come from per-batch h5 files;
    wrong images are the real images rotated by one position.
    flowers: images and captions come from loaded_data; wrong images are
    drawn uniformly at random.
    Any other data_set returns None, as in the original.
    """
    if data_set == 'mscoco':
        with h5py.File(join(data_dir,
                            'tvs/' + split + '_tvs_' + str(batch_no))) as hf:
            caption_vectors = np.array(hf.get('tv'))
            caption_vectors = caption_vectors[:, 0:caption_vector_length]
        with h5py.File(join(data_dir, 'tvs/' + split + '_tv_image_id_' +
                            str(batch_no))) as hf:
            image_ids = np.array(hf.get('tv'))

        real_images = np.zeros((batch_size, 64, 64, 3))
        wrong_images = np.zeros((batch_size, 64, 64, 3))

        image_files = []
        for idx, image_id in enumerate(image_ids):
            image_file = join(data_dir, '%s2014/COCO_%s2014_%.12d.jpg' %
                              (split, split, image_id))
            real_images[idx] = image_processing.load_image_array(image_file,
                                                                 image_size)
            image_files.append(image_file)

        # TODO>> As of Now, wrong images are just shuffled real images.
        for i in range(batch_size):
            wrong_images[i] = real_images[(i + 1) % batch_size]

        z_noise = np.random.uniform(-1, 1, [batch_size, z_dim])

        return (real_images, wrong_images, caption_vectors, z_noise,
                image_files)

    if data_set == 'flowers':
        real_images = np.zeros((batch_size, 64, 64, 3))
        wrong_images = np.zeros((batch_size, 64, 64, 3))
        captions = np.zeros((batch_size, caption_vector_length))

        image_list = loaded_data['image_list']
        image_files = []
        for cnt in range(batch_size):
            idx = (batch_no * batch_size + cnt) % len(image_list)
            image_file = join(data_dir, 'flowers/jpg/' + image_list[idx])
            real_images[cnt] = image_processing.load_image_array(image_file,
                                                                 image_size)

            # Improve this selection of wrong image
            wrong_file = join(data_dir, 'flowers/jpg/' + image_list[
                random.randint(0, len(image_list) - 1)])
            wrong_images[cnt] = image_processing.load_image_array(
                wrong_file, image_size)

            # One of the 5 captions, truncated to the vector length.
            captions[cnt] = loaded_data['captions'][image_list[idx]][
                random.randint(0, 4)][0:caption_vector_length]
            image_files.append(image_file)

        z_noise = np.random.uniform(-1, 1, [batch_size, z_dim])
        return real_images, wrong_images, captions, z_noise, image_files
示例#8
0
def get_augment_batch(image_size, data_dir, batch_no, augment_batch_size,
                      loaded_data, image_captions):
    """Load one augmentation batch of real images plus their captions.

    Unreadable files, and files without 3 channels (which are deleted from
    disk), are replaced by a random substitute from the image list.

    Fixes over the original:
    - bare `except:` narrowed to `except Exception` so KeyboardInterrupt /
      SystemExit are no longer swallowed;
    - substitute index was `np.random.randint(1, len-1)`, which (with
      numpy's half-open interval) could never pick the first or last
      image — now `np.random.randint(0, len(...))` covers the whole list;
    - the integer `ugly` flag loop is replaced by a plain loop/break.

    Returns (text_captions, real_images).
    """
    real_images = np.zeros((augment_batch_size, 64, 64, 3))
    text_captions = []
    image_list = loaded_data['image_list']
    for cnt in range(augment_batch_size):
        idx = (batch_no * augment_batch_size + cnt) % len(image_list)
        image_file = join(data_dir, 'augment/' + image_list[idx])
        while True:
            try:
                image_array = image_processing.load_image_array(image_file,
                                                                image_size)
            except Exception:
                print('image file broken at :', image_file)
                image_file = join(data_dir, 'augment/' + image_list[
                    np.random.randint(0, len(image_list))])
                continue
            if image_array.shape[2] != 3:
                print('image file wrong number of channels. Deleting at :',
                      image_file)
                os.remove(image_file)
                image_file = join(data_dir, 'augment/' + image_list[
                    np.random.randint(0, len(image_list))])
                continue
            break
        real_images[cnt] = image_array
        # Caption is still keyed by the *original* slot's image name.
        text_captions.append(image_captions[image_list[idx]])

    return text_captions, real_images
示例#9
0
def get_coco_batch(image_size, dataDir, dataType, coco, coco_caps,
                   coco_batch_size, imgIds):
    """Sample a COCO batch: per-image caption lists, real images, wrong
    images (the real batch rotated by one), and the image file paths."""
    coco_real_images = np.zeros((coco_batch_size, 64, 64, 3))
    coco_wrong_images = np.zeros((coco_batch_size, 64, 64, 3))
    coco_ann = []
    image_files = []
    for idx in range(coco_batch_size):
        # Random image id (with replacement) from the candidate set.
        img = coco.loadImgs(imgIds[np.random.randint(0, len(imgIds))])[0]
        image_file = '%s/%s/%s' % (dataDir, dataType, img['file_name'])
        image_array = image_processing.load_image_array(image_file,
                                                        image_size)
        anns = coco_caps.loadAnns(coco_caps.getAnnIds(imgIds=img['id']))
        coco_real_images[idx] = image_array
        # Collect every caption string attached to this image.
        coco_ann.append([ann['caption'] for ann in anns])
        image_files.append(image_file)

    # Wrong images are just the real images rotated by one position.
    for i in range(coco_batch_size):
        coco_wrong_images[i] = coco_real_images[(i + 1) % coco_batch_size]
    return coco_ann, coco_real_images, coco_wrong_images, image_files
示例#10
0
def save_for_viz_val(data_dir, generated_images, image_files, image_caps,
                     image_ids, image_size, id):
    """Dump validation visualisations: for each generated image write the
    real image, its caption file (only once), and the fake image under
    data_dir/<image_id>/."""
    generated_images = np.squeeze(np.array(generated_images))
    for i in range(generated_images.shape[0]):
        image_dir = join(data_dir, str(image_ids[i]))
        if not os.path.exists(image_dir):
            os.makedirs(image_dir)

        # Re-save the ground-truth image next to the fakes.
        real_image_path = join(image_dir, '{}.tif'.format(image_ids[i]))
        if os.path.exists(image_dir):
            real_images_255 = image_processing.load_image_array(
                image_files[i], image_size, image_ids[i], mode='val')
            imageio.imwrite(real_image_path, real_images_255)

        # Write the caption only the first time this id is seen.
        caps_dir = join(image_dir, "caps.txt")
        if not os.path.exists(caps_dir):
            with open(caps_dir, "w") as text_file:
                text_file.write(image_caps[i] + "\n")

        imageio.imwrite(join(image_dir, 'fake_image_{}.tif'.format(id)),
                        generated_images[i])
示例#11
0
def get_test_batch(batch_no, batch_size, image_size, z_dim,
                   caption_vector_length, data_dir, data_set,
                   loaded_data=None):
    """Return one 'shapes' test batch: real images, caption vectors, and
    uniform z-noise (wrong images are not needed at test time).

    Fix: the original mixed tab- and space-indentation (the
    `if data_set == 'shapes':` line was space-indented inside an otherwise
    tab-indented body), which raises TabError under Python 3. Indentation
    is now uniform; behaviour is unchanged. Any other data_set returns
    None, as before.
    """
    if data_set == 'shapes':
        real_images = np.zeros((batch_size, 64, 64, 3))
        captions = np.zeros((batch_size, caption_vector_length))

        cnt = 0
        for i in range(batch_no * batch_size,
                       batch_no * batch_size + batch_size):
            idx = i % len(loaded_data['image_list'])
            image_file = join(
                data_dir, 'shapes/images/' + loaded_data['image_list'][idx])
            image_array = image_processing.load_image_array(image_file,
                                                            image_size)
            real_images[cnt, :, :, :] = image_array

            # One of the 5 captions, truncated to the vector length.
            random_caption = random.randint(0, 4)
            captions[cnt, :] = loaded_data['captions'][
                loaded_data['image_list'][idx]][random_caption][
                0:caption_vector_length]
            cnt += 1

        z_noise = np.random.uniform(-1, 1, [batch_size, z_dim])
        return real_images, captions, z_noise
示例#12
0
def get_flower_batch(image_size, data_dir, batch_no, flower_batch_size,
                     loaded_data, image_captions):
    """Load one flowers batch of real images plus one randomly chosen
    text caption per image.

    Returns (text_captions, real_images).

    Cleanup: the original also allocated `wrong_images` and `captions`
    arrays, drew an extra unused `random_caption`, and accumulated
    `image_files` — none of which were ever returned or used. That dead
    code is removed; the returned values are unchanged.
    """
    real_images = np.zeros((flower_batch_size, 64, 64, 3))
    text_captions = []
    cnt = 0
    for i in range(batch_no * flower_batch_size,
                   batch_no * flower_batch_size + flower_batch_size):
        idx = i % len(loaded_data['image_list'])
        image_file = join(data_dir,
                          'flowers/jpg/' + loaded_data['image_list'][idx])
        real_images[cnt, :, :, :] = image_processing.load_image_array(
            image_file, image_size)
        # One of the 5 annotations for this image.
        ann_idx = random.randint(0, 4)
        text_captions.append(
            image_captions[loaded_data['image_list'][idx]][ann_idx])
        cnt += 1

    return text_captions, real_images
示例#13
0
def get_training_batch(batch_no,
                       batch_size,
                       image_size,
                       z_dim,
                       caption_vector_length,
                       split,
                       data_dir,
                       data_set,
                       loaded_data=None):
    """Faces batch: images from faces/jpg/<name>.jpg; captions embedded on
    the fly with word2vec from faces/captions/<name>.txt (one of 5 lines
    chosen at random)."""
    real_images = np.zeros((batch_size, 64, 64, 3))
    wrong_images = np.zeros((batch_size, 64, 64, 3))
    captions = np.zeros((batch_size, caption_vector_length))

    names = loaded_data['image_list']
    image_files = []
    for cnt in range(batch_size):
        idx = (batch_no * batch_size + cnt) % len(names)
        image_file = join(data_dir, 'faces/jpg/' + names[idx] + '.jpg')
        real_images[cnt] = image_processing.load_image_array(image_file,
                                                             image_size)

        # Improve this selection of wrong image
        wrong_name = names[random.randint(0, len(names) - 1)]
        wrong_images[cnt] = image_processing.load_image_array(
            join(data_dir, 'faces/jpg/' + wrong_name + '.jpg'), image_size)

        # Embed one random caption line from the per-image caption file.
        random_caption = random.randint(0, 4)
        caption_file = join(data_dir,
                            'faces/captions/' + names[idx] + '.txt')
        with open(caption_file, 'r') as f:
            sentence = f.read().split('\n')[random_caption]
        captions[cnt] = word2vec.embed_sentence(sentence)[
            0:caption_vector_length]
        image_files.append(image_file)

    z_noise = np.random.uniform(-1, 1, [batch_size, z_dim])
    return real_images, wrong_images, captions, z_noise, image_files
    def get_training_batch(self,
                           batch_no,
                           batch_size,
                           image_size,
                           z_dim,
                           caption_vector_length,
                           split,
                           data_dir,
                           data_set,
                           loaded_data=None):
        """Video-frame batch: real/wrong frames, one random sentence vector
        per video, uniform z-noise, and the video base names.

        Fix: the original called `np.load(open(path, 'r'))` — a *text-mode*
        handle. .npy files are binary, so this fails under Python 3 and
        also leaked the file handle. np.load is now given the path
        directly.
        """
        real_images = []  # (batch_size, image_size, image_size, channel)
        wrong_images = []  # (batch_size, image_size, image_size, channel)
        captions = []  # (batch_size, sentence_vector_size=2400)
        z_noise = []  # (batch_size, noise_vector_size=100)
        image_files = []  # (batch_size)
        caption_path = "/mnt/ntu/text_to_video/data/sentences/vectors/"
        for i in range(batch_no * batch_size,
                       batch_no * batch_size + batch_size):
            idx = i % len(loaded_data["image_list"])
            real_images.append(
                image_processing.load_image_array(
                    loaded_data["image_list"][idx], image_size))
            # Mismatched frame drawn uniformly from the whole list.
            wrong_images.append(
                image_processing.load_image_array(
                    loaded_data["image_list"][random.randint(
                        0,
                        len(loaded_data["image_list"]) - 1)], image_size))
            # <dir>/<video>-<frame>.<ext> -> <video> keys the vectors file.
            video_name = loaded_data["image_list"][idx].split('/')[-1].split(
                '.')[0].split('-')[0]
            caption_npy = np.load(caption_path + video_name + ".npy")
            # One random sentence vector among those stored for the video.
            captions.append(caption_npy[random.randint(
                0, caption_npy.shape[0] - 1)])
            z_noise.append(np.random.uniform(-1, 1, [z_dim]))
            image_files.append(video_name)

        return np.array(real_images), np.array(wrong_images), np.array(
            captions), np.array(z_noise), image_files
示例#15
0
    def get_training_batch(self,
                           batch_no,
                           batch_size,
                           image_size,
                           z_dim,
                           caption_vector_length,
                           split,
                           data_dir,
                           data_set,
                           loaded_data=None):
        """Batch of real/wrong images where the "acoustic feature" slot
        carries a one-hot class-label vector; z-noise is small Gaussian."""
        image_list = loaded_data["image_list"]
        num_classes = len(self.training_data_list)
        start = batch_no * batch_size

        real_images, wrong_images = [], []
        acoustic_features, z_noise, image_files = [], [], []
        for offset in range(batch_size):
            idx = (start + offset) % len(image_list)
            real_images.append(
                image_processing.load_image_array(image_list[idx],
                                                  image_size))
            # Mismatched image drawn uniformly at random.
            rand_idx = random.randint(0, len(image_list) - 1)
            wrong_images.append(
                image_processing.load_image_array(image_list[rand_idx],
                                                  image_size))
            base = image_list[idx].split('/')[-1].split('.')[0]
            # One-hot class label stands in for the acoustic feature.
            onehot = np.zeros(num_classes)
            onehot[loaded_data["class_label"][idx]] = 1
            acoustic_features.append(onehot)
            z_noise.append(np.random.normal(scale=0.01, size=[z_dim]))
            image_files.append(base)

        return (np.array(real_images), np.array(wrong_images),
                np.array(acoustic_features), np.array(z_noise), image_files)
示例#16
0
def get_augment_batch(image_size, data_dir, batch_no, augment_batch_size,
                      loaded_data, image_captions):
    """Load one augmentation batch: real images plus the caption entry for
    each image, cycling through loaded_data['image_list'].

    Returns (text_captions, real_images).
    """
    real_images = np.zeros((augment_batch_size, 64, 64, 3))
    text_captions = []
    image_list = loaded_data['image_list']
    start = batch_no * augment_batch_size
    for cnt in range(augment_batch_size):
        name = image_list[(start + cnt) % len(image_list)]
        image_file = join(data_dir, 'augment/' + name)
        real_images[cnt] = image_processing.load_image_array(image_file,
                                                             image_size)
        text_captions.append(image_captions[name])

    return text_captions, real_images
def get_training_batch(batch_no,
                       batch_size,
                       image_size,
                       z_dim,
                       caption_vector_length,
                       split,
                       data_dir,
                       data_set,
                       loaded_data=None):
    """mscoco batch: caption vectors / image ids read from per-batch h5
    files; wrong images are the real images rotated by one position.
    Other datasets return None, as in the original."""
    if data_set != 'mscoco':
        return

    with h5py.File(join(data_dir,
                        'tvs/' + split + '_tvs_' + str(batch_no))) as hf:
        caption_vectors = np.array(hf.get('tv'))[:, 0:caption_vector_length]
    with h5py.File(join(data_dir, 'tvs/' + split + '_tv_image_id_' +
                        str(batch_no))) as hf:
        image_ids = np.array(hf.get('tv'))

    real_images = np.zeros((batch_size, 64, 64, 3))
    wrong_images = np.zeros((batch_size, 64, 64, 3))

    image_files = []
    for idx, image_id in enumerate(image_ids):
        image_file = join(data_dir, '%s2014/COCO_%s2014_%.12d.jpg' %
                          (split, split, image_id))
        real_images[idx] = image_processing.load_image_array(image_file,
                                                             image_size)
        image_files.append(image_file)

    # TODO>> As of Now, wrong images are just shuffled real images.
    for i in range(batch_size):
        wrong_images[i] = real_images[(i + 1) % batch_size]

    z_noise = np.random.uniform(-1, 1, [batch_size, z_dim])

    return real_images, wrong_images, caption_vectors, z_noise, image_files
示例#18
0
    def get_training_batch(self,
                           batch_no,
                           batch_size,
                           image_size,
                           z_dim,
                           caption_vector_length,
                           split,
                           data_dir,
                           data_set,
                           loaded_data=None):
        """Minimal batch loader: real images plus their paths.

        Other parameters are accepted for interface compatibility only;
        images are always loaded at 64x64, ignoring image_size.
        """
        image_list = loaded_data["image_list"]
        start = batch_no * batch_size
        real_images = []
        image_files = []
        for offset in range(batch_size):
            path = image_list[(start + offset) % len(image_list)]
            real_images.append(image_processing.load_image_array(path, 64))
            image_files.append(path)

        return np.array(real_images), image_files
示例#19
0
import tensorflow as tf
import numpy as np

output_dir = "./images_Many1/"
num_stages = 5
image_size = 64

# Each group on disk is (num_stages + 1) jpgs named "<group>_...": the real
# image followed by one fake per stage; offset 5 is the final-stage fake.
image_list = [f for f in listdir(output_dir) if f[-3:] == 'jpg']
image_list.sort(key=lambda x: int(x[:x.index("_")]))
# Fix: `/` yields a float under Python 3 and range() then raises; use
# floor division.
num_group = len(image_list) // (num_stages + 1)

# Build the graph once, outside the loop — the original re-created the
# placeholders and SVD ops on every iteration, bloating the default graph.
t_real_image = tf.placeholder('float32', [3, image_size, image_size],
                              name='real_image')
t_fake_image = tf.placeholder('float32', [3, image_size, image_size],
                              name='fake_image')
s_r = tf.svd(t_real_image, compute_uv=False)
s_f = tf.svd(t_fake_image, compute_uv=False)

for i in range(num_group):
    real_image_name = image_list[i * (num_stages + 1)]
    fake_image_name = image_list[i * (num_stages + 1) + 5]
    real_image = image_processing.load_image_array(
        join(output_dir, real_image_name), image_size)
    fake_image = image_processing.load_image_array(
        join(output_dir, fake_image_name), image_size)
    # HWC -> CHW to match the placeholder layout.
    real_image = np.einsum('ijk->kij', real_image)
    fake_image = np.einsum('ijk->kij', fake_image)
    with tf.Session() as sess:
        sess.run([s_r], feed_dict={t_real_image: real_image})
        sess.run([s_f], feed_dict={t_fake_image: fake_image})
    # Fix: Python-2 `print fake_image` statement -> print() call.
    print(fake_image)