def process_individual_frames(self, PATH_TO_DATA, annotations, list_of_layers, sampling_rate, LCD):
		"""Run every annotated frame through the net and collect per-layer features.

		PATH_TO_DATA: root directory holding the frame images.
		annotations: path to a pickled dict mapping label index -> list of
			(start_frame, end_frame) segments.
		list_of_layers: layer names whose activations to collect; the special
			name 'input' collects the raw image pixels via cv2 instead.
		sampling_rate: stride (in frames) between processed frames.
		LCD: unused here; kept for interface compatibility with process_batch.

		Returns (X, label_map, frm_map): X maps layer name -> accumulated
		flattened features; label_map/frm_map map the running sample index i
		back to its label index and frame number.
		"""
		i = 0
		label_map = {}
		frm_map = {}
		X = {}
		# Close the annotations file deterministically instead of leaking the handle.
		with open(annotations, "rb") as ann_file:
			map_index_data = pickle.load(ann_file)

		for index in map_index_data:
			segments = map_index_data[index]
			print("Processing images for label " + str(index))
			for seg in segments:
				print(str(seg))
				frm_num = seg[0]
				while frm_num <= seg[1]:
					print(frm_num)
					frm_map[i] = frm_num
					label_map[i] = index
					# Compute the path once: it is needed both for loading the
					# image and for the 'input' pseudo-layer below, where it was
					# previously an undefined name (NameError).
					full_image_path = utils.get_full_image_path(PATH_TO_DATA, frm_num)
					im = caffe.io.load_image(full_image_path)
					self.net.blobs['data'].data[...] = self.transformer.preprocess('data', im)
					out = self.net.forward()
					for layer in list_of_layers:
						if layer == 'input':
							data = cv2.imread(full_image_path)
						else:
							data = self.net.blobs[layer].data[0]
						data = utils.flatten(data)
						utils.dict_insert(layer, data, X)
					frm_num += sampling_rate
					i += 1
		return X, label_map, frm_map
# Beispiel #2 (scrape artifact: stray example separator and vote count removed from code path)
    def process_individual_frames(self, PATH_TO_DATA, annotations,
                                  list_of_layers, sampling_rate):
        """Extract per-frame CNN features for chronologically ordered segments.

        PATH_TO_DATA: root directory holding the frame images.
        annotations: path to a pickled annotations dict; converted to
            chronological (start, end) segments via utils.
        list_of_layers: layer names whose activations to collect; the special
            name 'input' collects the raw image pixels via cv2 instead.
        sampling_rate: stride (in frames) between processed frames.

        Returns X, a dict mapping layer name -> accumulated flattened features.
        """
        X = {}
        # Close the annotations file deterministically instead of leaking the handle.
        with open(annotations, "rb") as ann_file:
            map_index_data = pickle.load(ann_file)

        segments = utils.get_chronological_sequences(map_index_data)
        for seg in segments:
            print(str(seg))
            frm_num = seg[0]
            while frm_num <= seg[1]:
                # Same "frm_num annotations" output as the old 2-arg print.
                print("{0} {1}".format(frm_num, annotations))
                # Compute the path once: it is needed both for loading the
                # image and for the 'input' pseudo-layer below, where it was
                # previously an undefined name (NameError).
                full_image_path = utils.get_full_image_path(PATH_TO_DATA, frm_num)
                im = caffe.io.load_image(full_image_path)
                self.net.blobs['data'].data[...] = self.transformer.preprocess(
                    'data', im)
                out = self.net.forward()
                for layer in list_of_layers:
                    if layer == 'input':
                        data = cv2.imread(full_image_path)
                    else:
                        data = self.net.blobs[layer].data[0]
                    data = utils.flatten(data)
                    utils.dict_insert(layer, data, X)
                frm_num += sampling_rate
        return X
	def process_individual_frames_2(self, PATH_TO_DATA, annotations, list_of_layers, sampling_rate):
		"""Extract per-frame CNN features for chronologically ordered segments.

		PATH_TO_DATA: root directory holding the frame images.
		annotations: path to a pickled annotations dict; converted to
			chronological (start, end) segments via utils.
		list_of_layers: layer names whose activations to collect; the special
			name 'input' collects the raw image pixels via cv2 instead.
		sampling_rate: stride (in frames) between processed frames.

		Returns X, a dict mapping layer name -> accumulated flattened features.
		"""
		X = {}
		# Close the annotations file deterministically instead of leaking the handle.
		with open(annotations, "rb") as ann_file:
			map_index_data = pickle.load(ann_file)

		segments = utils.get_chronological_sequences(map_index_data)
		for seg in segments:
			print(str(seg))
			frm_num = seg[0]
			while frm_num <= seg[1]:
				# Same "frm_num annotations" output as the old 2-arg print.
				print("{0} {1}".format(frm_num, annotations))
				# Compute the path once: it is needed both for loading the
				# image and for the 'input' pseudo-layer below, where it was
				# previously an undefined name (NameError).
				full_image_path = utils.get_full_image_path(PATH_TO_DATA, frm_num)
				im = caffe.io.load_image(full_image_path)
				self.net.blobs['data'].data[...] = self.transformer.preprocess('data', im)
				out = self.net.forward()
				for layer in list_of_layers:
					if layer == 'input':
						data = cv2.imread(full_image_path)
					else:
						data = self.net.blobs[layer].data[0]
					data = utils.flatten(data)
					utils.dict_insert(layer, data, X)
				frm_num += sampling_rate
		return X
	def process_batch(self, PATH_TO_DATA, annotations, list_of_layers, sampling_rate, batch_size, LCD):
		"""Extract CNN features in batches and VLAD-encode each completed batch.

		PATH_TO_DATA: root directory holding the frame images.
		annotations: path to a pickled dict mapping label index -> list of
			(start_frame, end_frame) segments.
		list_of_layers: layer names whose activations to collect; the special
			name 'input' collects raw image pixels (not allowed with LCD).
		sampling_rate: stride (in frames) between processed frames.
		batch_size: number of frames accumulated before VLAD encoding.
		LCD: if truthy, apply lcd.LCD to each layer activation before batching.

		Returns (X, label_map, frm_map): X maps layer name -> VLAD-encoded
		batch features; label_map/frm_map record, per batch index i, the label
		and first frame number of that batch.
		"""
		i = 0
		label_map = {}
		frm_map = {}
		X = {}
		# Close the annotations file deterministically instead of leaking the handle.
		with open(annotations, "rb") as ann_file:
			map_index_data = pickle.load(ann_file)

		for index in map_index_data:
			segments = map_index_data[index]
			for seg in segments:
				frm_num = seg[0]
				b = 1  # Running count of frames in the current batch.
				batch_data = {}
				# NOTE(review): the bound deliberately overshoots seg[1] by
				# batch_size, presumably so the final batch can be completed —
				# confirm frames past the segment end exist on disk.
				while frm_num <= (seg[1] + batch_size):
					# Record label/frame info once, at the start of each batch.
					if b == 1:
						label_map[i] = index
						frm_map[i] = frm_num
					# Process the frame and accumulate features for the batch.
					# Compute the path once: it is also needed by the 'input'
					# pseudo-layer below, where it was previously an undefined
					# name (NameError).
					full_image_path = utils.get_full_image_path(PATH_TO_DATA, frm_num)
					im = caffe.io.load_image(full_image_path)
					self.net.blobs['data'].data[...] = self.transformer.preprocess('data', im)
					out = self.net.forward()
					for layer in list_of_layers:
						if LCD:
							if layer == 'input':
								print("ERROR: Cannot do LCD on input layer")
								sys.exit()
							data = self.net.blobs[layer].data[0]
							data = lcd.LCD(data)
							utils.dict_insert(layer, data, batch_data, axis = 0)
						else:
							if layer == 'input':
								data = cv2.imread(full_image_path)
							else:
								data = self.net.blobs[layer].data[0]
							data = utils.flatten(data)
							utils.dict_insert(layer, data, batch_data, axis = 1)
					if b == batch_size:
						print("Batch %3d" % i)
						b = 0
						i += 1
						# VLAD-encode the completed batch and append it to the
						# main data dictionary.
						for layer in list_of_layers:
							data = encoding.encode_VLAD(batch_data[layer], 5)
							utils.dict_insert(layer, data, X)
						batch_data = {}

					b += 1
					frm_num += sampling_rate
		return X, label_map, frm_map