import redis
import numpy as np
import scipy.sparse
import cv2

import helpers  # project-local utility module (a sketch of its assumed API closes this section)

def create_visual_keys(project_directory, config, location_keys_map):
	"""Build bidirectional Redis lookups between image ids and matrix row indices."""
	raw_file_directory = project_directory + config.get('RAW-DATA', 'visual_descriptors')
	files = helpers.load_directory(raw_file_directory)

	r = redis.Redis(host=config.get('CACHE', 'host'), port=int(config.get('CACHE', 'port')))

	for filepath in files:

		# Raw file names look like "<location name> <model>".
		name_tokens = helpers.tokenize(helpers.get_file_name(filepath), " ")
		location_name, model = name_tokens[0], name_tokens[1]
		location_id = location_keys_map[location_name]

		file = helpers.load_text_file(filepath)

		for index, line in enumerate(file):

			image_id = helpers.tokenize(line, ",")[0]

			# image id -> row index
			r.set(location_id + "_" + model + "_" + image_id, str(index))
			# row index -> image id
			r.set(location_id + "_" + model + "_" + str(index), image_id)
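# A hedged sketch of reading these keys back; the location id "10", model
# "CM", and image id "4321766051" below are hypothetical values.
import redis

r = redis.Redis(host='localhost', port=6379)  # assumed CACHE settings

row = r.get('10_CM_4321766051')   # image id -> row index, e.g. b'0'
image_id = r.get('10_CM_0')       # row index -> image id, e.g. b'4321766051'

# redis-py returns bytes; decode before use.
print(int(row), image_id.decode('utf-8'))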
def generate_textual_descriptors(project_directory, config, key, input_file, location_keys_map):
	"""Vectorize textual descriptor lines of the form "<id> [term TF DF TF-IDF]...".

	Saves the TF-IDF matrix as a sparse .npz and caches the id/index maps in Redis.
	"""
	# Two passes over the input below, so materialize it in case a file handle
	# (exhausted after one pass) was passed in.
	input_file = list(input_file)

	feature_map = {}
	feature_index = -1
	data_map = {}
	inverse_data_map = {}

	# First pass: a row index for every data id, a column index for every term.
	for data_index, line in enumerate(input_file):

		tokens = helpers.tokenize(line, " ")

		data_map[tokens[0]] = data_index
		inverse_data_map[data_index] = tokens[0]

		i = get_starting_index(key, tokens, location_keys_map)
		# Terms come in (term, TF, DF, TF-IDF) quadruples, hence the stride of 4.
		while i + 3 < len(tokens):
			if tokens[i] not in feature_map:
				feature_index = feature_index + 1
				feature_map[tokens[i]] = feature_index
			i = i + 4

	data_feature_matrix = np.zeros((len(data_map), len(feature_map)))

	# Second pass: fill each row with the TF-IDF weight of its terms.
	for data_index, line in enumerate(input_file):

		feature_vector = np.zeros(len(feature_map))

		tokens = helpers.tokenize(line, " ")

		i = get_starting_index(key, tokens, location_keys_map)
		while i + 3 < len(tokens):
			feature_vector[feature_map[tokens[i]]] = float(tokens[i + 3])
			i = i + 4

		data_feature_matrix[data_index] = feature_vector

	sparse_data_matrix = scipy.sparse.csr_matrix(data_feature_matrix)
	scipy.sparse.save_npz(project_directory + config.get('PREPROCESSED-DATA', 'textual_directory') + key + '.npz', sparse_data_matrix)

	r = redis.Redis(host=config.get('CACHE', 'host'), port=int(config.get('CACHE', 'port')))

	# hset with mapping= stores a whole hash in one call (redis-py >= 3.5);
	# reverse_map is presumably a project-local helper that flips keys and values.
	r.hset('dm_' + key, mapping=data_map)
	r.hset('idm_' + key, mapping=inverse_data_map)
	r.hset('location_map', mapping=location_keys_map)
	r.hset('inverse_location_map', mapping=reverse_map(location_keys_map))
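# A minimal sketch of reading the artifacts back, assuming key = 'location';
# the directory variable and the data id 'acropolis_athens' are hypothetical.
import scipy.sparse
import redis

matrix = scipy.sparse.load_npz(preprocessed_textual_directory + 'location.npz')

r = redis.Redis(host='localhost', port=6379)
data_map = r.hgetall('dm_location')        # data id -> row index, as bytes
row = int(data_map[b'acropolis_athens'])
print(matrix.getrow(row).toarray())        # dense TF-IDF vector for that id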
    def generate_visual_descriptors_map(self):
        """Load every raw visual descriptor file into {location_id: {model: rows}}."""
        visual_descriptors = {}
        visual_descriptors_directory = helpers.load_directory(
            self.project_directory +
            self.config.get("RAW-DATA", "visual_descriptors"))

        for filepath in visual_descriptors_directory:

            # Raw file names look like "<location name> <model>".
            name_tokens = helpers.get_file_name(filepath).split()
            location_name, model = name_tokens[0], name_tokens[1]
            location_id = self.cache.hget(
                'location_map', location_name).decode('utf-8')

            lines = []
            for line in helpers.load_text_file(filepath):
                tokens = helpers.tokenize(line, ",")
                # Keep only the numeric fields: drop the leading image id and
                # the trailing token.
                lines.append(helpers.to_float(tokens[1:-1]))

            visual_descriptors.setdefault(location_id, {})[model] = lines

        return visual_descriptors
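# For orientation, the shape of the returned map; ids and values are
# hypothetical, and "preprocessor" stands in for whatever instance owns
# this method.
descriptors = preprocessor.generate_visual_descriptors_map()
# {
#     '10': {                        # location id from the Redis location_map
#         'CM': [[12.0, 4.5], ...],  # one row of floats per image, id stripped
#         'CN': [[0.1, 0.0], ...],
#     },
#     '11': {...},
# }
print(len(descriptors['10']['CM']))  # number of images at that location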
	def get_starting_index(self, vector_space, tokens):
		"""Index of the first term token; tokens[0] is always the data id."""
		start = 1
		if vector_space == "location":
			# In the location space the id is followed by the spelled-out
			# location name, apparently one token per word, so skip those too.
			location_name = tokens[0]
			location_name_length = len(helpers.tokenize(location_name, "_"))
			# "doge_s_palace" splits into three underscore tokens, but the
			# spelled-out name ("doge's palace") spans only two.
			if location_name == 'doge_s_palace':
				location_name_length = 2
			start = start + location_name_length

		return start
def get_starting_index(key, tokens, location_keys_map):
	"""Module-level variant of the method above; location_keys_map is accepted
	for signature parity but unused."""
	start = 1
	if key == "location":
		location_name = tokens[0]
		location_name_length = len(helpers.tokenize(location_name, "_"))
		# See the note above on the "doge_s_palace" special case.
		if location_name == 'doge_s_palace':
			location_name_length = 2
		start = start + location_name_length

	return start
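# A small worked example of the layout these starting indices account for;
# the line content is hypothetical, the format inferred from the code above.
line = "doge_s_palace doge's palace church 10 4 2.5 venice 7 3 1.9"
tokens = line.split(" ")  # what helpers.tokenize presumably does

start = get_starting_index("location", tokens, {})  # 1 id token + 2 name tokens
assert start == 3

# Terms then come in (term, TF, DF, TF-IDF) quadruples:
i = start
while i + 3 < len(tokens):
    term, tf, df, tfidf = tokens[i:i + 4]
    print(term, tfidf)  # church 2.5, then venice 1.9
    i += 4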
def cn(preprocessed_file_directory, filename, file):
	"""Preprocess color-naming (CN) descriptors: scale to 0-100, store sparsely."""
	temp_visual_descriptor = []
	for line in file:
		tokens = helpers.tokenize(line, ",")
		values = np.array(helpers.to_float(tokens[1:])) * 100  # drop the image id
		# astype returns a new array; it must be assigned for the cast to stick.
		values = values.astype(np.uint8)
		temp_visual_descriptor.append(values)

	sparse_data_matrix = scipy.sparse.csr_matrix(temp_visual_descriptor)
	scipy.sparse.save_npz(preprocessed_file_directory + filename + '.npz', sparse_data_matrix)
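# These descriptor preprocessors (cn above, csd/cm/glrlm/hog below) each write
# a compressed sparse matrix; a quick round-trip check, with a hypothetical
# path produced by cn:
import scipy.sparse

matrix = scipy.sparse.load_npz('preprocessed/visual/acropolis_athens CN.npz')
print(matrix.shape)                # (num_images, descriptor_length)
print(matrix.getrow(0).toarray())  # first image's quantized descriptor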
def csd(preprocessed_file_directory, filename, file):
	"""Preprocess color structure (CSD) descriptors: min-max scale to 0-255."""
	temp_visual_descriptor = []
	for line in file:
		tokens = helpers.tokenize(line, ",")
		values = helpers.to_float(tokens[1:])  # drop the image id
		# Min-max normalize each row to [0, 255] and flatten back to 1-D.
		values = np.reshape(
			cv2.normalize(np.array(values), None, alpha=0, beta=255,
			              norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F),
			(len(values)))
		values = np.floor(values)
		# astype returns a new array; it must be assigned for the cast to stick.
		values = values.astype(np.uint8)
		temp_visual_descriptor.append(values)

	sparse_data_matrix = scipy.sparse.csr_matrix(temp_visual_descriptor)
	scipy.sparse.save_npz(preprocessed_file_directory + filename + '.npz', sparse_data_matrix)
def cm(preprocessed_file_directory, filename, file):
	"""Preprocess color moments (CM): keep the first three values per image."""
	temp_visual_descriptor = []
	for line in file:
		tokens = helpers.tokenize(line, ",")
		values = np.array(helpers.to_float(tokens[1:4]))  # drop the id, keep 3 moments
		# astype returns a new array; it must be assigned for the cast to stick
		# (this assumes the kept moments fall within 0-255).
		values = values.astype(np.uint8)
		temp_visual_descriptor.append(values)

	visual_descriptor = np.asarray(temp_visual_descriptor)
	np.savetxt(preprocessed_file_directory + filename + ".csv", visual_descriptor, delimiter=",")

	sparse_data_matrix = scipy.sparse.csr_matrix(temp_visual_descriptor)
	scipy.sparse.save_npz(preprocessed_file_directory + filename + '.npz', sparse_data_matrix)
	def generate_feature_list(self, vector_space, file):
		"""Build an inverted index mapping each term to the row indices where it
		occurs; also returns the number of rows seen."""
		feature_list = {}
		data_index = 0

		for line in file:

			tokens = helpers.tokenize(line, " ")
			i = self.get_starting_index(vector_space, tokens)

			# Terms come in (term, TF, DF, TF-IDF) quadruples.
			while i + 3 < len(tokens):
				feature = tokens[i]
				if feature not in feature_list:
					feature_list[feature] = []
				feature_list[feature].append(data_index)
				i = i + 4

			data_index = data_index + 1

		return feature_list, data_index
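# A hedged usage sketch with a hypothetical two-line input in the format
# inferred earlier; "preprocessor" stands in for the owning instance.
lines = [
    "img1 church 10 4 2.5 venice 7 3 1.9",
    "img2 church 3 4 0.8",
]
feature_list, count = preprocessor.generate_feature_list("image", lines)
print(count)                   # 2 rows seen
print(feature_list["church"])  # [0, 1]: rows in which "church" occurs
print(feature_list["venice"])  # [0]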
def glrlm(preprocessed_file_directory, filename, file):
	"""Preprocess gray-level run-length (GLRLM) features: scale columns to 0-100."""
	temp_visual_descriptor = []
	for line in file:
		tokens = helpers.tokenize(line, ",")
		temp_visual_descriptor.append(np.array(helpers.to_float(tokens[1:])))  # drop the id

	temp_visual_descriptor = np.array(temp_visual_descriptor)

	# Normalize each column by its maximum (computed once, over the raw values)
	# and scale to 0-100. Cast to uint8 only after normalizing: raw run-length
	# values can exceed 255 and an earlier cast would clip them.
	column_max = temp_visual_descriptor.max(axis=0)
	temp_visual_descriptor = np.floor(temp_visual_descriptor / column_max * 100)
	temp_visual_descriptor = temp_visual_descriptor.astype(np.uint8)

	sparse_data_matrix = scipy.sparse.csr_matrix(temp_visual_descriptor)
	scipy.sparse.save_npz(preprocessed_file_directory + filename + '.npz', sparse_data_matrix)
def hog(preprocessed_file_directory, filename, file):
	"""Preprocess HOG descriptors: L1-normalize each 9-bin block, scale to 0-100."""
	temp_visual_descriptor = []
	for line in file:

		tokens = helpers.tokenize(line, ",")
		values = np.array(helpers.to_float(tokens[1:]))  # drop the image id

		# Each histogram spans 9 orientation bins; normalize block by block.
		for i in range(0, len(values), 9):
			values[i:i + 9] = values[i:i + 9] / values[i:i + 9].sum()

		values = values * 100
		# astype returns a new array; it must be assigned for the cast to stick.
		values = values.astype(np.uint8)
		temp_visual_descriptor.append(values)

	sparse_data_matrix = scipy.sparse.csr_matrix(temp_visual_descriptor)
	scipy.sparse.save_npz(preprocessed_file_directory + filename + '.npz', sparse_data_matrix)
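# All of the snippets above lean on a project-local helpers module that is not
# shown; a minimal sketch of the signatures they assume (bodies are guesses).
import os

def load_directory(directory):
    # Full paths of every file in a directory.
    return [os.path.join(directory, f) for f in os.listdir(directory)]

def load_text_file(filepath):
    # Lines of a text file, stripped of trailing newlines.
    with open(filepath) as f:
        return [line.rstrip('\n') for line in f]

def get_file_name(filepath):
    # File name without directory or extension.
    return os.path.splitext(os.path.basename(filepath))[0]

def tokenize(text, separator):
    return text.split(separator)

def to_float(tokens):
    return [float(t) for t in tokens]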