Example #1
0
def compile_txt_results(algorithm_info, source_db):
	"""Collect detector outputs written as per-image .txt asset files.

	For every image in ``source_db`` the detector is expected to have written
	``<algorithm_uuid>_<image_name>.txt`` into the detector asset directory.
	Binary detectors are parsed with the algorithm's yes/no regex filters;
	all other detectors are parsed as a single float score.

	Args:
		algorithm_info: detector description dict (uuid, detector type,
			regex filters).
		source_db: UUID of the image database the detector ran on.

	Returns:
		A list of dicts, one per image, each holding the result file path,
		the ground-truth label and the parsed detection result.
	"""
	algorithm_uuid = algorithm_info[lookup.uuid_descriptor]
	asset_dir = abspath(lookup.get_algo_asset_dirs()[lookup.detector])

	image_files = lookup.get_image_list(source_db)

	result_file_func = lambda file: join(asset_dir, algorithm_uuid + '_' + fs.get_filename(file[lookup.file_path], extension=False) + '.txt')
	result_files = [{lookup.file_path: result_file_func(file), lookup.label: file[lookup.label]} for file in image_files]

	results = []

	for result_file_info in result_files:
		# result file is read as a list of lines; collapse to one string
		file_result = ''.join(fs.read_txt_file(result_file_info[lookup.file_path]))

		if algorithm_info[lookup.DETECTOR_TYPE] == lookup.binary_detector:
			yes_filter = algorithm_info[lookup.regex_filter_yes]
			no_filter = algorithm_info[lookup.regex_filter_no]

			stego = re.search(yes_filter, file_result)
			cover = re.search(no_filter, file_result)
			# BUG FIX: the original `stego or cover and not (stego and cover)`
			# parsed as `stego or (...)` due to operator precedence, so output
			# matching BOTH filters slipped through. Exactly one must match.
			assert bool(stego) != bool(cover)

			result = lookup.stego if stego else lookup.cover
		else:
			result = float(file_result)

		result_file_info.update({lookup.result: result})
		results.append(result_file_info)

	return results
Example #2
0
def load_data_as_array(db_uuid):
    """Load every image of a database as (pixel_array, numeric_label) pairs.

    Args:
        db_uuid: UUID of the database to load.

    Returns:
        A list of ``(image_array, translated_label)`` tuples, one per image.
    """
    db_information = lookup.get_db_info(db_uuid)
    entries = lookup.get_image_list(db_information[lookup.uuid_descriptor])

    image_info = []
    for entry in entries:
        pixels = img.get_image_array(entry[lookup.file_path])
        label = translate_label(entry[lookup.label])
        image_info.append((pixels, label))
    return image_info
Example #3
0
	def embed_ratio(self, db_name:str, source_db:str, embedding_ratio:float):
		"""Generate a steganographic test DB from ``source_db``.

		Compatible images are shuffled and split evenly across the embeddors
		(remainder images are dealt out one per embeddor), then each embeddor
		receives one embedding task per assigned image at ``embedding_ratio``.

		Args:
			db_name: human-readable name for the new database.
			source_db: identifier of the source (cover) database.
			embedding_ratio: payload ratio; must not exceed
				``self.max_embedding_ratio``.

		Returns:
			The UUID of the newly created database, as returned by
			``self.embed_db``.

		Raises:
			ValueError: if no image type is shared between the dataset and the
				embeddor set, or the requested ratio is too high.
		"""
		db_information = lookup.get_source_db_info(source_db)
		db_compatible_states = set(db_information[lookup.compatible_descriptor])

		# image types that both the dataset and the embeddor set can handle
		db_embed_compatible = db_compatible_states.intersection(self.compatible_types)

		if len(db_embed_compatible) <= 0:
			raise ValueError('The embeddor set and dataset are not compatible')

		if embedding_ratio > self.max_embedding_ratio:
			raise ValueError('The embeddor set cannot support this embedding ratio')

		# keep only compatible images; shuffle so the per-embeddor partitions
		# are an unbiased sample of the dataset
		image_dict = lookup.get_image_list(db_information[lookup.uuid_descriptor])
		image_dict = list(filter(lambda img_info: img_info[lookup.image_type] in db_embed_compatible, image_dict))
		random.shuffle(image_dict)

		num_images = len(image_dict)
		num_embeddors = len(self.embeddors)

		input_partition = []
		output_partition = []

		# fresh, collision-free directory for the generated stego images
		output_directory_name = fs.get_uuid()
		output_directory = abspath(join(lookup.get_db_dirs()[lookup.dataset], output_directory_name))

		assert(not fs.dir_exists(output_directory))
		fs.make_dir(output_directory)

		# even split; the first `remainder` embeddors each get one extra image
		images_per_embeddor = int(num_images / num_embeddors)
		remainder = num_images - images_per_embeddor*num_embeddors
		for i in range(num_embeddors):
			start_idx = i*images_per_embeddor
			end_idx = (i+1)*images_per_embeddor
			input_list = image_dict[start_idx:end_idx].copy()

			input_partition.append(input_list)

		for idx in range(remainder):
			input_partition[idx].append(image_dict[idx + num_embeddors*images_per_embeddor].copy())

		# per-image secret messages sized to the requested embedding ratio
		ratio_embeddor = partial(generator.secret_message_from_embedding, embedding_ratio) 
		secret_message = [list(map(ratio_embeddor, input_list)) for input_list in input_partition]
		output_partition = [generator.generate_output_list(embeddor, output_directory, input_partition[idx]) for idx, embeddor in enumerate(self.embeddors)]

		# one task dict per (embeddor i, image j) pair
		partition = [[{
						lookup.INPUT_IMAGE_PATH: input_partition[i][j][lookup.file_path], 
						lookup.OUTPUT_IMAGE_PATH: output_partition[i][j],
						lookup.PAYLOAD: embedding_ratio,
						lookup.SECRET_TXT_PLAINTEXT: secret_message[i][j],
						lookup.PASSWORD: generator.generate_password(),
						}
		 			for j in range(len(input_partition[i]))] for i in range(num_embeddors)]

		db_uuid = self.embed_db(db_name, partition, source_db, embedding_ratio)

		return db_uuid
Example #4
0
def compile_csv_results(algorithm_info, source_db):
	"""Collect detector outputs written as per-image .csv asset files.

	Builds the expected per-image CSV asset paths
	(``<algorithm_uuid>_<image_name>.csv`` inside the detector asset
	directory) but parsing is not implemented yet.

	Raises:
		NotImplementedError: always — CSV-per-image parsing is unfinished.
	"""
	algorithm_uuid = algorithm_info[lookup.uuid_descriptor]
	asset_dir = abspath(lookup.get_algo_asset_dirs()[lookup.detector])

	image_files = lookup.get_image_list(source_db)
	image_filepath = [abspath(entry[lookup.file_path]) for entry in image_files]

	result_files = [
		join(asset_dir, algorithm_uuid + '_' + fs.get_filename(entry[lookup.file_path], extension=False) + '.csv')
		for entry in image_files
	]

	raise NotImplementedError
Example #5
0
def compile_csv_directory(algorithm_info, source_db):
	"""Collect detector outputs from a single per-directory .csv asset file.

	The detector is expected to have written one CSV named
	``<algorithm_uuid>_<directory_name>.csv`` in the asset directory, with
	one row per image: ``(image identifier, raw result string)``. Binary
	detectors are parsed with the algorithm's yes/no regex filters; other
	detectors are parsed as a float score.

	Args:
		algorithm_info: detector description dict (uuid, detector type,
			regex filters, optional output-file naming convention).
		source_db: UUID of the image database the detector ran on.

	Returns:
		A list of image-info dicts, each updated with the parsed result.
	"""
	algorithm_uuid = algorithm_info[lookup.uuid_descriptor]
	asset_dir = abspath(lookup.get_algo_asset_dirs()[lookup.detector])

	image_list = lookup.get_image_list(source_db)

	# all images must live in exactly one directory -- the CSV is named after it
	directory = list(set([fs.get_directory(image[lookup.file_path]) for image in image_list]))
	assert(len(directory) == 1)
	directory = directory[0]
	result_csv_file = join(asset_dir, algorithm_uuid + '_' + fs.get_filename(directory) + '.csv')

	data = fs.read_csv_file(result_csv_file)
	results = []

	def get_image_info(file_name):
		# Identifier column may be a full path or just the file name,
		# depending on the algorithm's declared output convention.
		transform = lambda img: img[lookup.file_path]
		if lookup.OUTPUT_FILE in algorithm_info:
			if algorithm_info[lookup.OUTPUT_FILE] == lookup.INPUT_IMAGE_NAME:
				transform = lambda img: fs.get_filename(img[lookup.file_path])

		filtered_list = list(filter(lambda img: transform(img) == file_name, image_list))
		assert(len(filtered_list) == 1)
		return filtered_list[0]

	# loop variable renamed from `result` -- the original clobbered it with
	# the parsed value inside the body, a shadowing hazard
	for row in data:
		result_info = get_image_info(row[0])
		file_result = row[1]

		if algorithm_info[lookup.DETECTOR_TYPE] == lookup.binary_detector:
			yes_filter = algorithm_info[lookup.regex_filter_yes]
			no_filter = algorithm_info[lookup.regex_filter_no]

			stego = re.search(yes_filter, file_result)
			cover = re.search(no_filter, file_result)
			# BUG FIX: the original `stego or cover and not (stego and cover)`
			# parsed as `stego or (...)` due to operator precedence, so a row
			# matching BOTH filters slipped through. Exactly one must match.
			assert bool(stego) != bool(cover)

			result = lookup.stego if stego else lookup.cover
		else:
			result = float(file_result)

		result_info.update({lookup.result: result})
		results.append(result_info)

	return results
Example #6
0
	def verify(self, db:str):
		"""Verify every embedded image of a steganographic database.

		Groups the DB's images by the embeddor that produced them, generates
		the verification command pipelines per embeddor, runs them on the
		worker pool stage by stage, and returns the per-embeddor results.

		Args:
			db: identifier of the steganographic database to verify.

		Returns:
			The verification results produced by
			``algo_processor.verify_embedding``.
		"""
		db_info = lookup.get_steganographic_db_info(db)
		db_images = lookup.get_image_list(db_info[lookup.uuid_descriptor])

		# group image records by producing embeddor UUID
		grouped = defaultdict(list)
		for record in db_images:
			record[lookup.INPUT_IMAGE_PATH] = record[lookup.file_path]
			grouped[record[lookup.uuid_descriptor]].append(record)

		all_embeddors = {}
		pre_stage = []
		main_stage = []
		post_stage = []
		termination_stage = []

		for embeddor_uuid, image_group in grouped.items():
			embeddor = algo.get_algorithm_info(lookup.embeddor, embeddor_uuid)
			all_embeddors[embeddor_uuid] = embeddor
			# deep copy so command generation cannot mutate the grouped records
			verify_params = copy.deepcopy(image_group)

			pre_cmds, cmds, post_cmds, termination_cmds = verify_cmds.generate_commands(embeddor, verify_params)
			pre_stage.extend(pre_cmds)
			main_stage.extend(cmds)
			post_stage.extend(post_cmds)
			termination_stage.extend(termination_cmds)

		print('running pre commands')
		runner.run_pool(pre_stage, self.cores)
		print('completed.')
		print('running commands')
		runner.run_pool(main_stage, self.cores)
		print('completed.')
		print('running post commands.')
		runner.run_pool(post_stage, self.cores)
		verification_results = algo_processor.verify_embedding(db, all_embeddors)
		print('completed.')
		print('terminating processes...')
		runner.run_pool(termination_stage, self.cores)
		print('completed.')

		return verification_results
Example #7
0
	def detect(self, test_dbs:list):
		"""Run the detector set over several databases and aggregate statistics.

		Args:
			test_dbs: list of database identifiers to analyse.

		Returns:
			The statistics computed by ``algo.calculate_statistics`` over the
			per-database detection results.

		Raises:
			ValueError: if a database contains an image type the detector set
				does not support.
		"""
		print('collecting results from following databases: ' + str(test_dbs))
		results = []
		for db in test_dbs:
			db_info = lookup.get_db_info(db)
			db_compatible = set(db_info[lookup.compatible_descriptor])

			# every image type present in the DB must be supported
			if not db_compatible.issubset(self.compatible_types):
				raise ValueError('The detector set and dataset are not compatible')

			db_image_dict = lookup.get_image_list(db)
			db_image_list = [{lookup.INPUT_IMAGE_PATH: entry[lookup.file_path]} for entry in db_image_dict]
			results.append(self.detect_list(db_image_list, db))

		return algo.calculate_statistics(*results)
Example #8
0
def verify_embedding(verify_db, embeddors):
	"""Check, per embeddor, that each image's secret message was recovered.

	For every image in ``verify_db`` a verification txt asset is generated
	and read back; an image passes when the first line's length equals the
	recorded secret-text length.

	Args:
		verify_db: identifier of the database to verify.
		embeddors: mapping of embeddor UUID -> embeddor info.

	Returns:
		defaultdict mapping embeddor UUID to a list of
		``{input path, boolean result}`` dicts.
	"""
	embeddor_results = defaultdict(list)

	for image_file in lookup.get_image_list(verify_db):
		image_file[lookup.INPUT_IMAGE_PATH] = image_file[lookup.file_path]
		embeddor_uuid = image_file[lookup.uuid_descriptor]
		verify_txt_file = generator.generate_verify_file(embeddors[embeddor_uuid], image_file)

		asset_directory = lookup.get_algo_asset_dirs()[lookup.embeddor]
		verify_file_path = abspath(join(asset_directory, fs.get_filename(verify_txt_file)))

		data = fs.read_txt_file(verify_file_path)

		# pass iff the recovered first line matches the embedded text length
		verification_result = len(data[0]) == int(image_file[lookup.secret_txt_length])

		embeddor_results[embeddor_uuid].append({lookup.INPUT_IMAGE_PATH: image_file[lookup.INPUT_IMAGE_PATH], lookup.result: verification_result})

	return embeddor_results
Example #9
0
def generate_labels(database_uuids, output_csv_file, relative=False):
    """Write a labels CSV pairing each cover image with its steganographic one.

    Args:
        database_uuids: databases whose images are listed.
        output_csv_file: file name of the CSV, created under the DB directory.
        relative: when True, paths are written relative to the DB directory.
    """
    rows = [('cover', 'steganographic')]
    label_file_directory = abspath(lookup.get_top_level_dirs()[lookup.db])
    path_to_label_file = join(label_file_directory, output_csv_file)

    for db in database_uuids:
        for image_entry in lookup.get_image_list(db):
            source = image_entry[lookup.source_image]
            stego = image_entry[lookup.file_path]
            if relative:
                source = relpath(source, label_file_directory)
                stego = relpath(stego, label_file_directory)
            rows.append((source, stego))

    fs.write_to_csv_file(path_to_label_file, rows)
    print('The labels file can be found here: ' + path_to_label_file)