Code example #1
0
File: processor.py  Project: DAI-Lab/StegBench
def process_image_directory(path_to_directory, db_name, operation_dict):
    """Process a directory of images into a new source database.

    Catalogues (or, when ``operation_dict`` is given, transforms) the image
    files under ``path_to_directory``, writes a per-image CSV into a fresh
    metadata directory, and appends a summary row to the source master file.

    Args:
        path_to_directory: directory containing the input images.
        db_name: human-readable name recorded for the new database.
        operation_dict: optional image-modification spec; when truthy the
            images are processed by ``modify_images`` into a new dataset
            directory instead of being catalogued in place.

    Returns:
        The UUID assigned to the newly created database.
    """
    source_master_file = lookup.get_all_files()[lookup.source_db_file]
    metadata_directory = lookup.get_db_dirs()[lookup.metadata]
    output_directory = path_to_directory

    db_uuid = fs.get_uuid()
    target_directory = join(metadata_directory, db_uuid)

    assert (fs.dir_exists(path_to_directory))
    assert (fs.dir_exists(metadata_directory))

    absolute_path = abspath(path_to_directory)

    # NOTE: the original computed the file listing unconditionally and then
    # recomputed it identically in the else branch; it is now built only on
    # the path that actually uses it.
    if operation_dict:
        # Modified copies go into a brand-new dataset directory.
        dataset_directory = lookup.get_db_dirs()[lookup.dataset]
        output_directory = abspath(join(dataset_directory, fs.get_uuid()))
        fs.make_dir(output_directory)

        files = modify_images(absolute_path, output_directory, operation_dict)
    else:
        # No modification requested: catalogue the images where they are.
        files = [
            join(absolute_path, f) for f in listdir(absolute_path)
            if img.is_image_file(join(absolute_path, f))
        ]

    info_images, compatible_types = process_cover_list(files)
    rows = [lookup.cover_image_header] + info_images

    fs.make_dir(target_directory)
    fs.write_to_csv_file(join(target_directory, lookup.db_file), rows)

    num_images = len(files)
    compatible_types = list(compatible_types)

    dataset_info = [(db_uuid, abspath(output_directory), db_name, num_images,
                     compatible_types)]
    fs.write_to_csv_file(source_master_file, dataset_info)

    return db_uuid
Code example #2
0
File: orchestrator.py  Project: DAI-Lab/StegBench
	def embed_ratio(self, db_name:str, source_db:str, embedding_ratio:float):
		"""Generate a steganographic test DB from the images in *source_db*.

		The source images are filtered down to the types this orchestrator's
		embeddors can handle, shuffled, and split as evenly as possible across
		the embeddors; every selected image is then embedded once at the
		requested embedding ratio.

		Args:
			db_name: name recorded for the generated database.
			source_db: descriptor (UUID) of the source/cover database.
			embedding_ratio: payload-to-capacity ratio for each embedding.

		Returns:
			The UUID of the generated database (from ``self.embed_db``).

		Raises:
			ValueError: if the embeddor set shares no compatible image type
				with the dataset, or cannot support the requested ratio.
		"""
		db_information = lookup.get_source_db_info(source_db)
		db_compatible_states = set(db_information[lookup.compatible_descriptor])

		# Only image types supported by both the dataset and the embeddors.
		db_embed_compatible = db_compatible_states.intersection(self.compatible_types)
		
		if len(db_embed_compatible) <= 0:
			raise ValueError('The embeddor set and dataset are not compatible')

		if embedding_ratio > self.max_embedding_ratio:
			raise ValueError('The embeddor set cannot support this embedding ratio')

		image_dict = lookup.get_image_list(db_information[lookup.uuid_descriptor])
		image_dict = list(filter(lambda img_info: img_info[lookup.image_type] in db_embed_compatible, image_dict))
		# Shuffle so each embeddor gets a random sample of the images.
		random.shuffle(image_dict)

		num_images = len(image_dict)
		num_embeddors = len(self.embeddors)

		input_partition = []
		output_partition = []

		output_directory_name = fs.get_uuid()
		output_directory = abspath(join(lookup.get_db_dirs()[lookup.dataset], output_directory_name))
		
		assert(not fs.dir_exists(output_directory))
		fs.make_dir(output_directory)

		# Even split: each embeddor gets images_per_embeddor images, then the
		# first `remainder` embeddors each receive one extra image taken from
		# the tail of the shuffled list.
		images_per_embeddor = int(num_images / num_embeddors)
		remainder = num_images - images_per_embeddor*num_embeddors
		for i in range(num_embeddors):
			start_idx = i*images_per_embeddor
			end_idx = (i+1)*images_per_embeddor
			input_list = image_dict[start_idx:end_idx].copy()

			input_partition.append(input_list)

		for idx in range(remainder):
			input_partition[idx].append(image_dict[idx + num_embeddors*images_per_embeddor].copy())

		# One secret message per input image, sized for the embedding ratio.
		ratio_embeddor = partial(generator.secret_message_from_embedding, embedding_ratio) 
		secret_message = [list(map(ratio_embeddor, input_list)) for input_list in input_partition]
		output_partition = [generator.generate_output_list(embeddor, output_directory, input_partition[idx]) for idx, embeddor in enumerate(self.embeddors)]

		# Per-embeddor list of embedding-task dicts consumed by embed_db.
		partition = [[{
						lookup.INPUT_IMAGE_PATH: input_partition[i][j][lookup.file_path], 
						lookup.OUTPUT_IMAGE_PATH: output_partition[i][j],
						lookup.PAYLOAD: embedding_ratio,
						lookup.SECRET_TXT_PLAINTEXT: secret_message[i][j],
						lookup.PASSWORD: generator.generate_password(),
						}
		 			for j in range(len(input_partition[i]))] for i in range(num_embeddors)]

		db_uuid = self.embed_db(db_name, partition, source_db, embedding_ratio)

		return db_uuid
Code example #3
0
File: lookup.py  Project: DAI-Lab/StegBench
def get_image_list(db_descriptor):
    """Return the image records of the database named by *db_descriptor*.

    Locates the database's master CSV inside its metadata directory and
    returns its rows as dictionaries.
    """
    db_directory = join(get_db_dirs()[metadata], db_descriptor)
    db_master_file = join(db_directory, db_file)

    assert (fs.dir_exists(db_directory))
    assert (fs.file_exists(db_master_file))

    return fs.read_csv_file(db_master_file, return_as_dict=True)
Code example #4
0
File: downloader.py  Project: DAI-Lab/StegBench
def download_from_BOWS2(directory):
    """Download and unpack the BOWS2 dataset into *directory*.

    Returns the path of the extracted image directory.
    """
    archive_path = join(directory, 'BOWS2.zip')
    extracted_path = join(directory, 'BOWS2OrigEp3')

    retrieve_file(BOWS2_URL, archive_path)
    unzip_file(archive_path, directory)

    # Extraction must have produced the expected directory.
    assert (fs.dir_exists(extracted_path))

    return extracted_path
Code example #5
0
File: downloader.py  Project: DAI-Lab/StegBench
def download_from_BOSS(directory):
    """Download and unpack the BOSSbase dataset into *directory*.

    Returns the path of the extracted image directory.
    """
    archive = join(directory, 'BOSS.zip')
    extracted = join(directory, 'BOSSbase_1.01')

    retrieve_file(BOSS_URL, archive)
    unzip_file(archive, directory)

    # Sanity check: the archive yielded the expected directory.
    assert (fs.dir_exists(extracted))

    return extracted
Code example #6
0
File: downloader.py  Project: DAI-Lab/StegBench
def download_from_ALASKA(directory):
    """Download and unpack the ALASKA training cover set into *directory*.

    Returns the path of the extracted image directory.
    """
    zip_path = join(directory, 'ALASKA_training_set_jpg1_cover.zip')
    unpacked_path = join(directory, 'alaska1ALASKA_training_set_jpg1_cover')

    retrieve_file(ALASKA_URL, zip_path)
    unzip_file(zip_path, directory)

    # Verify the unzip produced the expected directory.
    assert (fs.dir_exists(unpacked_path))

    return unpacked_path
Code example #7
0
File: downloader.py  Project: DAI-Lab/StegBench
def download_from_COCO_VAL(directory):
    """Download and unpack the COCO val2017 image set into *directory*.

    Returns the path of the extracted image directory.
    """
    # val2017.zip extracts to a directory of the same stem.
    zip_path = join(directory, 'val2017.zip')
    image_dir = join(directory, 'val2017')

    retrieve_file(COCO_VAL_URL, zip_path)
    unzip_file(zip_path, directory)

    assert (fs.dir_exists(image_dir))

    return image_dir
Code example #8
0
File: downloader.py  Project: DAI-Lab/StegBench
def download_from_DIV2K_TRAIN(directory):
    """Download and unpack the DIV2K training (HR) set into *directory*.

    Returns the path of the extracted image directory.
    """
    archive_path = join(directory, 'DIV2K_train_HR.zip')
    dataset_path = join(directory, 'DIV2K_train_HR')

    retrieve_file(DIV2K_TRAIN_URL, archive_path)
    unzip_file(archive_path, directory)

    # The archive must have extracted to the expected directory.
    assert (fs.dir_exists(dataset_path))

    return dataset_path
Code example #9
0
File: processor.py  Project: DAI-Lab/StegBench
def process_steganographic_directory(partition, db_name, embeddor_set,
                                     source_db_uuid, payload):
    """Process a steganographic directory into a new embedded database.

    Runs ``process_steganographic_list`` over the partitioned embedding
    tasks, writes the per-image CSV into a fresh metadata directory, and
    appends a summary row to the embedded-DB master file.

    Returns:
        The UUID assigned to the new steganographic database.
    """
    embedded_master_file = lookup.get_all_files()[lookup.embedded_db_file]
    metadata_directory = lookup.get_db_dirs()[lookup.metadata]

    db_uuid = fs.get_uuid()
    target_directory = join(metadata_directory, db_uuid)

    assert (fs.dir_exists(metadata_directory))

    embeddors = embeddor_set[lookup.embeddor]
    embeddor_set_uuid = embeddor_set[lookup.uuid_descriptor]

    info_images, compatible_types, directory = process_steganographic_list(
        partition, embeddors)

    # Per-image records, headed by the standard steganographic header row.
    fs.make_dir(target_directory)
    fs.write_to_csv_file(join(target_directory, lookup.db_file),
                         [lookup.steganographic_image_header] + info_images)

    # Single summary row for the embedded-DB master file.
    steganographic_dataset_info = [(
        db_uuid,
        abspath(directory),
        db_name,
        len(info_images),
        list(compatible_types),
        source_db_uuid,
        embeddor_set_uuid,
        payload,
    )]
    fs.write_to_csv_file(embedded_master_file, steganographic_dataset_info)

    return db_uuid
Code example #10
0
File: detector_cmds.py  Project: DAI-Lab/StegBench
def preprocess_docker(algorithm_info, to_detect_list):
    """Start the detector's docker container and remap input paths.

    Depending on whether the detector command consumes a directory or a
    single image path, mounts the relevant host paths as docker volumes and
    rewrites each entry's path to its in-container location. The result
    directory is always mounted. Each entry is tagged with the started
    container's id.

    Returns:
        A pair ``([], to_detect_list)`` with the (mutated) detect list.
    """
    image_name = algorithm_info[lookup.DOCKER_IMAGE]
    cmd = lookup.get_cmd(algorithm_info)
    volumes = {}

    if lookup.INPUT_IMAGE_DIRECTORY in cmd:
        # Mount each distinct input directory at a unique container path.
        for entry in generator.get_directories(to_detect_list):
            bind_point = '/' + fs.get_uuid()
            volumes[entry[lookup.INPUT_IMAGE_DIRECTORY]] = {
                'bind': bind_point,
                'mode': 'rw'
            }
            entry[lookup.INPUT_IMAGE_DIRECTORY] = bind_point
    elif lookup.INPUT_IMAGE_PATH in cmd:
        # Mount each image's parent directory and point the entry at the
        # corresponding file inside the container's input directory.
        for entry in to_detect_list:
            host_path = abspath(entry[lookup.INPUT_IMAGE_PATH])

            volumes[fs.get_directory(host_path)] = {
                'bind': lookup.input_dir,
                'mode': 'rw'
            }

            entry[lookup.INPUT_IMAGE_PATH] = join(
                lookup.input_dir, fs.get_filename(host_path))

    result_directory = abspath(lookup.get_algo_asset_dirs()[lookup.detector])
    assert (fs.dir_exists(result_directory))

    volumes[result_directory] = {'bind': lookup.result_dir, 'mode': 'rw'}

    container_id = runner.start_docker(image_name, volumes=volumes)
    for entry in to_detect_list:
        entry[lookup.container_id] = container_id

    return [], to_detect_list
Code example #11
0
File: algo_processor.py  Project: DAI-Lab/StegBench
def get_config_files(config_directory):
	"""Return absolute paths of the config files directly inside *config_directory*."""
	assert(fs.dir_exists(config_directory))
	return [
		abspath(join(config_directory, entry))
		for entry in listdir(config_directory)
		if is_config_file(entry)
	]