Code example #1
0
def preprocess_docker(algorithm_info, to_verify_list):
    """Start the verification docker container and rewrite paths.

    Mounts each input image's host directory (and, when the verify
    command template references a verify txt file, the embeddor asset
    directory) into the container, rewrites every entry's input path to
    its in-container location, and tags each entry with the started
    container id.

    Returns a (pre_cmds, updated_list) tuple; pre_cmds is always empty.
    """
    docker_image = algorithm_info[lookup.DOCKER_IMAGE]
    verify_cmd = lookup.get_verify_cmd(algorithm_info)
    volumes = {}

    # Only mount the embeddor asset dir and attach a per-entry verify
    # file when the command template actually consumes one.
    if lookup.VERIFY_TXT_FILE in verify_cmd:
        host_asset_dir = abspath(lookup.get_algo_asset_dirs()[lookup.embeddor])
        volumes[host_asset_dir] = {'bind': lookup.asset_dir, 'mode': 'rw'}
        for entry in to_verify_list:
            entry[lookup.VERIFY_TXT_FILE] = generator.generate_verify_file(
                algorithm_info, entry)

    for entry in to_verify_list:
        assert (lookup.INPUT_IMAGE_PATH in entry)
        host_input_path = abspath(entry[lookup.INPUT_IMAGE_PATH])

        # Every host directory holding an input image is mounted at the
        # shared in-container input dir.
        volumes[fs.get_directory(host_input_path)] = {
            'bind': lookup.input_dir,
            'mode': 'rw'
        }
        entry[lookup.INPUT_IMAGE_PATH] = join(
            lookup.input_dir, fs.get_filename(host_input_path))

    container_id = runner.start_docker(docker_image, volumes=volumes)
    for entry in to_verify_list:
        entry[lookup.container_id] = container_id

    return [], to_verify_list
Code example #2
0
File: generator.py  Project: DAI-Lab/StegBench
def generate_result_file(algorithm_info, to_detect, file_type, temp=False):
    """Build the output-file path a detector run should write to.

    The directory is the in-container result dir for docker commands
    that do not pipe their output, otherwise the host detector asset
    dir.  The file name joins the algorithm uuid with the input image
    or directory name, an optional '-temp' suffix, and file_type.
    """
    cmd = lookup.get_cmd(algorithm_info)

    is_docker = algorithm_info[lookup.COMMAND_TYPE] == lookup.DOCKER
    # hacky fix for piping output
    if is_docker and lookup.PIPE_OUTPUT not in algorithm_info:
        output_directory = lookup.result_dir
    else:
        output_directory = abspath(
            lookup.get_algo_asset_dirs()[lookup.detector])

    name_parts = [algorithm_info[lookup.uuid_descriptor]]
    if lookup.INPUT_IMAGE_PATH in cmd:
        name_parts.append(
            fs.get_filename(to_detect[lookup.INPUT_IMAGE_PATH],
                            extension=False))
    elif lookup.INPUT_IMAGE_DIRECTORY in cmd:
        name_parts.append(
            fs.get_filename(to_detect[lookup.INPUT_IMAGE_DIRECTORY]))

    output_file_name = '_'.join(name_parts)
    if temp:
        output_file_name += '-temp'

    return join(output_directory, output_file_name + '.' + file_type)
Code example #3
0
def terimination_docker(algorithm_info, verified_list):
    """Build the termination commands for a finished verify run.

    Emits one END_DOCKER command per distinct container id found in
    verified_list, followed by one NATIVE removal command per verified
    entry to delete its generated verify asset file on the host.

    NOTE(review): the function name ('terimination') looks like a typo,
    but it is kept unchanged for backward compatibility with callers.
    """
    termination_cmds = []

    # FIX: the original computed `cmd = lookup.get_verify_cmd(...)` and
    # never used it; the dead call has been removed.

    # Several entries may share one container — stop each container once.
    docker_containers = {
        verified[lookup.container_id] for verified in verified_list
    }
    for container_id in docker_containers:
        termination_cmds.append({
            lookup.COMMAND_TYPE: lookup.END_DOCKER,
            lookup.COMMAND: [container_id]
        })

    # Remove each entry's verify asset file from the host asset dir.
    for verified in verified_list:
        asset_file_name = fs.get_filename(
            generator.generate_verify_file(algorithm_info, verified))
        asset_directory = lookup.get_algo_asset_dirs()[
            algorithm_info[lookup.ALGORITHM_TYPE]]

        old_asset_file_path = join(asset_directory, asset_file_name)
        removal_cmd = ' '.join([lookup.removal_prefix, old_asset_file_path])

        termination_cmds.append({
            lookup.COMMAND_TYPE: lookup.NATIVE,
            lookup.COMMAND: [removal_cmd]
        })

    return termination_cmds
Code example #4
0
File: algo_processor.py  Project: DAI-Lab/StegBench
def compile_txt_results(algorithm_info, source_db):
	"""Collect per-image detector results from the txt files a run produced.

	For each image in source_db, reads the corresponding
	'<algo_uuid>_<image_name>.txt' file from the detector asset dir and
	parses it: binary detectors are matched against the algorithm's
	yes/no regex filters (exactly one must match), any other detector's
	output is parsed as a float score.  Returns the list of per-image
	dicts, each extended with the parsed result.
	"""
	algorithm_uuid = algorithm_info[lookup.uuid_descriptor]
	asset_dir = abspath(lookup.get_algo_asset_dirs()[lookup.detector])

	image_files = lookup.get_image_list(source_db)

	result_file_func = lambda file: join(asset_dir, algorithm_uuid + '_' + fs.get_filename(file[lookup.file_path], extension=False) + '.txt')
	result_files = [{lookup.file_path: result_file_func(file), lookup.label: file[lookup.label]} for file in image_files]

	results = []

	for result_file_info in result_files:
		# read_txt_file returns a sequence of lines; flatten to one string.
		file_result = ''.join(fs.read_txt_file(result_file_info[lookup.file_path]))

		if algorithm_info[lookup.DETECTOR_TYPE] == lookup.binary_detector:
			yes_filter = algorithm_info[lookup.regex_filter_yes]
			no_filter = algorithm_info[lookup.regex_filter_no]

			stego = re.search(yes_filter, file_result)
			cover = re.search(no_filter, file_result)
			# BUG FIX: the original `assert (stego or cover and not (stego and cover))`
			# parsed as `stego or (cover and ...)` due to operator precedence,
			# so it passed whenever the yes-filter matched — even when BOTH
			# filters matched.  Exactly one of the two must match (XOR).
			assert bool(stego) != bool(cover)

			result = lookup.stego if stego else lookup.cover
		else:
			result = float(file_result)

		result_file_info.update({lookup.result: result})
		results.append(result_file_info)

	return results
Code example #5
0
File: algo_info.py  Project: DAI-Lab/StegBench
def calculate_statistics_threshold(detector_results):
    """Compute ROC/AP statistics for a threshold (score-based) detector.

    Each entry in detector_results must carry a ground-truth label and a
    float score under lookup.result.  Saves a ROC-curve plot into the
    detector asset dir and returns the metrics wrapped under
    lookup.result_metric.  If only one class is present, ROC is
    undefined and an empty metrics dict is returned.
    """
    metrics = collections.OrderedDict()

    labels = np.array([
        1 if entry[lookup.label] == lookup.stego else 0
        for entry in detector_results
    ])
    scores = np.array(
        [float(entry[lookup.result]) for entry in detector_results])
    # plot_roc expects a score per class: (p, 1 - p).
    score_pairs = np.array(
        [(float(entry[lookup.result]), 1.0 - float(entry[lookup.result]))
         for entry in detector_results])

    # ROC/AP require both classes to be present in the dataset.
    if len(set(labels)) == 1:
        print('roc values require at least 2 labels in the test dataset')
        return {lookup.result_metric: metrics}

    auc_score = roc_auc_score(labels, scores)
    ap_score = average_precision_score(labels, scores)

    skplt.metrics.plot_roc(labels, score_pairs)

    # Persist the ROC curve next to the other detector assets.
    plot_dir = lookup.get_algo_asset_dirs()[lookup.detector]
    roc_curve_path = abspath(join(plot_dir, fs.get_uuid() + '-roc.png'))
    plt.savefig(roc_curve_path, bbox_inches='tight')

    metrics[lookup.roc_auc] = auc_score
    metrics[lookup.ap_score] = ap_score
    metrics[lookup.roc_curve] = roc_curve_path

    return {lookup.result_metric: metrics}
Code example #6
0
File: algo_processor.py  Project: DAI-Lab/StegBench
def compile_csv_results(algorithm_info, source_db):
	"""Compile detector results from per-image csv files.

	Not yet implemented: the path construction below only sketches the
	intended layout ('<algo_uuid>_<image_name>.csv' in the detector
	asset dir) before unconditionally raising.
	"""
	algorithm_uuid = algorithm_info[lookup.uuid_descriptor]
	asset_dir = abspath(lookup.get_algo_asset_dirs()[lookup.detector])

	image_files = lookup.get_image_list(source_db)
	image_filepath = [abspath(file[lookup.file_path]) for file in image_files]

	result_files = [
		join(asset_dir,
		     algorithm_uuid + '_' +
		     fs.get_filename(file[lookup.file_path], extension=False) + '.csv')
		for file in image_files
	]

	raise NotImplementedError
Code example #7
0
File: generator.py  Project: DAI-Lab/StegBench
def generate_verify_file(algorithm_info, to_verify):
    """Return the path of the verify txt file for one input image.

    Docker commands resolve to the in-container asset dir; every other
    command type resolves to the host embeddor asset dir.  The file
    name is '<algo_uuid>_<image_name>.txt'.
    """
    if algorithm_info[lookup.COMMAND_TYPE] == lookup.DOCKER:
        target_dir = lookup.asset_dir
    else:
        target_dir = abspath(lookup.get_algo_asset_dirs()[lookup.embeddor])

    image_stem = fs.get_filename(
        to_verify[lookup.INPUT_IMAGE_PATH], extension=False)
    file_name = algorithm_info[lookup.uuid_descriptor] + '_' + image_stem + '.txt'

    return join(target_dir, file_name)
Code example #8
0
File: algo_processor.py  Project: DAI-Lab/StegBench
def compile_csv_directory(algorithm_info, source_db):
	"""Collect per-image detector results from one directory-level csv file.

	All images in source_db must live in a single directory; the csv
	'<algo_uuid>_<dirname>.csv' in the detector asset dir is read and
	each (file identifier, raw result) row is matched back to its image
	record.  Binary detectors are matched against the yes/no regex
	filters (exactly one must match); any other detector's raw result is
	parsed as a float.
	"""
	algorithm_uuid = algorithm_info[lookup.uuid_descriptor]
	asset_dir = abspath(lookup.get_algo_asset_dirs()[lookup.detector])

	image_list = lookup.get_image_list(source_db)

	directory = list(set([fs.get_directory(image[lookup.file_path]) for image in image_list]))
	# The directory-level csv layout only makes sense for one directory.
	assert (len(directory) == 1)
	directory = directory[0]
	result_csv_file = algorithm_uuid + '_' + fs.get_filename(directory) + '.csv'
	result_csv_file = join(asset_dir, result_csv_file)

	data = fs.read_csv_file(result_csv_file)
	results = []

	def get_image_info(file_name):
		# Match by full path unless the algorithm outputs bare file names.
		transform = lambda img: img[lookup.file_path]
		if lookup.OUTPUT_FILE in algorithm_info:
			if algorithm_info[lookup.OUTPUT_FILE] == lookup.INPUT_IMAGE_NAME:
				transform = lambda img: fs.get_filename(img[lookup.file_path])

		filtered_list = list(filter(lambda img: transform(img) == file_name, image_list))
		assert (len(filtered_list) == 1)
		return filtered_list[0]

	# FIX: renamed the loop variable from `result` — the original reused
	# and reassigned it inside the loop body, shadowing the csv row.
	for row in data:
		result_info = get_image_info(row[0])
		file_result = row[1]

		if algorithm_info[lookup.DETECTOR_TYPE] == lookup.binary_detector:
			yes_filter = algorithm_info[lookup.regex_filter_yes]
			no_filter = algorithm_info[lookup.regex_filter_no]

			stego = re.search(yes_filter, file_result)
			cover = re.search(no_filter, file_result)
			# BUG FIX: the original `assert (stego or cover and not (stego and cover))`
			# parsed as `stego or (cover and ...)` due to operator precedence,
			# so it passed whenever the yes-filter matched — even when BOTH
			# filters matched.  Exactly one of the two must match (XOR).
			assert bool(stego) != bool(cover)

			result = lookup.stego if stego else lookup.cover
		else:
			result = float(file_result)

		result_info.update({lookup.result: result})
		results.append(result_info)

	return results
Code example #9
0
File: algo_processor.py  Project: DAI-Lab/StegBench
def verify_embedding(verify_db, embeddors):
	"""Check each embedded image's recovered message against its expected length.

	For every image in verify_db, locates the verify txt file its
	embeddor generated, reads it, and compares the length of the first
	line against the recorded secret_txt_length.  Returns a dict mapping
	embeddor uuid -> list of {input path, boolean result} entries.
	"""
	embeddor_results = defaultdict(list)

	for image_file in lookup.get_image_list(verify_db):
		image_file[lookup.INPUT_IMAGE_PATH] = image_file[lookup.file_path]
		embeddor_uuid = image_file[lookup.uuid_descriptor]
		verify_txt_file = generator.generate_verify_file(embeddors[embeddor_uuid], image_file)

		# Re-anchor the generated file name in the host-side embeddor
		# asset dir (the generated path may be the in-container one).
		asset_file_name = fs.get_filename(verify_txt_file)
		asset_directory = lookup.get_algo_asset_dirs()[lookup.embeddor]
		verify_file_path = abspath(join(asset_directory, asset_file_name))

		data = fs.read_txt_file(verify_file_path)
		verification_result = len(data[0]) == int(image_file[lookup.secret_txt_length])

		embeddor_results[embeddor_uuid].append({
			lookup.INPUT_IMAGE_PATH: image_file[lookup.INPUT_IMAGE_PATH],
			lookup.result: verification_result
		})

	return embeddor_results
Code example #10
0
File: detector_cmds.py  Project: DAI-Lab/StegBench
def preprocess_docker(algorithm_info, to_detect_list):
    """Start the detection docker container and rewrite paths.

    Depending on whether the command template consumes a directory or a
    single image path, mounts the needed host directories into the
    container and rewrites each entry's path to its in-container bind
    point.  The detector asset dir is always mounted as the result dir.
    Tags every entry with the started container id and returns a
    (pre_cmds, updated_list) tuple; pre_cmds is always empty.
    """
    docker_image = algorithm_info[lookup.DOCKER_IMAGE]
    detect_cmd = lookup.get_cmd(algorithm_info)
    volumes = {}

    if lookup.INPUT_IMAGE_DIRECTORY in detect_cmd:
        # Directory-mode commands: mount each distinct input directory
        # under a freshly generated unique bind point.
        updated_detect_list = generator.get_directories(to_detect_list)
        for updated in updated_detect_list:
            bind_point = '/' + fs.get_uuid()
            volumes[updated[lookup.INPUT_IMAGE_DIRECTORY]] = {
                'bind': bind_point,
                'mode': 'rw'
            }
            updated[lookup.INPUT_IMAGE_DIRECTORY] = bind_point
    elif lookup.INPUT_IMAGE_PATH in detect_cmd:
        # Single-image commands: mount each host input directory at the
        # shared in-container input dir and rewrite the image path.
        for entry in to_detect_list:
            host_input_path = abspath(entry[lookup.INPUT_IMAGE_PATH])
            volumes[fs.get_directory(host_input_path)] = {
                'bind': lookup.input_dir,
                'mode': 'rw'
            }
            entry[lookup.INPUT_IMAGE_PATH] = join(
                lookup.input_dir, fs.get_filename(host_input_path))

    result_directory = abspath(lookup.get_algo_asset_dirs()[lookup.detector])
    assert (fs.dir_exists(result_directory))
    volumes[result_directory] = {'bind': lookup.result_dir, 'mode': 'rw'}

    container_id = runner.start_docker(docker_image, volumes=volumes)
    for entry in to_detect_list:
        entry[lookup.container_id] = container_id

    return [], to_detect_list