Example #1
def initialize_filesystem(directory):
    """Clears and adds needed directories for stegdetect to work"""
    print('initializing fs at ' + directory)
    try:
        os.chdir(directory)
    except OSError as e:
        raise OSError(
            'directory: ' + directory +
            ' is not a valid directory. Please initialize with a valid directory.'
        ) from e

    print('cleaning fs...')
    fs.clean_filesystem([stegbench_tld])

    fs.make_dir(stegbench_tld)

    print('initializing directories...')

    directories = get_all_dirs()

    # use a separate loop variable so the `directory` parameter is not shadowed
    for dir_path in directories:
        fs.make_dir(dir_path)

    print('initializing all files...')
    algo_files = get_all_files()
    for file_type, path_to_master_file in algo_files.items():
        # each master csv starts with its type-specific header row
        master_file_header = get_master_header(file_type)
        fs.write_to_csv_file(path_to_master_file, [master_file_header])
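
A minimal usage sketch for the function above; the workspace path is hypothetical and must already exist, since initialize_filesystem raises OSError otherwise.

# hypothetical path: the function chdirs into it, wipes any existing
# stegbench tree, and recreates the directory and master-file layout
initialize_filesystem('/tmp/stegbench_demo')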
Example #2
def process_image_directory(path_to_directory, db_name, operation_dict):
    """processes an image directory"""
    source_master_file = lookup.get_all_files()[lookup.source_db_file]
    metadata_directory = lookup.get_db_dirs()[lookup.metadata]
    output_directory = path_to_directory

    db_uuid = fs.get_uuid()
    target_directory = join(metadata_directory, db_uuid)

    assert fs.dir_exists(path_to_directory)
    assert fs.dir_exists(metadata_directory)

    absolute_path = abspath(path_to_directory)

    if operation_dict:
        # apply the requested operations, writing modified copies to a new dataset directory
        dataset_directory = lookup.get_db_dirs()[lookup.dataset]
        output_directory = abspath(join(dataset_directory, fs.get_uuid()))
        fs.make_dir(output_directory)

        files = modify_images(absolute_path, output_directory, operation_dict)
    else:
        # no operations requested: collect the image files straight from the source directory
        files = [
            join(absolute_path, f) for f in listdir(absolute_path)
            if img.is_image_file(join(absolute_path, f))
        ]

    info_images, compatible_types = process_cover_list(files)
    rows = [lookup.cover_image_header] + info_images

    fs.make_dir(target_directory)
    fs.write_to_csv_file(join(target_directory, lookup.db_file), rows)

    num_images = len(files)
    compatible_types = list(compatible_types)

    dataset_info = [(db_uuid, abspath(output_directory), db_name, num_images,
                     compatible_types)]
    fs.write_to_csv_file(source_master_file, dataset_info)

    return db_uuid
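
A hedged usage sketch; the directory and database name are placeholders, and passing None as operation_dict takes the no-modification branch.

# register every image under ./covers as a new database named 'demo_db'
cover_db_uuid = process_image_directory('./covers', 'demo_db', None)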
Example #3
def create_algorithm_set(algorithm_type: str, algorithms: list[str]):
    """creates a new algorithm set"""
    set_uuid = fs.get_uuid()
    set_file_directory = lookup.get_algo_set_dirs()[algorithm_type]
    individual_set_file_path = join(set_file_directory,
                                    fs.create_name_from_uuid(set_uuid, 'csv'))

    fs.make_file(individual_set_file_path)

    file_header = lookup.individual_set_header
    data_to_write = [file_header]
    for algorithm_uuid in algorithms:
        # resolve each uuid first so an invalid algorithm fails before the set rows are written
        algorithm_info = get_algorithm_info(algorithm_type, algorithm_uuid)
        data_to_write.append([algorithm_info[lookup.uuid_descriptor]])

    fs.write_to_csv_file(individual_set_file_path, data_to_write)

    return set_uuid
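
A sketch of a call, assuming 'embeddor' is a valid algorithm type string and the placeholder UUIDs were registered earlier (both are assumptions).

# bundle two previously registered algorithms into a reusable set
new_set_uuid = create_algorithm_set('embeddor', ['<embeddor-uuid-1>', '<embeddor-uuid-2>'])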
Example #4
	def run(self, source_dbs: list):
		detect_dbs = list(source_dbs)
		for db in source_dbs:
			print('embedding source db: (' + db + ')')
			generated_db = self.embeddor.embed_ratio(db, float(self.metadata[lookup.payload]))
			# confirm the payload was actually embedded before running detection
			verify = self.verifier.verify(generated_db)
			if verify:
				print('generated db is verified steganographic: (' + generated_db + ')')
			else:
				raise ValueError('The db (' + generated_db + ') was not properly embedded')

			detect_dbs.append(generated_db)

		results = self.detector.detect(detect_dbs)

		if lookup.result_file in self.metadata:
			fs.write_to_csv_file(self.metadata[lookup.result_file], self.format_results(results), override=True)

		return results
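
A sketch of driving the method above, assuming a scheduler-like object built elsewhere with embeddor, verifier, detector, and metadata attributes (inferred from the method body, not confirmed).

# embeds each source db, verifies it, then detects over clean + generated dbs
results = scheduler.run(['<source-db-uuid>'])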
Example #5
    def process_csv_file(self, input_file, result_file, probability=False):
        """converts a raw detector csv into (file_name, result) rows"""
        data = fs.read_csv_file(input_file)

        # the first two rows are tool header/metadata, not per-file results
        data = data[2:]
        data_to_write = []
        for row in data:
            file_name = row[0]

            if probability:
                # keep the raw score from the last column
                result = float(row[-1])
            else:
                # the second column is a 'true'/'false' stego flag
                result = row[1] == 'true'

            data_to_write.append([file_name, result])

        fs.write_to_csv_file(result_file, data_to_write)
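
A hypothetical invocation; 'processor' stands in for whatever object defines process_csv_file, and the file names are placeholders.

# keep the raw probability score from the last column instead of a boolean flag
processor.process_csv_file('raw_detections.csv', 'results.csv', probability=True)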
Example #6
def generate_labels(database_uuids, output_csv_file, relative=False):
    """generates labels.csv file for a set of databases"""
    # the first row doubles as the csv header: (cover path, steganographic path)
    db_image_list = [('cover', 'steganographic')]
    label_file_directory = abspath(lookup.get_top_level_dirs()[lookup.db])
    path_to_label_file = join(label_file_directory, output_csv_file)
    for db in database_uuids:
        db_image_dict = lookup.get_image_list(db)
        if relative:
            # store paths relative to the label file's directory so the csv stays portable
            db_image_list.extend(
                (relpath(img[lookup.source_image], label_file_directory),
                 relpath(img[lookup.file_path], label_file_directory))
                for img in db_image_dict)
        else:
            db_image_list.extend(
                (img[lookup.source_image], img[lookup.file_path])
                for img in db_image_dict)

    fs.write_to_csv_file(path_to_label_file, db_image_list)
    print('The labels file can be found here: ' + path_to_label_file)
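
A minimal sketch with hypothetical database UUIDs; relative=True writes paths relative to the label file's directory so the csv stays portable.

generate_labels(['<cover-db-uuid>', '<stego-db-uuid>'], 'labels.csv', relative=True)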
Example #7
def process_steganographic_directory(partition, db_name, embeddor_set,
                                     source_db_uuid, payload):
    """processes a steganographic directory"""
    embedded_master_file = lookup.get_all_files()[lookup.embedded_db_file]
    metadata_directory = lookup.get_db_dirs()[lookup.metadata]

    db_uuid = fs.get_uuid()
    target_directory = join(metadata_directory, db_uuid)

    assert fs.dir_exists(metadata_directory)

    embeddors = embeddor_set[lookup.embeddor]
    embeddor_set_uuid = embeddor_set[lookup.uuid_descriptor]

    info_images, compatible_types, directory = process_steganographic_list(
        partition, embeddors)
    rows = [lookup.steganographic_image_header] + info_images

    fs.make_dir(target_directory)
    fs.write_to_csv_file(join(target_directory, lookup.db_file), rows)

    num_images = len(info_images)
    compatible_types = list(compatible_types)

    steganographic_dataset_info = [(
        db_uuid,
        abspath(directory),
        db_name,
        num_images,
        compatible_types,
        source_db_uuid,
        embeddor_set_uuid,
        payload,
    )]
    fs.write_to_csv_file(embedded_master_file, steganographic_dataset_info)

    return db_uuid
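
A hedged call sketch: partition and embeddor_set would come from earlier pipeline steps, and treating payload as an embedding ratio follows the embed_ratio call in Example #4 (an assumption, not confirmed here).

stego_db_uuid = process_steganographic_directory(
    partition, 'demo_stego_db', embeddor_set, '<source-db-uuid>', 0.4)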
Example #8
def add_to_algorithm_set(algorithm_type: str, set_uuid: str,
                         algorithms: list[str]):
    """adds algorithms to an existing algorithm set"""
    algorithm_set = get_algorithm_set(algorithm_type, set_uuid)
    compatible_types_set = set(algorithm_set[lookup.compatible_descriptor])

    algorithm_data = []
    for algorithm_uuid in algorithms:
        algorithm_info = get_algorithm_info(algorithm_type, algorithm_uuid)
        compatible_types_algorithm = set(
            algorithm_info[lookup.compatible_descriptor])

        compatible_types_set = compatible_types_set.intersection(
            compatible_types_algorithm)
        algorithm_data.append([algorithm_info[lookup.uuid_descriptor]])

    if not compatible_types_set:
        raise ValueError(
            'algorithms could not be added: they share no compatible types with the existing set'
        )

    all_set_files = get_set_files(algorithm_type)
    specific_set_file = list(
        filter(lambda set_file: set_file[lookup.uuid_descriptor] == set_uuid,
               all_set_files))

    if len(specific_set_file) != 1:
        raise ValueError('uuid: ' + set_uuid +
                         ' not found among algorithm sets of type: ' +
                         algorithm_type)

    specific_set_info = specific_set_file[0]
    set_file_path = specific_set_info[lookup.filepath_descriptor]

    fs.write_to_csv_file(set_file_path, algorithm_data)

    return set_uuid
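
A sketch of extending a set with hypothetical UUIDs; the call raises ValueError if the added algorithms share no compatible types with the existing set.

add_to_algorithm_set('detector', '<set-uuid>', ['<detector-uuid>'])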
Example #9
def process_algorithm(algorithm_type, algorithm_dict, config_file_path):
	"""registers each configured algorithm in its type's master csv"""
	master_file = lookup.get_algo_master_files()[algorithm_type]
	master_file_path = lookup.get_all_files()[master_file]

	# one row per algorithm: a fresh uuid, the algorithm name, and the config that defines it
	data_to_write = [[fs.get_uuid(), name, config_file_path] for name in algorithm_dict]
	fs.write_to_csv_file(master_file_path, data_to_write)
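
A final usage sketch; the algorithm type, the config dict contents, and the path are hypothetical stand-ins for whatever the config loader produces.

# write one master-file row per algorithm defined in the config
process_algorithm('embeddor', {'lsb_matching': {}}, 'configs/embeddors.ini')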