Example #1
def start():
    """Process every JPEG under inputdir and extract the selected evidence."""
    global inputdir, outputdir
    helper.initialize_sqli()
    image_list = list(helper.list_files(inputdir, "image/jpeg"))

    for filename in image_list:
        print("Processing %s" % (filename, ))
        # Creation of the SQLite row for the file
        helper.image_row("evidences", filename)
        extractor.basic_info(filename)
        if gps.get():
            extractor.PIL_exif_data_GPS(filename)
        if md5.get():
            extractor.md5(filename)
        if sha256.get():
            extractor.sha256(filename)
        if sha512.get():
            extractor.sha512(filename)
        if exif.get():
            extractor.exif_info(filename)
    # Write the CSV report once every file has been processed
    helper.create_csv(outputdir)
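The bare gps.get(), md5.get() and similar calls suggest that start() reads module-level Tkinter variables bound to checkboxes in a GUI, with helper and extractor being project-local modules. A minimal sketch of that wiring, where every widget name and default value below is assumed rather than taken from the source:

import tkinter as tk

root = tk.Tk()
root.title("imago")

# Hypothetical module-level state read by start(); values are illustrative.
inputdir, outputdir = "./images", "./out"
gps = tk.BooleanVar(value=True)
md5 = tk.BooleanVar()
sha256 = tk.BooleanVar()
sha512 = tk.BooleanVar()
exif = tk.BooleanVar()

tk.Checkbutton(root, text="GPS", variable=gps).pack(anchor="w")
tk.Checkbutton(root, text="MD5", variable=md5).pack(anchor="w")
tk.Checkbutton(root, text="SHA-256", variable=sha256).pack(anchor="w")
tk.Checkbutton(root, text="SHA-512", variable=sha512).pack(anchor="w")
tk.Checkbutton(root, text="EXIF", variable=exif).pack(anchor="w")
tk.Button(root, text="Start", command=start).pack()
root.mainloop()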
Example #2
import os
import tarfile
import timeit
import urllib.request

import numpy as np
import tensorflow as tf
from PIL import Image
from object_detection.utils import label_map_util  # TF object detection API

import helper  # project-local utilities (list_files, num_of_files_in_dir); import form assumed


def generate_xml_files_with_nn(test_images_path):
    """
    Runs object detection on the input images and saves the results to XML files.
    The model used for the detection can be swapped for a larger one.
    """
    # What model to download.
    MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17'
    MODEL_FILE = MODEL_NAME + '.tar.gz'
    DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'

    # Path to frozen detection graph. This is the actual model that is used for the object detection.
    PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'

    # List of the strings that is used to add correct label for each box.
    PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')

    NUM_CLASSES = 90

    # Download and unpack the frozen graph only if it is not already on disk.
    if not os.path.isdir(MODEL_NAME):
        urllib.request.urlretrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
        tar_file = tarfile.open(MODEL_FILE)
        for file in tar_file.getmembers():
            file_name = os.path.basename(file.name)
            if 'frozen_inference_graph.pb' in file_name:
                tar_file.extract(file, os.getcwd())

    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

    label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)

    # Size, in inches, of the output images.
    IMAGE_SIZE = (24, 16)

    i = 0
    size = helper.num_of_files_in_dir(test_images_path, "jpg")

    # Reuse a single session for every image instead of opening one per file.
    with detection_graph.as_default():
        with tf.Session() as sess:
            for image_path in helper.list_files(test_images_path, "jpg"):
                print("Status-------------> %d/%d" % (i, size))
                i += 1
                full_image_path = os.path.join(test_images_path, image_path)
                print("Opening ->", full_image_path)
                start = timeit.default_timer()
                image = Image.open(full_image_path)
                image_np = load_image_into_numpy_array(image)
                all_detection_dict = run_inference_for_single_image(
                    sess, image_np, detection_graph)
                write_objects_detected_to_xml(
                    get_valid_detections(all_detection_dict), category_index,
                    full_image_path)
                stop = timeit.default_timer()
                print("Detection and xml creation took: %.2f sec" % (stop - start))
Example #3
import argparse
import os
import sys

import extractor  # project-local modules from imago; import form assumed
import helper


def main(args=None):
	print("""\
##################################################
# imago.py                                       #
# Digital evidences from images!                 #
# Made with <3 by Matteo Redaelli                #
# Twitter: @solventred                           #
##################################################
	""")
	if args is None:
		args = sys.argv[1:]
	parser = argparse.ArgumentParser()
	parser.add_argument('-i','--input', help='Input directory path', type=str, required=True)
	parser.add_argument('-x','--exif', help='Extract exif metadata', action='store_true')
	parser.add_argument('-g','--gps', help='Extract and parse GPS exif metadata from images (if any) and convert it to coordinates. It works only with JPEG.', action='store_true')
	parser.add_argument('-e','--ela', help='Generate an Error Level Analysis image. It works only with JPEG. *BETA*', action='store_true')
	parser.add_argument('-n','--nude', help='Detect nudity. It works only with JPEG. *BETA*', action='store_true')
	parser.add_argument('-d','--digest', help='Calculate cryptographic hash digest', type=str, choices=["md5", "sha256", "sha512", "all"])
	parser.add_argument('-p','--percentualhash', help='Calculate perceptual image hash', type=str, choices=["ahash", "phash", "dhash", "whash", "all"])
	parser.add_argument('-o','--output', help='Output directory path', type=str)
	parser.add_argument('-s','--sqli', help='Keep SQLite file after the computation', action='store_true')
	parser.add_argument('-t','--type', help='Select the image type to process; it can be jpeg or tiff. If this argument is not provided, imago will process all supported image types (i.e. JPEG and TIFF).', type=str, choices=["jpeg","tiff"])
	args = parser.parse_args(args)

	if (args.exif or args.gps or args.ela or args.digest or args.nude or args.percentualhash):

		filetype = ""
		if (args.type == "jpeg"):
			filetype = "image/jpeg"
		elif (args.type == "tiff"):
			filetype = "image/tiff"
		else:
			filetype = "image"
		if args.output:
			output_path = args.output
		else:
			output_path = "."
		base_dir = args.input
		helper.initialize_sqli()
		image_list = list(helper.list_files(base_dir, filetype))
		for filename in image_list:
			print ("Processing %s" % (filename,))
			# Creation of the SQLite row for the file
			helper.image_row("evidences", filename)
			extractor.basic_info(filename)
			if args.nude:
				extractor.detect_nudity(filename)
			if args.gps:
				extractor.PIL_exif_data_GPS(filename)

			if args.percentualhash == "ahash":
				extractor.ahash(filename)
			elif args.percentualhash == "phash":
				extractor.phash(filename)
			elif args.percentualhash == "dhash":
				extractor.dhash(filename)
			elif args.percentualhash == "whash":
				extractor.whash(filename)
			elif args.percentualhash == "all":
				extractor.ahash(filename)
				extractor.phash(filename)
				extractor.whash(filename)
				extractor.dhash(filename)

			if args.digest == "md5":
				extractor.md5(filename)
			elif args.digest == "sha256":
				extractor.sha256(filename)
			elif args.digest == "sha512":
				extractor.sha512(filename)
			elif args.digest == "all":
				extractor.md5(filename)
				extractor.sha256(filename)
				extractor.sha512(filename)
			if args.exif:
				extractor.exif_info(filename)
			if args.ela:
				extractor.ela(filename, output_path)
			print("Processing of %s completed!" % (filename,))

		# Create the CSV report from the collected evidence
		helper.create_csv(output_path)
		if not args.sqli:
			os.remove('metadata.db')
		elif args.sqli and args.output:
			os.rename("metadata.db", os.path.join(args.output,"metadata.db"))
	else:
		print("ERROR: Select at least one type of extraction")

def _get_files(img_dir):
    # Prefix each filename returned by helper.list_files with its directory.
    files = helper.list_files(img_dir)
    return [os.path.join(img_dir, x) for x in files]
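Because main() accepts an optional argument list and only falls back to sys.argv[1:] when none is given, the parser can be exercised from a shell or programmatically. A hypothetical invocation, using only flags defined above (the paths are illustrative, not from the source):

# Extract EXIF and GPS metadata plus every cryptographic digest from the
# JPEGs in ./images and write the CSV report to ./report.
main(["--input", "./images", "--type", "jpeg",
      "--exif", "--gps", "--digest", "all",
      "--output", "./report"])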