Example #1
from pathlib import Path
import logging

from utils import find_images  # assumption: the same shared utils helper used in the other examples

logger = logging.getLogger(__name__)


def run(sources):
    """
    sources: list of image folders
    """

    files = set()
    for source in sources:
        path = Path(source)
        for image in find_images(source, ['.jpg', '.jpeg']):
            filename = path / image
            # EXIF orientation 1 means the image is already upright; skip it
            if image_orientation(filename) == 1:
                continue
            files.add(filename.resolve())

    for filename in files:
        logger.info(filename.name)
        image_fix_orientation(filename)
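
The image_orientation / image_fix_orientation helpers are not part of this excerpt; a minimal sketch of what they might look like with Pillow, assuming they read and apply the standard EXIF orientation tag (274, where 1 means upright):

from PIL import Image, ImageOps

EXIF_ORIENTATION_TAG = 274  # standard EXIF orientation tag


def image_orientation(path):
    # Hypothetical helper: return the EXIF orientation, defaulting to 1 (upright).
    with Image.open(path) as img:
        return img.getexif().get(EXIF_ORIENTATION_TAG, 1)


def image_fix_orientation(path):
    # Hypothetical helper: rotate/flip the pixels to match the EXIF orientation,
    # then write the upright image back in place.
    with Image.open(path) as img:
        ImageOps.exif_transpose(img).save(path)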
Example #2
# encoding: UTF-8
import os

import cv2
import ntpath

from utils import find_images, sort_nicely

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
INPUT_DIR = SCRIPT_DIR + "/detection_all_frames"
OUTPUT_FILENAME = "output.mp4"
OUTPUT_FILEPATH = SCRIPT_DIR + "/" + OUTPUT_FILENAME

print("Cleaning output files...")
if os.path.isfile(OUTPUT_FILEPATH):
    os.unlink(OUTPUT_FILEPATH)

print("Reading images...")
img_paths = [ntpath.basename(img_path) for img_path in find_images(INPUT_DIR)]

sort_nicely(img_paths)

print("Writing " + OUTPUT_FILENAME + "...")

height, width, layers = cv2.imread(INPUT_DIR + "/" + img_paths[0]).shape
size = (width, height)
# Write to OUTPUT_FILEPATH so the result lands next to this script and matches
# the cleanup step above, rather than in the current working directory.
writer = cv2.VideoWriter(OUTPUT_FILEPATH, cv2.VideoWriter_fourcc(*'MP4V'), 30,
                         size)

for img_path in img_paths:
    img = cv2.imread(INPUT_DIR + "/" + img_path)
    writer.write(img)

writer.release()
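
sort_nicely is not shown in this snippet; it is presumably the usual in-place natural-sort helper, sketched here under that assumption (the real utils implementation may differ):

import re


def sort_nicely(items):
    # Hypothetical stand-in: sort the list in place, comparing numeric runs as
    # integers so "frame2.jpg" lands before "frame10.jpg".
    def key(s):
        return [(0, int(tok)) if tok.isdigit() else (1, tok.lower())
                for tok in re.split(r'(\d+)', s)]
    items.sort(key=key)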
Example #3
#!/usr/bin/env python
# -*- coding: utf-8 -*- 

from utils import (
	BrightnessCategory,
	find_images,
)
from ultra_image import UltraImage
from html_gallery import generate_gal_html


# Replace with a folder that actually exists
PICTURE_FOLDER = '/home/user/example_images'

categories = [
	BrightnessCategory('bright', 130, 255),
	BrightnessCategory('medium', 90, 129),
	BrightnessCategory('dark', 0, 89),
]


if __name__ == '__main__':
	for image_path in find_images(PICTURE_FOLDER):
		uim = UltraImage(image_path)
		for cat in categories:
			cat.add_if_acceptable(uim)

	print(generate_gal_html(categories))
Example #4
    def run_detection(self,
                      input_path,
                      generate_bbox_images=True,
                      recursive=True,
                      n_cores=0,
                      results=None,
                      checkpoint_path=None,
                      checkpoint_frequency=-1,
                      electron=False):

        image_file_names = find_images(input_path, recursive=recursive)
        logging.info(f'Found {len(image_file_names)} images to process')

        if results is None:
            results = []

        already_processed = {i['file'] for i in results}

        gpu_available = bool(tf.config.list_physical_devices('GPU'))

        if n_cores > 1 and gpu_available:
            logging.warning(
                'Multiple cores requested, but a GPU is available; '
                'parallelization across GPUs is not currently '
                'supported, defaulting to one GPU')

        # If we're not using multiprocessing...
        if n_cores <= 1 or gpu_available:
            count = 0  # Does not count those already processed
            # Note: stylising the bar with custom characters breaks in Electron; need to investigate
            with click.progressbar(length=len(image_file_names),
                                   label='Processing Images',
                                   show_pos=True,
                                   show_eta=True,
                                   show_percent=True,
                                   info_sep='|') as bar:
                for im_file in image_file_names:
                    # Will not add additional entries not in the starter checkpoint
                    if im_file in already_processed:
                        logging.info(
                            f'Bypassing already processed image: {im_file}')
                        continue

                    count += 1

                    result = self.__process_image(im_file,
                                                  generate_bbox_images)
                    results.append(result)
                    bar.update(1)

                    # this is for megadetector-gui usage
                    if electron:
                        print(bar.format_progress_line(), flush=True)

                    # checkpoint
                    if checkpoint_frequency != -1 and count % checkpoint_frequency == 0:
                        logging.info(
                            f'Writing a new checkpoint after having '
                            f'processed {count} images since last restart')
                        with open(checkpoint_path, 'w') as f:
                            json.dump({'images': results}, f)

        else:
            # when using multiprocessing, let the workers load the model
            logging.info(f'Creating pool with {n_cores} cores')

            if len(already_processed) > 0:
                logging.warning(
                    'When using multiprocessing, all images are reprocessed')

            pool = workerpool(n_cores)

            image_batches = list(chunk_list(image_file_names, n_cores))
            # pool.map only takes one iterable, so bind the flag up front and
            # map over the batches (assumes __process_images mirrors
            # __process_image and accepts generate_bbox_images as a keyword).
            results = pool.map(
                partial(self.__process_images,
                        generate_bbox_images=generate_bbox_images),
                image_batches)
            results = list(itertools.chain.from_iterable(results))

        self.save(results)

        return results
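
chunk_list is another project helper that is not shown; below is a minimal sketch of the even-split behaviour the call above appears to expect (the name and call signature come from the snippet, the internals are an assumption):

def chunk_list(items, n_chunks):
    # Hypothetical helper: split `items` into roughly equal batches,
    # one per worker in the pool.
    size = max(1, -(-len(items) // max(1, n_chunks)))  # ceiling division
    return [items[i:i + size] for i in range(0, len(items), size)]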
Example #5
def upload_file():
    if request.method == 'POST':
        #check if the post request has the file part

        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)

        file = request.files['file']
        # Form values arrive as strings; the detector presumably expects a numeric threshold.
        conf = float(request.form['value'])

        if file and allowed_file(file.filename):

            shutil.rmtree(UPLOAD_FOLDER)
            os.mkdir(UPLOAD_FOLDER)
            shutil.rmtree(OUTPUT_FOLDER)
            os.mkdir(OUTPUT_FOLDER)

            filename = secure_filename(file.filename)
            file.save(os.path.join(UPLOAD_FOLDER, filename))
            zip_ref = zipfile.ZipFile(os.path.join(UPLOAD_FOLDER, filename),
                                      'r')
            zip_ref.extractall(UPLOAD_FOLDER)
            zip_ref.close()

            os.remove(os.path.join(UPLOAD_FOLDER, filename))
            # __MACOSX only exists in archives created on macOS
            shutil.rmtree(os.path.join(UPLOAD_FOLDER, "__MACOSX"),
                          ignore_errors=True)

            tf_detector = TFDetector(model_path='md_v4.1.0.pb',
                                     output_path='output',
                                     render_conf_threshold=conf)
            image_file_names = find_images('uploads', recursive=True)
            results = []
            count = 0
            with click.progressbar(length=len(image_file_names),
                                   label='Processing Images',
                                   show_pos=True,
                                   show_eta=True,
                                   show_percent=True,
                                   info_sep='|') as bar:
                for im_file in image_file_names:
                    result = tf_detector._TFDetector__process_image(
                        im_file, True)
                    results.append(result)
                    bar.update(1)
                    count += 1

            tf_detector.save(results)

            zf = zipfile.ZipFile(filename, 'w', zipfile.ZIP_DEFLATED)
            for root, dirs, files in os.walk(OUTPUT_FOLDER):
                for output_file in files:
                    zf.write(os.path.join(root, output_file))
            zf.close()

            return send_file(filename,
                             attachment_filename='Processed_' + filename,
                             as_attachment=True)

    return render_template('index.html')
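
allowed_file is defined elsewhere in this app; since the route immediately unzips the upload, it presumably whitelists .zip in the style of the Flask upload recipe. A sketch under that assumption:

ALLOWED_EXTENSIONS = {'zip'}  # assumption: the route expects a zipped image folder


def allowed_file(filename):
    # Hypothetical helper following the Flask documentation pattern.
    return ('.' in filename
            and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS)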
Example #6
File: reader.py  Project: nitrotm/tf-cnn
 def gen():
     for filename in find_images(path):
         yield str(Path(path) / filename)
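
In a TensorFlow project such as tf-cnn, a filename generator like this is typically wrapped in a tf.data pipeline; the wiring below is only a sketch, not the actual reader.py code:

import tensorflow as tf

# Hypothetical pipeline: feed the filename generator into tf.data and decode
# each image; the real reader.py wiring is not shown in this excerpt.
dataset = tf.data.Dataset.from_generator(gen, output_types=tf.string)
dataset = dataset.map(
    lambda fn: tf.io.decode_image(tf.io.read_file(fn), channels=3))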
Example #7
import os
import logging

import cv2
import numpy

from detection import estimate_blur
from utils import find_images

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))

INPUT_DIR = SCRIPT_DIR + "/detection_cropped_output"

logging.basicConfig(level=logging.INFO)

results = []

for input_path in find_images(INPUT_DIR):
    try:
        logging.info("processing {0}".format(input_path))
        input_image = cv2.imread(input_path)

        blur_map, score, blurry = estimate_blur(input_image)

        # logging.info("input_path: {0}, score: {1}, blurry: {2}".format(input_path, score, blurry))
        results.append({
            "input_path": input_path,
            "score": score,
            "blurry": blurry
        })

        #     cv2.imshow("input", input_image)
        #     cv2.imshow("result", pretty_blur_map(blur_map))
Example #8
    print("Cleaning output directories...")
    cleanDir(FULLFRAME_OUTPUT_DIR)
    cleanDir(CROPPED_OUTPUT_DIR)

    print("Loading cascades...")

    side_view_face_cascade = cv2.CascadeClassifier(
        r"lbpcascade_profileface.xml")

    frontal_face_cascade = cv2.CascadeClassifier(
        r"haarcascade_frontalface_default.xml")

    print("Detecting faces...")

    num_images = 0
    for path in find_images(FRAMES_DIR):
        num_images += 1
        img = cv2.imread(path)
        img2gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # equalizeHist returns its result rather than modifying the input in place
        img2gray = cv2.equalizeHist(img2gray)
        frontal_face_position = frontal_face_cascade\
            .detectMultiScale(img2gray, 1.3, 5)

        if isinstance(frontal_face_position, np.ndarray):
            for (x, y, w, h) in frontal_face_position:
                roi_gray = img2gray[y:y + h, x:x + w]
                roi_color = img[y:y + h, x:x + w]
                cv2.imwrite(
                    CROPPED_OUTPUT_DIR + "/" + str(num_images) + ".png",
                    roi_color)
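
Every example on this page relies on find_images from a utils module that is not shown. The projects clearly ship slightly different variants (some return bare filenames, some full paths, some accept an extension list or a recursive flag), so the sketch below is only illustrative:

import os


def find_images(directory, extensions=('.jpg', '.jpeg', '.png'), recursive=True):
    # Illustrative stand-in for utils.find_images: collect image paths under
    # `directory`. The signature is assembled from the call sites above and is
    # an assumption, not the real API.
    matches = []
    for root, _dirs, files in os.walk(directory):
        for name in files:
            if name.lower().endswith(tuple(ext.lower() for ext in extensions)):
                matches.append(os.path.join(root, name))
        if not recursive:
            break
    return sorted(matches)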