Example No. 1
def sort_by_face_yaw(input_path):
    print ("Sorting by face yaw...")
    img_list = []
    for filepath in tqdm( Path_utils.get_image_paths(input_path), desc="Loading"):
        filepath = Path(filepath)
        
        if filepath.suffix != '.png':
            print ("%s is not a png file required for sort_by_face_dissim" % (filepath.name) ) 
            continue
        
        a_png = AlignedPNG.load (str(filepath))
        if a_png is None:
            print ("%s failed to load" % (filepath.name) ) 
            continue
            
        d = a_png.getFaceswapDictData()
        
        if d is None or d['yaw_value'] is None:          
            print ("%s - no embedded data found required for sort_by_face_dissim" % (filepath.name) )
            continue
        
        img_list.append( [str(filepath), np.array(d['yaw_value']) ] )

    print ("Sorting...")
    img_list = sorted(img_list, key=operator.itemgetter(1), reverse=True)
    
    return img_list
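
A note on usage: the sort_by_* helpers in these examples only return the ordered list of [filepath, key] pairs; applying that order to the files is left to the caller. Below is a minimal sketch of one way to do that, assuming the caller wants the images copied out under zero-padded index names. The apply_sort_order helper and its naming scheme are hypothetical and not part of the original code.

import shutil
from pathlib import Path

def apply_sort_order(img_list, output_dir):
    # Hypothetical helper: copy files into output_dir named by their position
    # in the sorted list, e.g. 00000.png, 00001.png, ...
    output_path = Path(output_dir)
    output_path.mkdir(parents=True, exist_ok=True)
    for i, item in enumerate(img_list):
        src = Path(item[0])  # item[0] is the source filepath in every sorter above
        shutil.copy(str(src), str(output_path / ('%05d%s' % (i, src.suffix))))

# e.g. apply_sort_order(sort_by_face_yaw('aligned'), 'aligned_sorted')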
Example No. 2
def sort_by_origname(input_path):
    print ("Sort by original filename...")
    
    img_list = []
    for filepath in tqdm( Path_utils.get_image_paths(input_path), desc="Loading"):
        filepath = Path(filepath)
        
        if filepath.suffix != '.png':
            print ("%s is not a png file required for sort_by_origname" % (filepath.name) ) 
            continue
        
        a_png = AlignedPNG.load (str(filepath))
        if a_png is None:
            print ("%s failed to load" % (filepath.name) ) 
            continue
            
        d = a_png.getFaceswapDictData()
        
        if d is None or d['source_filename'] is None:          
            print ("%s - no embedded data found required for sort_by_origname" % (filepath.name) )
            continue

        img_list.append( [str(filepath), d['source_filename']] )

    print ("Sorting...")
    img_list = sorted(img_list, key=operator.itemgetter(1))
    return img_list
Example No. 3
def X_LOAD(RAWS):
    sample_list = []

    for s in tqdm(RAWS, desc="Loading"):

        s_filename_path = Path(s.filename)
        if s_filename_path.suffix != '.png':
            print("%s is not a png file required for training" %
                  (s_filename_path.name))
            continue

        a_png = AlignedPNG.load(str(s_filename_path))
        if a_png is None:
            print("%s failed to load" % (s_filename_path.name))
            continue

        d = a_png.getFaceswapDictData()
        if d is None or d['landmarks'] is None or d['yaw_value'] is None:
            print(
                "%s - no embedded faceswap info found required for training" %
                (s_filename_path.name))
            continue

        sample_list.append(
            s.copy_and_set(shape=a_png.get_shape(),
                           landmarks=d['landmarks'],
                           yaw=d['yaw_value']))

    return sample_list
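
For reference, the embedded metadata that X_LOAD and the sorters rely on can be inspected with the same AlignedPNG calls used throughout these examples. A minimal sketch, assuming AlignedPNG is imported as above and the file was written by the extractor (Examples No. 9 and 11), so the dict contains keys such as source_filename, yaw_value and pitch_value:

def print_aligned_png_info(filepath):
    # Dump a few fields of the faceswap dict embedded in one aligned PNG.
    a_png = AlignedPNG.load(str(filepath))
    if a_png is None:
        print("%s failed to load" % (filepath))
        return
    d = a_png.getFaceswapDictData()
    if d is None:
        print("%s - no embedded data found" % (filepath))
        return
    for key in ('source_filename', 'yaw_value', 'pitch_value'):
        print("%s: %s" % (key, d.get(key)))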
Example No. 4
    def onClientProcessData(self, data):
        filename_path = Path(data)

        files_processed = 1
        faces_processed = 0

        output_filename_path = self.output_path / filename_path.name
        if (self.converter.get_mode() == ConverterBase.MODE_FACE
                and filename_path.stem not in self.alignments.keys()):
            if not self.debug:
                print('no faces found for %s, copying without faces' %
                      (filename_path.name))
                shutil.copy(str(filename_path), str(output_filename_path))
        else:
            image = (cv2.imread(str(filename_path)) / 255.0).astype(np.float32)

            if self.converter.get_mode() == ConverterBase.MODE_IMAGE:
                image_landmarks = None
                a_png = AlignedPNG.load(str(filename_path))
                if a_png is not None:
                    d = a_png.getFaceswapDictData()
                    if d is not None and 'landmarks' in d.keys():
                        image_landmarks = np.array(d['landmarks'])

                image = self.converter.convert_image(image, image_landmarks,
                                                     self.debug)
                if self.debug:
                    for img in image:
                        cv2.imshow('Debug convert', img)
                        cv2.waitKey(0)
                faces_processed = 1
            elif self.converter.get_mode() == ConverterBase.MODE_FACE:
                faces = self.alignments[filename_path.stem]
                for face_num, image_landmarks in enumerate(faces):
                    try:
                        if self.debug:
                            print('\nConverting face_num [%d] in file [%s]' %
                                  (face_num, filename_path))

                        image = self.converter.convert_face(
                            image, image_landmarks, self.debug)
                        if self.debug:
                            for img in image:
                                cv2.imshow('Debug convert', img)
                                cv2.waitKey(0)
                    except Exception as e:
                        print(
                            'Error while converting face_num [%d] in file [%s]: %s'
                            % (face_num, filename_path, str(e)))
                        traceback.print_exc()
                faces_processed = len(faces)

            if not self.debug:
                cv2.imwrite(str(output_filename_path),
                            (image * 255).astype(np.uint8))

        return (files_processed, faces_processed)
Example No. 5
def sort_by_face(input_path):

    print("Sorting by face similarity...")

    img_list = []
    for filepath in tqdm(Path_utils.get_image_paths(input_path),
                         desc="Loading"):
        filepath = Path(filepath)

        if filepath.suffix != '.png':
            print("%s is not a png file required for sort_by_face" %
                  (filepath.name))
            continue

        a_png = AlignedPNG.load(str(filepath))
        if a_png is None:
            print("%s failed to load" % (filepath.name))
            continue

        d = a_png.getFaceswapDictData()

        if d is None or d['landmarks'] is None:
            print("%s - no embedded data found required for sort_by_face" %
                  (filepath.name))
            continue

        img_list.append([str(filepath), np.array(d['landmarks'])])

    img_list_len = len(img_list)
    for i in tqdm(range(0, img_list_len - 1), desc="Sorting", file=sys.stdout):
        min_score = float("inf")
        j_min_score = i + 1
        for j in range(i + 1, len(img_list)):

            fl1 = img_list[i][1]
            fl2 = img_list[j][1]
            score = np.sum(np.absolute((fl2 - fl1).flatten()))

            if score < min_score:
                min_score = score
                j_min_score = j
        img_list[i + 1], img_list[j_min_score] = img_list[j_min_score], img_list[i + 1]

    return img_list
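
What the loop above implements is a greedy nearest-neighbor chain: for each position i it scans the remaining items for the landmark array closest (by summed absolute difference) to item i and swaps it into position i + 1, so visually similar faces end up adjacent in the output. A standalone toy sketch of the same chaining on synthetic one-element keys, not part of the original code:

import numpy as np

# Toy illustration of the greedy chaining used by sort_by_face.
items = [['a', np.array([0.9])], ['b', np.array([0.1])],
         ['c', np.array([0.85])], ['d', np.array([0.15])]]

for i in range(len(items) - 1):
    min_score = float("inf")
    j_min_score = i + 1
    for j in range(i + 1, len(items)):
        score = np.sum(np.absolute((items[j][1] - items[i][1]).flatten()))
        if score < min_score:
            min_score = score
            j_min_score = j
    items[i + 1], items[j_min_score] = items[j_min_score], items[i + 1]

print([name for name, _ in items])  # ['a', 'c', 'd', 'b'] - neighbors stay similar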
Example No. 6
def sort_by_face_dissim(input_path):

    print("Sorting by face dissimilarity...")

    img_list = []
    for filepath in tqdm(Path_utils.get_image_paths(input_path),
                         desc="Loading"):
        filepath = Path(filepath)

        if filepath.suffix != '.png':
            print("%s is not a png file required for sort_by_face_dissim" %
                  (filepath.name))
            continue

        a_png = AlignedPNG.load(str(filepath))
        if a_png is None:
            print("%s failed to load" % (filepath.name))
            continue

        d = a_png.getFaceswapDictData()

        if d is None or d['landmarks'] is None:
            print(
                "%s - no embedded data found required for sort_by_face_dissim"
                % (filepath.name))
            continue

        img_list.append([str(filepath), np.array(d['landmarks']), 0])

    img_list_len = len(img_list)
    for i in tqdm(range(0, img_list_len - 1), desc="Sorting", file=sys.stdout):
        score_total = 0
        for j in range(i + 1, len(img_list)):
            fl1 = img_list[i][1]
            fl2 = img_list[j][1]
            score_total += np.sum(np.absolute((fl2 - fl1).flatten()))

        img_list[i][2] = score_total

    print("Sorting...")
    img_list = sorted(img_list, key=operator.itemgetter(2), reverse=True)

    return img_list
Example No. 7
def sort_by_blur(input_path):
    def estimate_blur(image):
        if image.ndim == 3:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        blur_map = cv2.Laplacian(image, cv2.CV_64F)
        score = np.var(blur_map)
        return score

    img_list = []
    print("Sorting by blur...")
    for filepath in tqdm(Path_utils.get_image_paths(input_path),
                         desc="Loading"):
        filepath = Path(filepath)

        if filepath.suffix != '.png':
            print("%s is not a png file required for sort_by_face" %
                  (filepath.name))
            continue

        a_png = AlignedPNG.load(str(filepath))
        if a_png is None:
            print("%s failed to load" % (filepath.name))
            continue

        d = a_png.getFaceswapDictData()

        if d is None or d['landmarks'] is None:
            print("%s - no embedded data found required for sort_by_face" %
                  (filepath.name))
            continue
        # never mask by the face hull; it scores worse than estimating blur on the whole image
        img = cv2.imread(str(filepath))
        img_list.append([str(filepath), estimate_blur(img)])

    print("Sorting...")
    img_list = sorted(img_list, key=operator.itemgetter(1), reverse=True)

    return img_list
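
The blur score above is the classic variance-of-Laplacian sharpness measure: blurry images have little high-frequency content, so the Laplacian response (and its variance) is low, and sorting in descending order puts the sharpest frames first. A standalone sketch of the same measurement on a single file, assuming only OpenCV and NumPy; the threshold mentioned is an illustrative guess, not a value from the original code:

import cv2
import numpy as np

def laplacian_sharpness(path):
    # Same measurement as estimate_blur above: variance of the Laplacian
    # of the grayscale image. Higher means sharper.
    img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    if img is None:
        raise ValueError("failed to read %s" % path)
    return np.var(cv2.Laplacian(img, cv2.CV_64F))

# e.g. drop frames that are clearly out of focus (tune the threshold per dataset):
# if laplacian_sharpness('aligned/00001_0.png') < 100.0: ...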
Example No. 8
def restore_origname(input_path):
    print ("Restoring original filename...")
    
    img_list = []
    for filepath in tqdm( Path_utils.get_image_paths(input_path), desc="Loading"):
        filepath = Path(filepath)

        if filepath.suffix != '.png':
            print ("%s is not a png file required for sort_by_origname" % (filepath.name) )
            continue

        a_png = AlignedPNG.load (str(filepath))
        if a_png is None:
            print ("%s failed to load" % (filepath.name) )
            continue

        d = a_png.getFaceswapDictData()

        if d is None or d['source_filename'] is None:
            print ("%s - no embedded data found required for sort_by_origname" % (filepath.name) )
            continue

        dst = os.path.join( input_path, os.path.basename(d['source_filename']) )
        os.rename( filepath, dst )
Example No. 9
def extract_pass_process(sq, cq):
    e = None
    type = None
    device_idx = None
    debug = False
    output_path = None
    detector = None
    image_size = None
    face_type = None
    while True:
        obj = sq.get()
        obj_op = obj['op']

        if obj_op == 'extract':
            data = obj['data']

            filename_path = Path(data[0])

            if not filename_path.exists():
                cq.put({
                    'op': 'error',
                    'close': False,
                    'is_file_not_found': True,
                    'data': obj['data'],
                    'message': 'Failed to extract %s, reason: file not found.' %
                               (str(filename_path))
                })
            else:
                try:
                    image = cv2.imread(str(filename_path))

                    if type == 'rects':
                        rects = e.extract_from_bgr(image)
                        cq.put({
                            'op': 'extract_success',
                            'data': obj['data'],
                            'result': [str(filename_path), rects]
                        })

                    elif type == 'landmarks':
                        rects = data[1]
                        landmarks = e.extract_from_bgr(image, rects)
                        cq.put({
                            'op': 'extract_success',
                            'data': obj['data'],
                            'result': [str(filename_path), landmarks]
                        })

                    elif type == 'final':
                        result = []
                        faces = data[1]

                        if debug:
                            debug_output_file = '{}_{}'.format(
                                str(
                                    Path(str(output_path) + '_debug') /
                                    filename_path.stem), 'debug.png')
                            debug_image = image.copy()

                        for (face_idx, face) in enumerate(faces):
                            rect = face[0]
                            image_landmarks = np.array(face[1])
                            image_to_face_mat = facelib.LandmarksProcessor.get_transform_mat(
                                image_landmarks, image_size, face_type)
                            output_file = '{}_{}{}'.format(
                                str(output_path / filename_path.stem),
                                str(face_idx), '.png')

                            if debug:
                                facelib.LandmarksProcessor.draw_rect_landmarks(
                                    debug_image, rect, image_landmarks,
                                    image_size, face_type)

                            face_image = cv2.warpAffine(
                                image, image_to_face_mat,
                                (image_size, image_size))
                            face_image_landmarks = facelib.LandmarksProcessor.transform_points(
                                image_landmarks, image_to_face_mat)

                            cv2.imwrite(output_file, face_image)

                            a_png = AlignedPNG.load(output_file)

                            d = {
                                'type': 'face',
                                'landmarks': face_image_landmarks.tolist(),
                                'yaw_value': facelib.LandmarksProcessor.calc_face_yaw(
                                    face_image_landmarks),
                                'pitch_value': facelib.LandmarksProcessor.calc_face_pitch(
                                    face_image_landmarks),
                                'source_filename': filename_path.name,
                                'source_rect': rect,
                                'source_landmarks': image_landmarks.tolist()
                            }
                            a_png.setFaceswapDictData(d)
                            a_png.save(output_file)

                            result.append(output_file)

                        if debug:
                            cv2.imwrite(debug_output_file, debug_image)

                        cq.put({
                            'op': 'extract_success',
                            'data': obj['data'],
                            'result': result
                        })

                except Exception as e:
                    cq.put({
                        'op': 'error',
                        'close': True,
                        'data': obj['data'],
                        'message': 'Failed to extract %s, reason: %s. \r\n%s' %
                                   (str(filename_path), str(e), traceback.format_exc())
                    })
                    break

        elif obj_op == 'init':
            try:
                type = obj['type']
                image_size = obj['image_size']
                face_type = obj['face_type']
                device_idx = obj['device_idx']
                output_path = Path(
                    obj['output_dir']) if 'output_dir' in obj.keys() else None
                debug = obj['debug']
                detector = obj['detector']

                if type == 'rects':
                    if detector is not None:
                        if detector == 'mt':
                            tf = gpufmkmgr.import_tf([device_idx],
                                                     allow_growth=True)
                            tf_session = gpufmkmgr.get_tf_session()
                            keras = gpufmkmgr.import_keras()
                            e = facelib.MTCExtractor(keras, tf, tf_session)
                        elif detector == 'dlib':
                            dlib = gpufmkmgr.import_dlib(device_idx)
                            e = facelib.DLIBExtractor(dlib)
                        e.__enter__()

                elif type == 'landmarks':
                    gpufmkmgr.import_tf([device_idx], allow_growth=True)
                    keras = gpufmkmgr.import_keras()
                    e = facelib.LandmarksExtractor(keras)
                    e.__enter__()
                elif type == 'final':
                    pass

                cq.put({'op': 'init_ok'})
            except Exception as e:
                cq.put({
                    'op': 'error',
                    'close': True,
                    'message': 'Exception during initialization: %s' %
                               (traceback.format_exc())
                })
                break

    if detector is not None and (type == 'rects' or type == 'landmarks'):
        e.__exit__()
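
extract_pass_process is driven entirely through the two multiprocessing queues: the parent sends an 'init' message carrying type, image_size, face_type, device_idx, output_dir, debug and detector, waits for 'init_ok', then sends one 'extract' message per file and reads back 'extract_success' or 'error' replies. A minimal single-worker driver sketch under those assumptions; the message field names follow the code above, while the concrete parameter values and the run_rects_pass helper are illustrative only:

import multiprocessing

def run_rects_pass(image_paths, output_dir):
    sq, cq = multiprocessing.Queue(), multiprocessing.Queue()
    p = multiprocessing.Process(target=extract_pass_process, args=(sq, cq))
    p.start()

    sq.put({'op': 'init', 'type': 'rects', 'image_size': 256,
            'face_type': 'full_face', 'device_idx': 0,
            'output_dir': output_dir, 'debug': False, 'detector': 'dlib'})
    if cq.get()['op'] != 'init_ok':
        raise RuntimeError('worker failed to initialize')

    results = []
    for path in image_paths:
        sq.put({'op': 'extract', 'data': [path]})
        reply = cq.get()
        if reply['op'] == 'extract_success':
            results.append(reply['result'])  # [filename, rects]
        else:
            print(reply['message'])
            if reply.get('close', False):
                break

    # The worker loop above has no 'close' op, so stop it explicitly.
    p.terminate()
    return results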
Example No. 10
def main(input_dir, output_dir, aligned_dir, model_dir, model_name,
         **in_options):
    print("Running converter.\r\n")

    debug = in_options['debug']

    try:
        input_path = Path(input_dir)
        output_path = Path(output_dir)
        aligned_path = Path(aligned_dir)
        model_path = Path(model_dir)

        if not input_path.exists():
            print('Input directory not found. Please ensure it exists.')
            return

        if output_path.exists():
            for filename in Path_utils.get_image_paths(output_path):
                Path(filename).unlink()
        else:
            output_path.mkdir(parents=True, exist_ok=True)

        if not aligned_path.exists():
            print('Aligned directory not found. Please ensure it exists.')
            return

        if not model_path.exists():
            print('Model directory not found. Please ensure it exists.')
            return

        model_sq = multiprocessing.Queue()
        model_cq = multiprocessing.Queue()
        model_lock = multiprocessing.Lock()
        model_p = multiprocessing.Process(target=model_process,
                                          args=(model_name, model_dir,
                                                in_options, model_sq,
                                                model_cq))
        model_p.start()

        while True:
            if not model_cq.empty():
                obj = model_cq.get()
                obj_op = obj['op']
                if obj_op == 'init':
                    converter = obj['converter']
                    break

        alignments = {}
        if converter.get_mode() == ConverterBase.MODE_FACE:
            aligned_path_image_paths = Path_utils.get_image_paths(aligned_path)
            for filename in tqdm(aligned_path_image_paths,
                                 desc="Collecting alignments"):
                a_png = AlignedPNG.load(str(filename))
                if a_png is None:
                    print("%s - no embedded data found." % (filename))
                    continue
                d = a_png.getFaceswapDictData()
                if (d is None or d['source_filename'] is None
                        or d['source_rect'] is None or d['source_landmarks'] is None):
                    print("%s - no embedded data found." % (filename))
                    continue

                source_filename_stem = Path(d['source_filename']).stem
                if source_filename_stem not in alignments.keys():
                    alignments[source_filename_stem] = []

                alignments[source_filename_stem].append(
                    np.array(d['source_landmarks']))

        files_processed, faces_processed = ConvertSubprocessor(
            converter=converter.copy_and_set_predictor(
                model_process_predictor(model_sq, model_cq, model_lock)),
            input_path_image_paths=Path_utils.get_image_paths(input_path),
            output_path=output_path,
            alignments=alignments,
            **in_options).process()

        model_sq.put({'op': 'close'})
        model_p.join()
        '''            
        if model_name == 'AVATAR':
            output_path_image_paths = Path_utils.get_image_paths(output_path)
            
            last_ok_frame = -1
            for filename in output_path_image_paths:
                filename_path = Path(filename)
                stem = Path(filename).stem
                try:
                    frame = int(stem)
                except:
                    raise Exception ('Aligned avatars must be created from indexed sequence files.')
                    
                if frame-last_ok_frame > 1:
                    start = last_ok_frame + 1
                    end = frame - 1
                    
                    print ("Filling gaps: [%d...%d]" % (start, end) )
                    for i in range (start, end+1):                    
                        shutil.copy ( str(filename), str( output_path / ('%.5d%s' % (i, filename_path.suffix ))  ) )
                    
                last_ok_frame = frame
        '''

    except Exception as e:
        print('Error: %s' % (str(e)))
        traceback.print_exc()
Example No. 11
    def onClientProcessData(self, data):
        filename_path = Path(data[0])

        image = cv2.imread(str(filename_path))
        if image is None:
            print('Failed to extract %s, reason: cv2.imread() fail.' %
                  (str(filename_path)))
        else:
            if self.type == 'rects':
                rects = self.e.extract_from_bgr(image)
                return [str(filename_path), rects]

            elif self.type == 'landmarks':
                rects = data[1]
                landmarks = self.e.extract_from_bgr(image, rects)
                return [str(filename_path), landmarks]

            elif self.type == 'final':
                result = []
                faces = data[1]

                if self.debug:
                    debug_output_file = '{}_{}'.format(
                        str(
                            Path(str(self.output_path) + '_debug') /
                            filename_path.stem), 'debug.png')
                    debug_image = image.copy()

                for (face_idx, face) in enumerate(faces):
                    output_file = '{}_{}{}'.format(
                        str(self.output_path / filename_path.stem),
                        str(face_idx), '.png')

                    rect = face[0]
                    image_landmarks = np.array(face[1])

                    if self.debug:
                        facelib.LandmarksProcessor.draw_rect_landmarks(
                            debug_image, rect, image_landmarks,
                            self.image_size, self.face_type)

                    if self.face_type == FaceType.MARK_ONLY:
                        face_image = image
                        face_image_landmarks = image_landmarks
                    else:
                        image_to_face_mat = facelib.LandmarksProcessor.get_transform_mat(
                            image_landmarks, self.image_size, self.face_type)
                        # The fourth positional argument of cv2.warpAffine is dst,
                        # so the interpolation flag must be passed by keyword.
                        face_image = cv2.warpAffine(
                            image, image_to_face_mat,
                            (self.image_size, self.image_size),
                            flags=cv2.INTER_LANCZOS4)
                        face_image_landmarks = facelib.LandmarksProcessor.transform_points(
                            image_landmarks, image_to_face_mat)

                    cv2.imwrite(output_file, face_image)

                    a_png = AlignedPNG.load(output_file)

                    d = {
                        'face_type': FaceType.toString(self.face_type),
                        'landmarks': face_image_landmarks.tolist(),
                        'yaw_value': facelib.LandmarksProcessor.calc_face_yaw(
                            face_image_landmarks),
                        'pitch_value': facelib.LandmarksProcessor.calc_face_pitch(
                            face_image_landmarks),
                        'source_filename': filename_path.name,
                        'source_rect': rect,
                        'source_landmarks': image_landmarks.tolist()
                    }
                    a_png.setFaceswapDictData(d)
                    a_png.save(output_file)

                    result.append(output_file)

                if self.debug:
                    cv2.imwrite(debug_output_file, debug_image)

                return result
        return None
Example No. 12
def main (input_dir, output_dir, aligned_dir, model_dir, model_name, **in_options):
    print ("Running converter.\r\n")
    
    try:
        input_path = Path(input_dir)
        output_path = Path(output_dir)
        aligned_path = Path(aligned_dir)
        model_path = Path(model_dir)
        
        if not input_path.exists():
            print('Input directory not found. Please ensure it exists.')
            return

        if output_path.exists():
            for filename in Path_utils.get_image_paths(output_path):
                Path(filename).unlink()
        else:
            output_path.mkdir(parents=True, exist_ok=True)
            
        if not aligned_path.exists():
            print('Aligned directory not found. Please ensure it exists.')
            return
            
        if not model_path.exists():
            print('Model directory not found. Please ensure it exists.')
            return

        import models 
        model = models.import_model(model_name)(model_path, **in_options)
        converter = model.get_converter(**in_options)
        
        input_path_image_paths = Path_utils.get_image_paths(input_path)
        aligned_path_image_paths = Path_utils.get_image_paths(aligned_path)
        
        alignments = {}
        for filename in tqdm(aligned_path_image_paths, desc= "Collecting alignments" ):
            a_png = AlignedPNG.load( str(filename) )
            if a_png is None:
                print ( "%s - no embedded data found." % (filename) )
                continue
            d = a_png.getFaceswapDictData()
            if d is None or d['source_filename'] is None or d['source_rect'] is None or d['source_landmarks'] is None:
                print ( "%s - no embedded data found." % (filename) )
                continue
            
            source_filename_stem = Path(d['source_filename']).stem
            if source_filename_stem not in alignments.keys():
                alignments[ source_filename_stem ] = []

            alignments[ source_filename_stem ].append ( np.array(d['source_landmarks']) )

        
        for filename in tqdm( input_path_image_paths, desc="Converting"):
            filename_path = Path(filename)
            output_filename_path = output_path / filename_path.name
         
            if filename_path.stem not in alignments.keys():                        
                if not model.is_debug():
                    print ( 'no faces found for %s, copying without faces' % (filename_path.name) )                
                    shutil.copy ( str(filename_path), str(output_filename_path) )                
            else:                    
                image = (cv2.imread(filename) / 255.0).astype('float32')
                faces = alignments[filename_path.stem]
                for image_landmarks in faces:                
                    image = converter.convert(image, image_landmarks, model.is_debug()) 
        
                    if model.is_debug():
                        for img in image:
                            cv2.imshow ('Debug convert', img )
                            cv2.waitKey(0)
                
                if not model.is_debug():
                    cv2.imwrite (str(output_filename_path), (image*255).astype(np.uint8) )
        
        model.finalize()
    except Exception as e:
        print ( 'Error: %s' % (str(e)))
        traceback.print_exc()
Example No. 13
def main(input_dir, output_dir, aligned_dir, model_dir, model_name,
         **in_options):
    print("Running converter.\r\n")

    debug = in_options['debug']

    try:
        input_path = Path(input_dir)
        output_path = Path(output_dir)
        aligned_path = Path(aligned_dir)
        model_path = Path(model_dir)

        if not input_path.exists():
            print('Input directory not found. Please ensure it exists.')
            return

        if output_path.exists():
            for filename in Path_utils.get_image_paths(output_path):
                Path(filename).unlink()
        else:
            output_path.mkdir(parents=True, exist_ok=True)

        if not aligned_path.exists():
            print('Aligned directory not found. Please ensure it exists.')
            return

        if not model_path.exists():
            print('Model directory not found. Please ensure it exists.')
            return

        aligned_path_image_paths = Path_utils.get_image_paths(aligned_path)

        alignments = {}
        for filename in tqdm(aligned_path_image_paths,
                             desc="Collecting alignments"):
            a_png = AlignedPNG.load(str(filename))
            if a_png is None:
                print("%s - no embedded data found." % (filename))
                continue
            d = a_png.getFaceswapDictData()
            if (d is None or d['source_filename'] is None
                    or d['source_rect'] is None or d['source_landmarks'] is None):
                print("%s - no embedded data found." % (filename))
                continue

            source_filename_stem = Path(d['source_filename']).stem
            if source_filename_stem not in alignments.keys():
                alignments[source_filename_stem] = []

            alignments[source_filename_stem].append(
                np.array(d['source_landmarks']))

        model_sq = multiprocessing.Queue()
        model_cq = multiprocessing.Queue()
        model_lock = multiprocessing.Lock()

        model_p = multiprocessing.Process(target=model_process,
                                          args=(model_name, model_dir,
                                                in_options, model_sq,
                                                model_cq))
        model_p.start()

        while True:
            if not model_cq.empty():
                obj = model_cq.get()
                obj_op = obj['op']
                if obj_op == 'init':
                    converter = obj['converter']
                    break

        files_processed, faces_processed = ConvertSubprocessor(
            converter=converter.copy_and_set_predictor(
                model_process_predictor(model_sq, model_cq, model_lock)),
            input_path_image_paths=Path_utils.get_image_paths(input_path),
            output_path=output_path,
            alignments=alignments,
            debug=debug).start()

        model_sq.put({'op': 'close'})
        model_p.join()

    except Exception as e:
        print('Error: %s' % (str(e)))
        traceback.print_exc()