Code Example #1
File: test_tool_set.py  Project: barrettsa/maskgen
 def test_fileMask(self):
     pre = tool_set.openImageFile(self.locateFile('tests/images/prefill.png'))
     post = tool_set.openImageFile(self.locateFile('tests/images/postfill.png'))
     mask, analysis, error = tool_set.createMask(pre, post, invert=False, arguments={'tolerance': 2500})
     withtolerance = sum(sum(mask.image_array))
     mask.save(self.locateFile('tests/images/maskfill.png'))
     mask, analysis,error = tool_set.createMask(pre, post, invert=False)
     withouttolerance = sum(sum(mask.image_array))
     mask, analysis, error = tool_set.createMask(pre, post, invert=False, arguments={'tolerance': 2500, 'equalize_colors': True})
     mask.save(self.locateFile('tests/images/maskfillt.png'))
     withtoleranceandqu = sum(sum(mask.image_array))
     self.assertTrue(withouttolerance < withtolerance)
     self.assertTrue(withtolerance <= withtoleranceandqu)
Code Example #2
File: __init__.py  Project: barrettsa/maskgen
def transform(img, source, target, **kwargs):
    image_to_cover = numpy.asarray(img)
    kernalsize = (int(kwargs['kernelsize']), int(kwargs['kernelsize'])) \
        if 'kernelsize' in kwargs else (5, 5)
    kernel = numpy.ones(kernalsize, numpy.uint8)
    if 'inputmaskname' not in kwargs:
        blurred_region = cv2.GaussianBlur(image_to_cover, kernalsize, 0)
        Image.fromarray(blurred_region).save(target)
        return None, None

    mask = tool_set.openImageFile(kwargs['inputmaskname']).to_mask()
    mask_array = numpy.asarray(mask)
    mask_array = cv2.dilate(mask_array, kernel, iterations=5)
    region_to_blur = cv2.bitwise_and(image_to_cover,
                                     image_to_cover,
                                     mask=mask_array)
    #region_to_keep = cv2.bitwise_and(image_to_cover, image_to_cover, mask=255-mask_array)
    blurred_region = cv2.GaussianBlur(region_to_blur, kernalsize, 0)
    mask_array = cv2.erode(mask_array, kernel, iterations=3)
    flipped_mask = 255 - mask_array
    #image_to_cover =cv2.bitwise_or(blurred_region,region_to_keep)
    image_to_cover = numpy.copy(image_to_cover)
    for c in range(0, 3):
        image_to_cover[:, :, c] = \
          image_to_cover[:, :, c] * \
          (flipped_mask[:, :] / 255) + \
          blurred_region[:, :, c] * \
          (mask_array[:, :]/255)
    Image.fromarray(image_to_cover).save(target)
    return None, None
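
The per-channel loop above blends blurred and original pixels, using mask/255 as a 0-or-1 weight. A minimal standalone sketch of that weighting, with toy numpy arrays that are not part of maskgen:

import numpy as np

# Toy 2x2 single-channel version of the blend used in the example above.
original = np.array([[10, 20], [30, 40]], dtype=np.uint8)
blurred = np.array([[99, 99], [99, 99]], dtype=np.uint8)
mask = np.array([[0, 255], [255, 0]], dtype=np.uint8)   # 255 marks the region to blur

flipped_mask = 255 - mask
blended = original * (flipped_mask / 255) + blurred * (mask / 255)
print(blended.astype(np.uint8))   # keeps 10 and 40, takes 99 where mask == 255
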
Code Example #3
File: __init__.py  Project: barrettsa/maskgen
def transform(img, source, target, **kwargs):
    kernelSize = int(kwargs['kernelSize']) if 'kernelSize' in kwargs else 25
    rgb = img.convert('RGB')
    cv_image = numpy.array(rgb)
    if 'inputmaskname' in kwargs:
        mask = numpy.asarray(
            tool_set.openImageFile(kwargs['inputmaskname']).to_mask())
        mask = (mask > 0).astype('uint8')  # binarize the mask to 0/1 weights
    else:
        mask = numpy.ones(
            (cv_image.shape[0], cv_image.shape[1])).astype('uint8')
    inverted_mask = numpy.ones(
        (cv_image.shape[0], cv_image.shape[1])).astype('uint8')
    inverted_mask[mask == 1] = 0
    side = int(kernelSize**(1 / 2.0))
    psf = numpy.ones((side, side)) / kernelSize
    img = color.rgb2grey(cv_image)
    deconvolved_img = restoration.wiener(img, psf, 1)
    for c in range(3):
        cv_image[:, :, c] = deconvolved_img * cv_image[:, :, c] * mask + \
                            cv_image[:, :, c] * inverted_mask
    Image.fromarray(cv_image, 'RGB').save(target)
    return {'Blur Type': 'Wiener'}, None
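
For reference, a minimal sketch of the scikit-image Wiener call used above; it returns the deconvolved image directly (the image and PSF sizes here are arbitrary):

import numpy as np
from skimage import restoration

grey = np.random.rand(64, 64)            # stand-in grey float image in [0, 1]
psf = np.ones((5, 5)) / 25.0             # uniform 5x5 point spread function
deconvolved = restoration.wiener(grey, psf, 1)
print(deconvolved.shape)                 # (64, 64), same shape as the input
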
Code Example #4
File: test_tool_set.py  Project: barrettsa/maskgen
 def testCropCompare(self):
     import cv2
     pre = tool_set.openImageFile(self.locateFile('tests/images/prefill.png')).to_array()
     post = pre[10:-10,10:-10]
     resized_post = cv2.resize(post, (pre.shape[1],pre.shape[0]))
     mask, analysis = tool_set.cropResizeCompare(pre,resized_post, arguments={'crop width':pre.shape[1]-20,'crop height':pre.shape[0]-20})
     self.assertEquals((10,10), tool_set.toIntTuple(analysis['location']))
Code Example #5
File: __init__.py  Project: barrettsa/maskgen
def transform(img, source, target, **kwargs):
    kernelSize = 25
    mask = numpy.asarray(tool_set.openImageFile(kwargs['inputmaskname']).to_mask())
    rgb = img.convert('RGB')
    cv_image = numpy.array(rgb)
    blur_image = cv2.medianBlur(cv_image, kernelSize)
    cv_image_copy = numpy.copy(cv_image)
    cv_image_copy[mask == 255] = blur_image[mask == 255]
    Image.fromarray(cv_image_copy, 'RGB').save(target)
    return None, None
Code Example #6
File: seam_carving.py  Project: barrettsa/maskgen
    def __init__(self,
                 filename,
                 shape=None,
                 mask_filename=None,
                 energy_function=ScharrEnergyFunc(),
                 keep_size=False,
                 seam_function=base_energy_function):
        # initialize parameter
        self.filename = filename
        self.keep_size = keep_size

        # read in image and store as np.float64 format
        img = tool_set.openImageFile(filename).to_array()
        self.img_type = img.dtype
        self.image = img.astype(np.float64)
        if shape is None:
            self.shape = (self.image.shape[0], self.image.shape[1])
        else:
            self.shape = shape
        self.energy_function = energy_function
        self.seam_function = seam_function

        self.protected = np.ones(
            (self.image.shape[0], self.image.shape[1])).astype(np.float64)
        self.removal = np.ones(
            (self.image.shape[0], self.image.shape[1])).astype(np.float64)
        self.mask_tracker = MaskTracker(
            (self.image.shape[0], self.image.shape[1]))

        if mask_filename is not None:
            mask = tool_set.openImageFile(mask_filename).to_array()
            self.protected[mask[:, :, 1] > 2] = 1000.0
            self.removal[mask[:, :, 0] > 2] = -1000.0

        self.narrow_bounds = True

        # kernel for forward energy map calculation
        self.kernel_x = np.array([[0., 0., 0.], [-1., 0., 1.], [0., 0., 0.]],
                                 dtype=np.float64)
        self.kernel_y_left = np.array(
            [[0., 0., 0.], [0., 0., 1.], [0., -1., 0.]], dtype=np.float64)
        self.kernel_y_right = np.array(
            [[0., 0., 0.], [1., 0., 0.], [0., -1., 0.]], dtype=np.float64)
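
One way kernels like these are typically applied is via a 2-D convolution over a grey image to build an energy map; a minimal sketch with cv2.filter2D (synthetic data, not taken from the maskgen seam carver itself):

import cv2
import numpy as np

grey = np.random.rand(100, 120)          # stand-in grey image
kernel_x = np.array([[0., 0., 0.], [-1., 0., 1.], [0., 0., 0.]], dtype=np.float64)

# Horizontal neighbour differences; a forward-energy map combines this with
# the two diagonal (left/right) kernels defined above.
energy_x = np.abs(cv2.filter2D(grey, -1, kernel_x))
print(energy_x.shape)                    # same shape as the input image
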
Code Example #7
File: __init__.py  Project: barrettsa/maskgen
def transform(img, source, target, **kwargs):
    mask = numpy.asarray(
        tool_set.openImageFile(kwargs['inputmaskname']).to_mask())
    source_im = numpy.asarray(tool_set.openImageFile(source))
    paste_x = int(kwargs['paste_x'])
    paste_y = int(kwargs['paste_y'])
    x, y, w, h = widthandheight(mask)
    w += w % 2
    h += h % 2
    image_to_cover = numpy.copy(source_im)
    flipped_mask = 255 - mask
    for c in range(0, source_im.shape[2]):
        image_to_cover[paste_y:paste_y + h, paste_x:paste_x + w, c] = \
            image_to_cover[paste_y:paste_y + h, paste_x:paste_x + w, c] * \
            (flipped_mask[y:y + h, x:x + w] / 255) + \
            image_to_cover[y:y + h, x:x + w, c] * \
            (mask[y:y + h, x:x + w] / 255)
    target_im = image_wrap.ImageWrapper(image_to_cover)
    target_im.save(target)
    return {'purpose': 'clone'}, None
Code Example #8
 def __init__(self, base_image, **args):
     if type(base_image) == np.ndarray:
         self.base = base_image
         self.name = "base image"
     elif isinstance(base_image, ImageWrapper):
         self.base = base_image.image_array
         self.name = os.path.basename(base_image.filename)
     elif type(base_image) == str:
         wrapper = openImageFile(base_image)
         self.base = wrapper.image_array
         self.name = os.path.basename(wrapper.filename)
     else:
         raise ValueError("Invalid image type: {0}".format(
             type(base_image)))
Code Example #9
File: __init__.py  Project: barrettsa/maskgen
def transform(img, source, target, **kwargs):
    source_im = numpy.asarray(img)
    mask = tool_set.openImageFile(kwargs['inputmaskname']).to_mask()
    mask_array = numpy.asarray(mask)
    black_image = source_im.copy()
    new_im = source_im.copy()
    black_image[:, :] = (0, 0, 0)
    cv2.bitwise_and(source_im, black_image, new_im, mask_array)
    save_im = image_wrap.ImageWrapper(new_im)
    maskfd, maskfile = tempfile.mkstemp(suffix='.png')
    os.close(maskfd)
    save_im.save(maskfile)
    target_im = None
    try:
        lqrCommandLine = [
            'gmic', maskfile, '-gimp_inpaint_patchmatch',
            ' 0,9,10,5,1,0,0,0,0,3,0', '-o', target
        ]
        pcommand = subprocess.Popen(" ".join(lqrCommandLine),
                                    shell=True,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
        stdout, stderr = pcommand.communicate()
        if pcommand.returncode == 0:
            target_im = numpy.asarray(tool_set.openImageFile(target))
        else:
            logging.getLogger('maskgen').error(
                'Failure of Remove (inpainting) plugin {}'.format(str(stderr)))
    except Exception as e:
        logging.getLogger('maskgen').error(
            'Failure of Remove (inpainting) plugin {}'.format(str(e)))
    os.remove(maskfile)
    if target_im is None:
        target_im = cv2.inpaint(source_im, mask_array, 3, cv2.INPAINT_TELEA)
    save_im = image_wrap.ImageWrapper(target_im)
    save_im.save(target)
    return {'purpose': 'remove'}, None
Code Example #10
File: core.py  Project: j-h-m/Media-Journaling-Tool
    def check_input_mask(edge, op, graph, frm, to):
        inputmaskname = edge['inputmaskname'] if 'inputmaskname' in edge else None

        if inputmaskname is not None and len(inputmaskname) > 0 and \
                not os.path.exists(os.path.join(graph.dir, inputmaskname)):
            return [
                ValidationMessage(
                    Severity.ERROR, frm, to,
                    "Input mask file {} is missing".format(inputmaskname),
                    'Input Mask', repairMask)
            ]
        if inputmaskname is not None and len(inputmaskname) > 0 and \
                os.path.exists(os.path.join(graph.dir, inputmaskname)):
            ft = fileType(os.path.join(graph.dir, inputmaskname))
            if ft == 'audio':
                return []
            inputmask = openImage(os.path.join(graph.dir, inputmaskname))
            if inputmask is None:
                return [
                    ValidationMessage(
                        Severity.ERROR, frm, to,
                        "Input mask file {} is missing".format(inputmaskname),
                        'Input Mask', repairMask if ft == 'image' else None)
                ]
            inputmask = inputmask.to_mask().to_array()
            mask = openImageFile(os.path.join(
                graph.dir, edge['maskname'])).invert().to_array()
            if inputmask.shape != mask.shape:
                return [
                    ValidationMessage(
                        Severity.ERROR, frm, to,
                        'input mask name parameter has an invalid size',
                        'Input Mask', repairMask if ft == 'image' else None)
                ]
        return []
Code Example #11
def transform(img, source, target, **kwargs):
    mask = numpy.asarray(
        tool_set.openImageFile(kwargs['inputmaskname']).to_mask())
    img_array = img.to_array()
    result = None
    if len(img_array.shape) == 2:
        # grey
        result = numpy.zeros(
            (img_array.shape[0], img_array.shape[1], 2)).astype('uint8')
        result[:, :, 0] = img_array
        result[:, :, 1] = mask
    elif len(img_array.shape) == 3:
        if img_array.shape[2] == 4:
            result = img_array
            result[:, :, 3] = mask
        else:
            result = numpy.zeros(
                (img_array.shape[0], img_array.shape[1], 4)).astype('uint8')
            result[:, :, 0] = img_array[:, :, 0]
            result[:, :, 1] = img_array[:, :, 1]
            result[:, :, 2] = img_array[:, :, 2]
            result[:, :, 3] = mask
    ImageWrapper(result).save(target)
    return None, None
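
The RGB branch above copies the colour planes and stores the mask in the fourth (alpha) channel; the same idea can be sketched more compactly with numpy.dstack (toy arrays only):

import numpy as np

rgb = np.zeros((4, 4, 3), dtype=np.uint8)        # stand-in RGB image
mask = np.full((4, 4), 255, dtype=np.uint8)      # stand-in mask

rgba = np.dstack((rgb, mask))                    # mask becomes channel 3 (alpha)
print(rgba.shape)                                # (4, 4, 4)
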
Code Example #12
def saveAsPng(source, target):
    openImageFile(source, args={
        'Bits per Channel': 16
    }).save(target, format='PNG')
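
A hedged usage sketch (both paths are placeholders): the helper reads the source with a 16-bits-per-channel hint and re-saves it as PNG.

# Hypothetical file names, for illustration only.
saveAsPng('samples/scan.tif', 'samples/scan.png')
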
Code Example #13
    def align(self, image, warp_mode=cv2.MOTION_TRANSLATION):
        if type(image) == np.ndarray:
            m = "Aligning image to " + self.name
            second = image
        elif isinstance(image, ImageWrapper):
            m = "Aligning {0} to {1}".format(os.path.basename(image.filename),
                                             self.name)
            second = image.image_array
        elif type(image) == str:
            second_wrapper = openImageFile(image)
            second = second_wrapper.image_array
            m = "Aligning {0} to {1}".format(second_wrapper.filename,
                                             self.name)
        else:
            raise ValueError("Invalid image type: {0}".format(type(image)))
        self.logger.debug(m)

        if self.base.shape != second.shape:
            raise ValueError("Shape Mismatch Error: {0} {1}".format(
                self.base.shape, second.shape))

        # Can use self.base or image when referring to shape, using self.base for consistency
        final = np.empty(self.base.shape)
        alpha = self.base.shape[-1] % 2 == 0 and len(self.base.shape) > 2
        if alpha:
            final[:, :, -1] = second[:, :, -1]

        for channel in range(
                0,
                self.base.shape[-1] if not alpha else self.base.shape[-1] - 1):

            if warp_mode == cv2.MOTION_HOMOGRAPHY:
                warp_matrix = np.eye(3, 3, dtype=np.float32)
            else:
                warp_matrix = np.eye(2, 3, dtype=np.float32)

            number_of_iterations = 5000
            termination_eps = 1e-10

            # Define termination criteria
            criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                        number_of_iterations, termination_eps)

            # Run the ECC algorithm. The results are stored in warp_matrix.
            (cc, warp_matrix) = cv2.findTransformECC(self.base[:, :, channel],
                                                     second[:, :, channel],
                                                     warp_matrix, warp_mode,
                                                     criteria)
            if warp_mode == cv2.MOTION_HOMOGRAPHY:
                # Use warpPerspective for Homography
                cfinal = cv2.warpPerspective(
                    second[:, :, channel],
                    warp_matrix, (self.base.shape[1], self.base.shape[0]),
                    flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)
            else:
                # Use warpAffine for Translation, Euclidean and Affine
                cfinal = cv2.warpAffine(
                    second[:, :, channel],
                    warp_matrix, (self.base.shape[1], self.base.shape[0]),
                    flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)
            final[:, :, channel] = cfinal

        return final
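
The per-channel ECC registration step above can be exercised on its own; a minimal standalone sketch using a synthetic translated image (translation-only warp, not maskgen code):

import cv2
import numpy as np

# Synthetic base image (smooth Gaussian blob) and a copy shifted right by 3 px.
yy, xx = np.mgrid[0:80, 0:80]
base = np.exp(-((xx - 40.0) ** 2 + (yy - 40.0) ** 2) / 200.0).astype(np.float32)
shifted = np.roll(base, 3, axis=1)

warp_matrix = np.eye(2, 3, dtype=np.float32)
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 5000, 1e-10)
cc, warp_matrix = cv2.findTransformECC(base, shifted, warp_matrix,
                                       cv2.MOTION_TRANSLATION, criteria)

# Resample the shifted image back onto the base, as align() does per channel.
aligned = cv2.warpAffine(shifted, warp_matrix, (base.shape[1], base.shape[0]),
                         flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)
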
Code Example #14
File: seam_carving.py  Project: barrettsa/maskgen
 def read_dropped_mask(self, filename):
     self.dropped_mask = tool_set.openImageFile(filename).to_array() / 255