Example #1
 def create(self, arguments={}, invert=False):
     import numpy as np
     if getValue(arguments, 'homography', 'None') == 'None':
         if self.startIm.has_alpha():
             img_array = np.asarray(self.startIm)
             mask = np.copy(img_array[:, :, 3])
             # treat the alpha channel as the region that is kept
             mask[mask > 0] = 255
             # invert, since 0 in the donor mask indicates the donor pixels
             return ImageWrapper(mask).invert()
         # use the pre-select mask (inverted): what was removed becomes what is kept
         return _pre_select_mask(self.graph, self.donor_start, self.startIm)
     mask = self.graph.get_edge_image(self.parent_of_end, self.donor_end,
                                      'arguments.pastemask')
     if mask is None:
         mask = self.graph.get_edge_image(self.parent_of_end,
                                          self.donor_end, 'maskname')
     mask, analysis = interpolateMask(mask,
                                      self.startIm,
                                      self.destIm,
                                      arguments=arguments,
                                      invert=invert)
     if mask is not None and mask.shape != (0, 0):
         mask = ImageWrapper(mask)
     else:
         mask = None
     return mask
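Note: the alpha branch above reduces to a threshold-and-invert step; ImageWrapper(mask).invert() presumably returns 255 - mask. A minimal NumPy-only sketch of that convention (the tiny array is made up for illustration):

import numpy as np

def alpha_to_donor_mask(rgba):
    # keep pixels with any opacity, then invert so 0 marks the donor pixels
    mask = np.copy(rgba[:, :, 3])
    mask[mask > 0] = 255
    return 255 - mask

rgba = np.zeros((4, 4, 4), dtype=np.uint8)
rgba[1:3, 1:3, 3] = 200             # a small opaque patch
print(alpha_to_donor_mask(rgba))    # 0 inside the patch, 255 elsewhere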
Example #2
def transform(img, source, target, **kwargs):
    img_to_paste = openImageFile(kwargs['donor'])
    pasteregionsize = kwargs['region size'] if 'region size' in kwargs else 1.0
    approach = kwargs['approach'] if 'approach' in kwargs else 'simple'
    segment_algorithm = kwargs['segment'] if 'segment' in kwargs else 'felzenszwalb'

    if pasteregionsize < 1.0:
        dims = (int(img.size[1] * pasteregionsize),
                int(img.size[0] * pasteregionsize))
    else:
        dims = (img.size[1], img.size[0])
    x = (img.size[1] - dims[0]) / 2
    y = (img.size[0] - dims[1]) / 2
    imgarray = np.asarray(img)
    if len(imgarray.shape) > 2:
        newimg = imgarray[x:dims[0] + x, y:dims[1] + y, :]
    else:
        newimg = imgarray[x:dims[0] + x, y:dims[1] + y]

    transform_matrix, out = performPaste(ImageWrapper(newimg), img_to_paste,
                                         approach, segment_algorithm)
    if pasteregionsize < 1.0:
        out2 = np.copy(imgarray)
        if len(imgarray.shape) > 2:
            out2[x:dims[0] + x, y:dims[1] + y, :] = out
        else:
            out2[x:dims[0] + x, y:dims[1] + y] = out
        out = out2
    ImageWrapper(out).save(target)
    return {
        'transform matrix': tool_set.serializeMatrix(transform_matrix)
    } if transform_matrix is not None else None, None
Example #3
    def test_global_transform_analysis(self):
        from maskgen.image_wrap import ImageWrapper
        analysis = {}
        mask = np.random.randint(0, 2, (1000, 1000), dtype=np.uint8)
        mask[mask > 0] = 255
        tool_set.globalTransformAnalysis(analysis,
                                         ImageWrapper(mask),
                                         ImageWrapper(mask),
                                         mask=mask,
                                         linktype='image.image',
                                         arguments={},
                                         directory='.')
        self.assertEquals('yes', analysis['global'])

        mask = np.zeros((1000, 1000), dtype=np.uint8)
        mask[0:30, 0:30] = 255
        tool_set.globalTransformAnalysis(analysis,
                                         ImageWrapper(mask),
                                         ImageWrapper(mask),
                                         mask=mask,
                                         linktype='image.image',
                                         arguments={},
                                         directory='.')
        self.assertEquals('no', analysis['global'])
        self.assertEquals('small', analysis['change size category'])
        mask = np.zeros((1000, 1000), dtype=np.uint8)
        mask[0:75, 0:75] = 255
        tool_set.globalTransformAnalysis(analysis,
                                         ImageWrapper(mask),
                                         ImageWrapper(mask),
                                         mask=mask,
                                         linktype='image.image',
                                         arguments={},
                                         directory='.')
        self.assertEquals('no', analysis['global'])
        self.assertEquals('medium', analysis['change size category'])
        mask[0:100, 0:100] = 255
        tool_set.globalTransformAnalysis(analysis,
                                         ImageWrapper(mask),
                                         ImageWrapper(mask),
                                         mask=mask,
                                         linktype='image.image',
                                         arguments={},
                                         directory='.')
        self.assertEquals('no', analysis['global'])
        self.assertEquals('large', analysis['change size category'])
        tool_set.globalTransformAnalysis(analysis,
                                         ImageWrapper(mask),
                                         ImageWrapper(mask),
                                         mask=mask,
                                         linktype='image.image',
                                         arguments={},
                                         directory='.')
Example #4
def carveSeams(source,
               target,
               shape,
               mask_filename,
               approach='backward',
               energy='Sobel',
               keep_size=False):
    """
    :param source: path to the input image
    :param target: path for the carved, saved output image
    :param shape: desired (rows, cols) of the output
    :param mask_filename: mask constraining seam removal
    :param keep_size: if True, retain the original image size
    :return: dictionary of generated mask and adjuster file names
    """
    import traceback
    import sys
    try:
        sc = SeamCarver(source,
                        shape=shape,
                        energy_function=SobelFunc()
                        if energy == 'Sobel' else ScharrEnergyFunc(),
                        mask_filename=mask_filename,
                        keep_size=keep_size,
                        seam_function=foward_base_energy_function
                        if approach == 'forward' else base_energy_function)
        image, mask = sc.remove_seams()
    except IndexError as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_tb(exc_traceback, limit=10, file=sys.stdout)
        raise ex
    maskname = os.path.join(
        os.path.dirname(source),
        shortenName(os.path.basename(source),
                    '_real_mask.png',
                    identifier=uniqueId()))
    adjusternames = os.path.join(
        os.path.dirname(source),
        shortenName(os.path.basename(source), '.png', identifier=uniqueId()))
    finalmaskname = os.path.join(
        os.path.dirname(source),
        shortenName(os.path.basename(source),
                    '_final_mask.png',
                    identifier=uniqueId()))
    ImageWrapper(mask).save(maskname)  # maskname already includes the directory
    adjusternames_row, adjusternames_col = sc.mask_tracker.save_adjusters(
        adjusternames)
    sc.mask_tracker.save_neighbors_mask(finalmaskname)
    ImageWrapper(image).save(target)
    return {
        'neighbor mask': finalmaskname,
        'column adjuster': adjusternames_col,
        'row adjuster': adjusternames_row,
        'plugin mask': maskname
    }
Example #5
def transform(img, source, target, **kwargs):
    finalimage = openImageFile(kwargs['Final Image'])
    output = None
    if 'inputmaskname' not in kwargs:
        pastemask, analysis, error = createMask(img, finalimage)
        if error:
            logging.getLogger('maskgen').error("Error creating inputmask " +
                                               error)
        inputmaskname = os.path.splitext(source)[0] + '_inputmask.png'
        # invert() returns a new ImageWrapper rather than modifying in place
        pastemask = pastemask.invert()
        pastemask.save(inputmaskname)
        pastemask = pastemask.to_array()
        output = {'inputmaskname': inputmaskname}
    else:
        pastemask = openImageFile(kwargs['inputmaskname']).to_array()
    finalimage = finalimage.to_array()
    sourceimg = np.copy(img.to_array()).astype('float')
    if len(pastemask.shape) > 2:
        if pastemask.shape[2] > 3:
            mult = pastemask[:, :, 3] / 255.0
        else:
            mult = pastemask[:, :, 1] / 255.0
    else:
        mult = pastemask / 255.0
    for dim in range(sourceimg.shape[2]):
        sourceimg[:,:,dim] = \
             (sourceimg[:,:,dim]*(1.0-mult)).astype('uint8') + \
             (finalimage[:,:,dim]*(mult)).astype('uint8')
    ImageWrapper(sourceimg.astype('uint8')).save(target)
    return output, None
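Note: the channel loop above is a standard mask-weighted blend, out = src * (1 - a) + final * a with a = mask / 255. A minimal NumPy sketch of the same idea (it blends in float and casts once at the end; shapes and values are made up):

import numpy as np

def blend_with_mask(src, final, mask):
    # mask is single-channel uint8; 255 selects `final`, 0 keeps `src`
    alpha = mask.astype('float') / 255.0
    out = src.astype('float') * (1.0 - alpha[:, :, None]) + \
          final.astype('float') * alpha[:, :, None]
    return out.astype('uint8')

src = np.full((2, 2, 3), 10, dtype=np.uint8)
final = np.full((2, 2, 3), 200, dtype=np.uint8)
mask = np.array([[0, 255], [255, 0]], dtype=np.uint8)
print(blend_with_mask(src, final, mask))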
Example #6
def transform(img, source, target, **kwargs):
    # NOTE: arguments passed on AS IS!!
    im = openImageFile(source, args=kwargs)
    imarray = np.array(im)
    #deal with grayscale image
    if len(imarray.shape) == 2:
        w, h = imarray.shape
        ret = np.empty((w, h, 3), dtype=np.uint8)
        ret[:, :, :] = imarray[:, :, np.newaxis]
        imarray = ret

    analysis = {}
    if 'Crop' in kwargs and kwargs['Crop'] == 'yes':
        dims = getExifDimensions(source, crop=True)
        if len(dims) > 0 and imarray.shape[0] != dims[0][0]:
            # dims[0] holds the (height, width) recorded in the EXIF crop tags
            h = int(imarray.shape[0] - dims[0][0]) / 2
            w = int(imarray.shape[1] - dims[0][1]) / 2
            imarray = imarray[h:-h, w:-w]
            analysis['location'] = str((h, w))
    if 'Image Rotated' in kwargs and kwargs['Image Rotated'] == 'yes':
        orientation = exif.getOrientationFromExif(source)
        if orientation is not None:
            analysis.update(exif.rotateAnalysis(orientation))
            imarray = exif.rotateAccordingToExif(imarray,
                                                 orientation,
                                                 counter=True)
    ImageWrapper(imarray).save(target, format='PNG')
    analysis['Image Rotated'] = 'yes' if 'rotation' in analysis else 'no'
    return analysis, None
Example #7
def transform(img, source, target, **kwargs):
    pixelWidth = int(kwargs['width'])
    pixelHeight = int(kwargs['height'])
    ImageWrapper(
        resizeImage(img.to_array(), (pixelHeight, pixelWidth),
                    kwargs['interpolation'])).save(target)
    return None, None
Example #8
    def test_image_donor(self):
        import numpy as np
        from maskgen.image_wrap import ImageWrapper
        graph = Mock()

        def lkup_preds(x):
            return {'b': ['a'], 'e': ['d']}[x]

        def lkup_edge(x, y):
            return {'ab': {'op': 'NoSelect'},
                    'de': {'op': 'SelectRegion'}}[x + y]

        withoutalpha = ImageWrapper(np.zeros((400, 400, 3), dtype=np.uint8))
        withAlpha = ImageWrapper(np.zeros((400, 400, 4), dtype=np.uint8))
        mask = ImageWrapper(np.ones((400, 400), dtype=np.uint8) * 255)
        mask.image_array[0:30, 0:30] = 0
        withAlpha.image_array[0:30, 0:30, 3] = 255

        graph.predecessors = lkup_preds
        graph.get_edge = lkup_edge
        graph.dir = '.'
        graph.get_edge_image = Mock(return_value=mask)

        donor = InterpolateDonor(
            graph, 'e', 'f', 'x',
            (withoutalpha, self.locateFile('tests/videos/sample1.mov')),
            (withAlpha, self.locateFile('tests/videos/sample1.mov')))
        mask = donor.create(arguments={})
        self.assertTrue(np.all(mask.image_array[0:30, 0:30] == 255))
        self.assertEquals(900, np.sum((mask.image_array / 255)))

        donor = InterpolateDonor(
            graph, 'b', 'c', 'x',
            (withoutalpha, self.locateFile('tests/videos/sample1.mov')),
            (withAlpha, self.locateFile('tests/videos/sample1.mov')))
        mask = donor.create(arguments={})
        self.assertIsNone(mask)

        donor = InterpolateDonor(
            graph, 'b', 'c', 'x',
            (withAlpha, self.locateFile('tests/videos/sample1.mov')),
            (withAlpha, self.locateFile('tests/videos/sample1.mov')))
        mask = donor.create(arguments={})
        self.assertTrue(np.all(mask.image_array[0:30, 0:30] == 0))
        self.assertEquals(159100, np.sum((mask.image_array / 255)))
Example #9
def transform(img, source, target, **kwargs):
    areaConstraints = (int(kwargs['area.lower.bound']) if 'area.lower.bound' in kwargs else 0,
                       int(kwargs['area.upper.bound']) if 'area.upper.bound' in kwargs else sys.maxint)
    annotation, mask = createMaskImageWithParams(
        np.asarray(img), source, kwargs, areaConstraint=areaConstraints)
    ImageWrapper(mask).save(target)
    return {'subject': annotation}, None
Example #10
def transform(img, source, target, **kwargs):
    pixelWidth = int(kwargs['pixel_width'])
    pixelHeight = int(kwargs['pixel_height'])
    x = int(kwargs['crop_x'])
    y = int(kwargs['crop_y'])
    cv_image = numpy.array(img)
    new_img = cv_image[y:-(pixelHeight - y), x:-(pixelWidth - x), :]
    ImageWrapper(new_img).save(target)
    return None, None
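Note: the slice cv_image[y:-(pixelHeight - y), x:-(pixelWidth - x)] removes pixelHeight rows and pixelWidth columns in total, split into y rows / x columns before the kept region and the remainder after it (assuming the crop amounts exceed the offsets). A small sketch of that arithmetic with made-up numbers:

import numpy as np

arr = np.arange(100).reshape(10, 10)
y, x, pixelHeight, pixelWidth = 2, 1, 4, 3
# 10 - 4 rows and 10 - 3 columns remain
print(arr[y:-(pixelHeight - y), x:-(pixelWidth - x)].shape)   # (6, 7)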
Example #11
def transform(img, source, target, **kwargs):
    channel_map = {"red": 0, "green": 1, "blue": 2}
    donor = kwargs['mask'] if 'mask' in kwargs else source
    channel_name = kwargs['channel'] if 'channel' in kwargs else "green"
    img = openImageFile(donor)
    color_im = np.zeros((img.size[1], img.size[0], 3), dtype=np.uint8)
    color_im[:, :, channel_map[channel_name]] = img.image_array
    ImageWrapper(color_im).save(target)
    return None, None
Example #12
 def xtest_mask_gen(self):
     from maskgen import tool_set
     from maskgen import image_wrap
     from maskgen.image_wrap import ImageWrapper
     aorig = image_wrap.openImageFile('tests/images/0c5a0bed2548b1d77717b1fb4d5bbf5a-TGT-17-CLONE.png')
     a = aorig.convert('YCbCr')
     borig = image_wrap.openImageFile('tests/images/0c5a0bed2548b1d77717b1fb4d5bbf5a-TGT-18-CARVE.png')
     b = borig.convert('YCbCr')
     mask = tool_set._tallySeam((a.to_array()[:, :, 0]),
                                 (b.to_array()[20:, :, 0]))
     ImageWrapper(mask).save('seam_mask.png')
Example #13
def transform(img, source, target, **kwargs):
    # source = zip of images
    if 'Registration Type' in kwargs:
        reg_type = kwargs['Registration Type']
    else:
        reg_type = 'ECC'
    zipf = ZipCapture(source)
    imgs = []
    logger = logging.getLogger("maskgen")

    retrieved, zip_image = zipf.read()
    if not retrieved:
        raise ValueError("Zip File {0} is empty".format(
            os.path.basename(source)))

    registrar = {
        'ECC': OpenCVECCRegistration(os.path.join(zipf.dir, zipf.names[0]))
    }
    reg_tool = registrar[reg_type]

    if 'Image Rotated' in kwargs and kwargs['Image Rotated'] == 'yes':
        try:
            orientation = getValue(zipf.get_exif(), 'Orientation', None)
        except KeyError:
            orientation = None
    else:
        orientation = None

    logger.debug("Beginning image alignment for " + os.path.basename(source))
    while retrieved:
        aligned = reg_tool.align(zip_image)
        imgs.append(aligned)
        retrieved, zip_image = zipf.read()
    logger.debug(os.path.basename(source) + " alignment complete")

    if not imgs:
        return None, False

    stacks = np.stack(np.asarray(imgs))
    median_img = np.median(stacks, 0)

    analysis = {'Merge Operation': 'Median Pixel'}
    if orientation is not None:
        analysis.update(exif.rotateAnalysis(orientation))
        median_img = exif.rotateAccordingToExif(median_img,
                                                orientation,
                                                counter=True)

    ImageWrapper(median_img).save(target, format='PNG')
    analysis['Image Rotated'] = 'yes' if 'rotation' in analysis else 'no'

    return analysis, None
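Note: the merge step above is just a per-pixel median over the aligned frames. A small NumPy sketch of that reduction (random arrays stand in for the aligned zip contents):

import numpy as np

frames = [np.random.randint(0, 256, (4, 4, 3)).astype(np.uint8) for _ in range(3)]
stack = np.stack(frames)                                # shape (3, 4, 4, 3)
median_img = np.median(stack, axis=0).astype(np.uint8)
print(median_img.shape)                                 # (4, 4, 3)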
Example #14
def open_heic(filename, isMask=False):
    from wand.image import Image as WandImage
    from PIL import Image
    import numpy as np
    from io import BytesIO
    from maskgen.image_wrap import ImageWrapper
    depthmap = {'8': 'uint8', '16': 'uint16', '32': 'uint32'}
    with WandImage(filename=filename) as wand_img:
        with wand_img.convert(format='bmp') as img:
            img_buffer = np.asarray(bytearray(img.make_blob()),
                                    dtype=depthmap[str(img.depth)])
            bytesio = BytesIO(img_buffer)
            pilImage = Image.open(fp=bytesio)
            return ImageWrapper(np.asarray(pilImage),
                                mode=pilImage.mode,
                                info=pilImage.info,
                                to_mask=isMask,
                                filename=filename)
Example #15
def transform(img, source, target, **kwargs):
    cv_image = img.to_array()
    shape = cv_image.shape
    percentageWidth = float(kwargs['percentage_width'])
    percentageHeight = float(kwargs['percentage_height'])
    pixelWidth = int(shape[1] * percentageWidth)
    pixelHeight = int(shape[0] * percentageHeight)
    pixelWidth = pixelWidth - pixelWidth % 8
    pixelHeight = pixelHeight - pixelHeight % 8
    ImageWrapper(
        resizeImage(cv_image, (pixelHeight, pixelWidth),
                    kwargs['interpolation'])).save(target)
    return None, None
Example #16
def pasteAnywhere(img, img_to_paste, mask_of_image_to_paste, simple):
    # get gravity center for rotation
    w, h, area, cx_gra, cy_gra = minimum_bounding_box(mask_of_image_to_paste)

    if not simple:
        # use gravity center to rotate
        rot_mat = build_random_transform(img_to_paste, mask_of_image_to_paste,
                                         (cx_gra, cy_gra))
        img_to_paste = cv2.warpAffine(
            img_to_paste, rot_mat,
            (img_to_paste.shape[1], img_to_paste.shape[0]))
        mask_of_image_to_paste = cv2.warpAffine(
            mask_of_image_to_paste, rot_mat,
            (img_to_paste.shape[1], img_to_paste.shape[0]))
        # (cx, cy) is the center of gravity, which need not align with the bounding-box center
        w, h, area, cx, cy = minimum_bounding_box(mask_of_image_to_paste)
    else:
        rot_mat = np.array([[1, 0, 0], [0, 1, 0]]).astype('float')

    # compute the bounding-box center
    x, y, w1, h1 = tool_set.widthandheight(mask_of_image_to_paste)

    if img.size[0] < w + 4:
        w = img.size[0] - 2
        xplacement = w / 2 + 1
    else:
        xplacement = random.randint(w / 2 + 1, img.size[0] - w / 2 - 1)

    if img.size[1] < h + 4:
        h = img.size[1] - 2
        yplacement = h / 2 + 1
    else:
        yplacement = random.randint(h / 2 + 1, img.size[1] - h / 2 - 1)

    output_matrix = np.eye(3, dtype=float)

    for i in range(2):
        for j in range(2):
            output_matrix[i, j] = rot_mat[i, j]

    # fold the placement offset into the translation terms
    output_matrix[0, 2] = rot_mat[0, 2] + xplacement - x - w1 / 2
    output_matrix[1, 2] = rot_mat[1, 2] + yplacement - y - h1 / 2

    return output_matrix, tool_set.place_in_image(
        ImageWrapper(img_to_paste).to_mask().to_array(),
        img_to_paste,
        np.asarray(img),
        (xplacement, yplacement),
        # x, y are not used here
        rect=(x, y, w, h))
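Note: the bookkeeping at the end of pasteAnywhere embeds the 2x3 warpAffine matrix into a 3x3 homogeneous matrix and folds the placement offset into its translation column. A standalone sketch of that composition (the rotation and offsets are made up):

import numpy as np

def compose_transform(rot_mat_2x3, dx, dy):
    # copy the 2x2 rotation/scale block, then add the placement offset to the translation
    out = np.eye(3, dtype=float)
    out[:2, :2] = rot_mat_2x3[:, :2]
    out[0, 2] = rot_mat_2x3[0, 2] + dx
    out[1, 2] = rot_mat_2x3[1, 2] + dy
    return out

rot = np.array([[0.0, -1.0, 5.0], [1.0, 0.0, 2.0]])   # 90-degree rotation plus translation
print(compose_transform(rot, 10, 20))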
Example #17
def test_two_images(self):
    from maskgen import tool_set
    from maskgen import image_wrap
    from maskgen.image_wrap import ImageWrapper
    aorig = image_wrap.openImageFile('tests/images/0c5a0bed2548b1d77717b1fb4d5bbf5a-TGT-17-CLONE.png')
    a = aorig.convert('YCbCr')
    borig = image_wrap.openImageFile('tests/images/0c5a0bed2548b1d77717b1fb4d5bbf5a-TGT-18-CARVE.png')
    b = borig.convert('YCbCr')
    index = CSHSingleIndexer()
    index.init(number_of_tables=2, length_of_tables=6)
    index.hash_images(ImageWrapper(a.to_array()[:, :, 0]),
                      ImageWrapper(b.to_array()[:, :, 0]))

    collector = ImageLabel()
    analysis = {}
    src_dst_pts = tool_set.getMatchedSIFeatures(a, b)
    data_set, labels = find_lines(src_dst_pts[0], src_dst_pts[1])

    label_set = set(np.unique(labels))
    label_set = set(label_set).difference(set([0, 1]))
    dist = 125 / len(label_set)
    label_map = {}
    i = 0
    for label in np.unique(labels):
        if label >= 0:
            label_map[label] = 124 + i * dist
            i += 1
    amask = np.zeros(a.to_array().shape, dtype=np.uint8)
    bmask = np.zeros(b.to_array().shape, dtype=np.uint8)

    for i in range(len(data_set)):
        result = data_set[i]
        if labels[i] >= 0:
            amask[max(int(result[2][0][0]) - 5, 0):min(int(result[2][0][0]) + 5, amask.shape[0]),
                  max(int(result[2][0][1]) - 5, 0):min(int(result[2][0][1]) + 5, amask.shape[1]), :] = label_map[labels[i]]
            bmask[max(int(result[3][0][0]) - 5, 0):min(int(result[3][0][0]) + 5, bmask.shape[0]),
                  max(int(result[3][0][1]) - 5, 0):min(int(result[3][0][1]) + 5, bmask.shape[1]), :] = label_map[labels[i]]
    ImageWrapper(amask).save('amask.png')
    ImageWrapper(bmask).save('bmask.png')
Example #18
 def test_wh_generation(self):
     from maskgen.image_wrap import ImageWrapper
     seq2 = gen_wh(4).astype(int)
     self.assertEqual(seq2.shape[0], 16)
     im = np.copy(seq2)
     im[im < 0] = 0
     im[im > 0] = 255
     ImageWrapper(im.astype('uint8')).save('foo.png')
     for i in range(4):
         for j in range(4):
             quadsum = sum(sum(seq2[i * 4:i * 4 + 4, j * 4:j * 4 + 4]))
             if (i, j) == (0, 0):
                 self.assertEqual(16, quadsum, msg=str((i, j)))
             else:
                 self.assertEqual(0, quadsum, msg=str((i, j)))
Example #19
def transform(img, source, target, **kwargs):
    cv_image = numpy.array(img)
    shape = cv_image.shape
    snapto8 = 'eightbit_boundary' in kwargs and kwargs[
        'eightbit_boundary'] == 'yes'
    percentageWidth = float(kwargs['percentage_width'])
    percentageHeight = float(kwargs['percentage_height'])
    pixelWidth = int(shape[1] * percentageWidth)
    pixelHeight = int(shape[0] * percentageHeight)
    # guard against percentages that are too big
    if pixelHeight > shape[0] / 2:
        pixelHeight = shape[0] / 2 - 8
    if pixelWidth > shape[1] / 2:
        pixelWidth = shape[1] / 2 - 8

    start_y = 0
    end_y = shape[0]
    start_x = 0
    end_x = shape[1]
    if snapto8:
        pixelWidth = (pixelWidth + (8 - pixelWidth % 8))
        pixelHeight = (pixelHeight + (8 - pixelHeight % 8))

    # case where width is not cropped
    if pixelWidth > 0:
        if snapto8:
            start_x = randint(1, pixelWidth / 8 -
                              1) * 8 if pixelWidth > 8 else 0
        else:
            start_x = randint(1, pixelWidth - 1) if pixelWidth > 1 else 0
        end_x = -(pixelWidth - start_x)

    if pixelHeight > 0:
        if snapto8:
            start_y = randint(1, pixelHeight / 8 -
                              1) * 8 if pixelHeight > 8 else 0
        else:
            start_y = randint(1, pixelHeight - 1) if pixelHeight > 1 else 0
        end_y = -(pixelHeight - start_y)

    new_img = cv_image[start_y:end_y, start_x:end_x, :]
    ImageWrapper(new_img).save(target)
    return {
        'crop_x': start_x,
        'crop_y': start_y,
        'crop_width': pixelWidth,
        'crop_height': pixelHeight
    }, None
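Note: when eightbit_boundary is 'yes', the crop amount is bumped up to the next multiple of 8 (exact multiples are bumped as well, matching the expression above). A one-line sketch of that arithmetic:

def snap_up_to_8(n):
    # n + (8 - n % 8), as used in the snapto8 branch above
    return n + (8 - n % 8)

print([snap_up_to_8(n) for n in (1, 8, 9, 15)])   # [8, 16, 16, 16]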
Example #20
    def test_SIFT(self):
        from maskgen.image_wrap import ImageWrapper
        img1 = ImageWrapper(
            np.random.randint(0, 255, (4000, 5000, 3), dtype='uint8'))
        img2 = ImageWrapper(
            np.random.randint(0, 255, (8000, 8000, 3), dtype='uint8'))
        img2.image_array[1000:2000,
                         1000:2000, :] = img1.image_array[2000:3000,
                                                          2000:3000, :]
        mask1 = ImageWrapper(np.zeros((4000, 5000), dtype='uint8'))
        mask1.image_array[2000:3000, 2000:3000] = 255
        mask2 = ImageWrapper(np.zeros((8000, 8000), dtype='uint8'))
        mask2.image_array[1000:2000, 1000:2000] = 255

        features = tool_set.getMatchedSIFeatures(img1,
                                                 img2,
                                                 mask1=mask1,
                                                 mask2=mask2,
                                                 arguments={
                                                     'homography max matches':
                                                     '2000',
                                                     'homography': 'RANSAC-4'
                                                 })

        img1 = ImageWrapper(
            np.random.randint(0, 65535, (4000, 5000, 3), dtype='uint16'))
        img2 = ImageWrapper(
            np.random.randint(0, 65535, (8000, 8000, 3), dtype='uint16'))
        img2.image_array[1000:2000,
                         1000:2000, :] = img1.image_array[2000:3000,
                                                          2000:3000, :]
        mask1 = ImageWrapper(np.zeros((4000, 5000), dtype='uint8'))
        mask1.image_array[2000:3000, 2000:3000] = 255
        mask2 = ImageWrapper(np.zeros((8000, 8000), dtype='uint8'))
        mask2.image_array[1000:2000, 1000:2000] = 255

        features = tool_set.getMatchedSIFeatures(img1,
                                                 img2,
                                                 mask1=mask1,
                                                 mask2=mask2,
                                                 arguments={
                                                     'homography max matches':
                                                     '2000',
                                                     'homography': 'RANSAC-4'
                                                 })
Example #21
def transform(img, source, target, **kwargs):
    rotation = int(kwargs['rotation'])
    imgdata = img.to_array()
    #if len(imgdata.shape) > 2:
    #    imgdata = cv2.cvtColor(imgdata, cv2.COLOR_RGB2GRAY)
    rows, cols, channels = imgdata.shape
    M = cv2.getRotationMatrix2D((cols / 2, rows / 2), rotation, 1)
    constant = int(imgdata.max())
    rotated_img = cv2.warpAffine(imgdata,
                                 M, (cols, rows),
                                 flags=cv2.INTER_LINEAR,
                                 borderMode=cv2.BORDER_REPLICATE)
    #rotated_img = rotateImage(rotation,(rows / 2,cols / 2),imgdata)
    #out_img = cv2.cvtColor(rotated_img, cv2.COLOR_GRAY2RGB)
    # display(out_img, 'rotation')
    ImageWrapper(rotated_img).save(target)
    return {'transform matrix': serializeMatrix(M)}, None
Example #22
def transform(img, source, target, **kwargs):
    donor = kwargs['donor']  # raise error if missing donor

    im_source = openImageFile(source).image_array
    im_donor_trace = openImageFile(donor).image_array

    if np.shape(im_source)[0:2] != np.shape(im_donor_trace)[0:2]:
        orientation_source = np.shape(im_source)[0] - np.shape(im_source)[1]
        orientation_donor = np.shape(im_donor_trace)[0] - np.shape(
            im_donor_trace)[1]
        if sign(orientation_source) != sign(orientation_donor):
            im_donor_trace = np.rot90(im_donor_trace, -1)
        location, im_source = centeredCrop(im_source, im_donor_trace)
        ImageWrapper(im_source).save(target, format='PNG')
    else:
        location = (0, 0)
    return {'location': location}, None
Example #23
def pca(i, component=0, normalize=False):
    from sklearn.decomposition import PCA
    import cv2
    i1 = np.reshape(i.image_array[:, :, 0], i.size[1] * i.size[0])
    i2 = np.reshape(i.image_array[:, :, 1], i.size[1] * i.size[0])
    i3 = np.reshape(i.image_array[:, :, 2], i.size[1] * i.size[0])
    X = np.stack([i1, i2, i3])
    pca = PCA(3)
    pca.fit_transform(X)
    A = pca.components_[component] * pca.explained_variance_ratio_[component]
    A1 = (A - min(A)) / (max(A) - min(A)) * 255
    if normalize:
        imhist, bins = np.histogram(A1, 256, normed=True)
        cdf = imhist.cumsum()  # cumulative distribution function
        cdf = 255 * cdf / cdf[-1]
        A1 = np.interp(A1, bins[:-1], cdf)
    PCI = np.reshape(A1, i.image_array[:, :, 0].shape).astype('uint8')
    return ImageWrapper(PCI)
Example #24
def ela(i):
    from PIL import Image
    import time
    import os
    tmp = 't' + str(time.clock()) + '.jpg'
    Image.fromarray(i.image_array).save(tmp, 'JPEG', quality=95)
    with open(tmp, 'rb') as f:
        i_qa = Image.open(f)
        i_qa_array = np.asarray(i_qa)
        i_qa.load()
    os.remove(tmp)
    ela_im = i.image_array - i_qa_array
    maxdiff = np.max(ela_im)
    mindiff = np.min(ela_im)
    scale = 255.0 / (maxdiff - mindiff)
    for channel in range(ela_im.shape[2]):
        # alternative scaling: (ela_im[:, :, channel] - mindiff) * scale
        ela_im[:, :, channel] = histeq(ela_im[:, :, channel])
    return ImageWrapper(ela_im.astype('uint8'))
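Note: the helper above round-trips through a temporary JPEG on disk; the heart of error-level analysis is just the recompression difference. A hedged in-memory sketch of that core step (it skips the histeq post-processing and uses signed arithmetic to avoid the uint8 wrap-around the subtraction above is exposed to):

import numpy as np
from io import BytesIO
from PIL import Image

def ela_sketch(arr, quality=95):
    # re-encode the array as JPEG in memory and return the absolute recompression difference
    buf = BytesIO()
    Image.fromarray(arr).save(buf, 'JPEG', quality=quality)
    buf.seek(0)
    recompressed = np.asarray(Image.open(buf)).astype(int)
    return np.abs(arr.astype(int) - recompressed).astype('uint8')

arr = np.random.randint(0, 256, (64, 64, 3)).astype('uint8')
print(ela_sketch(arr).max())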
Example #25
def repairMask(graph, start, end):
    """
      :param graph:
      :param start:
      :param end:
      :return:
      @type graph: ImageGraph
      @type start: str
      @type end: str
      """
    edge = graph.get_edge(start, end)
    startimage, name = graph.get_image(start)
    finalimage, fname = graph.get_image(end)
    mask = graph.get_edge_image(start, end, 'maskname')
    inputmaskname = os.path.splitext(name)[0] + '_inputmask.png'
    ImageWrapper(composeCloneMask(mask, startimage,
                                  finalimage)).save(inputmaskname)
    edge['inputmaskname'] = os.path.split(inputmaskname)[1]
    graph.setDataItem('autopastecloneinputmask', 'yes')
Example #26
def transform(img, source, target, **kwargs):
    im_source = openImageFile(source).image_array
    dimensionX, dimensionY = coordsFromString(kwargs['crop dimensions'])
    eyePosX, eyePosY = coordsFromString(str(kwargs['right eye position'])) \
        if 'right eye position' in kwargs else (None, None)
    chinPosX, chinPosY = coordsFromString(str(kwargs['chin position'])) \
        if 'chin position' in kwargs else (None, None)

    filemap = kwargs['filemap'] if 'filemap' in kwargs else None
    if filemap is not None:
        face = selectfacefromdata(filemap, kwargs['donor'], im_source.shape, cropDimensions=(dimensionX,dimensionY))
        #eyePosX = face['right_eye_x']
        eyePosY = face['right_eye_y']
        chinPosX = face['chin_x']
        #chinPosY = face['chin_y']

    faceCenter = chinPosX, eyePosY

    top = (faceCenter[1] - dimensionY / 2) - (faceCenter[1] - dimensionY / 2) % 8
    left = (faceCenter[0] - dimensionX / 2) - (faceCenter[0] - dimensionX / 2) % 8

    im_source = im_source[top:top + dimensionY, left:left + dimensionX, :]

    ImageWrapper(im_source).save(target, format='PNG')
    return {"top":top, "left":left},None
Example #27
 def paste(self, video_file, object, codec):
     vfi = cv2api_delegate.videoCapture(video_file)
     width = int(vfi.get(cv2api_delegate.prop_frame_width))
     height = int(vfi.get(cv2api_delegate.prop_frame_height))
     fourcc = fourccs[codec]
     video_prefix = video_file[:video_file.rfind('.')]
     video_file_output = video_prefix + '_paste.' + suffices[codec]
     video_file_output = os.path.split(video_file_output)[1]
     vfo = cv2api_delegate.videoWriter(video_file_output, fourcc,
                                       vfi.get(cv2api_delegate.prop_fps),
                                       (width, height))
     if not vfo.isOpened():
         raise ValueError('VideoWriter failed to open.')
     try:
         while vfi.isOpened() and vfo.isOpened():
             r,f = vfi.read()
             if not r:
                 break
             i = ImageWrapper(self.paste_in(f,object),mode='BGR')
             vfo.write(i.image_array)
     finally:
         vfi.release()
         vfo.release()
         self.addFileToRemove(video_file_output)
     return video_file_output
Example #28
def transform(img, source, target, **kwargs):
    mask = numpy.asarray(
        tool_set.openImageFile(kwargs['inputmaskname']).to_mask())
    img_array = img.to_array()
    result = None
    if len(img_array.shape) == 2:
        # grey
        result = numpy.zeros(
            (img_array.shape[0], img_array.shape[1], 2)).astype('uint8')
        result[:, :, 0] = img_array
        result[:, :, 1] = mask
    elif len(img_array.shape) == 3:
        if img_array.shape[2] == 4:
            result = img_array
            # replace the existing alpha channel (index 3) with the mask
            result[:, :, 3] = mask
        else:
            result = numpy.zeros(
                (img_array.shape[0], img_array.shape[1], 4)).astype('uint8')
            result[:, :, 0] = img_array[:, :, 0]
            result[:, :, 1] = img_array[:, :, 1]
            result[:, :, 2] = img_array[:, :, 2]
            result[:, :, 3] = mask
    ImageWrapper(result).save(target)
    return None, None
Example #29
 def create_zero(h, w):
     return ImageWrapper(np.zeros((h, w), dtype='uint8'))
Example #30
def copyFrames(in_file,
               out_file,
               start_time,
               end_time,
               paste_time,
               codec=None):
    """
    :param in_file: full path of the video file from which to copy frames
    :param out_file: resulting video file
    :param start_time: (milli, frame no) at which to start copying
    :param end_time: (milli, frame no) at which to end copying
    :param paste_time: (milli, frame no) at which the copied frames are pasted
    :param codec:
    :return: number of frames written ahead of the pasted segment
    """
    import time
    logger = logging.getLogger('maskgen')
    frames_to_write = []
    frames_to_copy = []
    cap = cv2api_delegate.videoCapture(in_file)
    fourcc = cv2api_delegate.get_fourcc(
        str(codec)) if codec is not None else cap.get(
            cv2api_delegate.fourcc_prop)
    fps = cap.get(cv2api_delegate.prop_fps)
    height = int(np.rint(cap.get(cv2api_delegate.prop_frame_height)))
    width = int(np.rint(cap.get(cv2api_delegate.prop_frame_width)))
    out_video = cv2api_delegate.videoWriter(out_file,
                                            fourcc,
                                            fps, (width, height),
                                            isColor=1)
    if not out_video.isOpened():
        err = out_file + " fourcc: " + str(fourcc) + " FPS: " + str(fps) + \
              " H: " + str(height) + " W: " + str(width)
        raise ValueError('Unable to create video ' + err)
    copy_time_manager = VidTimeManager(startTimeandFrame=start_time,
                                       stopTimeandFrame=end_time)
    paste_time_manager = VidTimeManager(startTimeandFrame=paste_time)
    write_count = 0
    try:
        while (not copy_time_manager.isPastTime() and cap.grab()):
            ret, frame = cap.retrieve()
            elapsed_time = float(cap.get(cv2api_delegate.prop_pos_msec))
            copy_time_manager.updateToNow(elapsed_time)
            paste_time_manager.updateToNow(elapsed_time)
            if not copy_time_manager.isBeforeTime(
            ) and not copy_time_manager.isPastTime():
                frames_to_copy.append(frame)
            if not paste_time_manager.isBeforeTime():
                frames_to_write.append(frame)
            else:
                out_video.write(frame)
                write_count += 1
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug("First to copy {}".format(
                hashlib.sha256(frames_to_copy[0]).hexdigest()))
            logger.debug("Last to copy {}".format(
                hashlib.sha256(frames_to_copy[-1]).hexdigest()))
            ImageWrapper(frames_to_copy[0]).save('first_' + str(time.clock()) +
                                                 '.png')
            ImageWrapper(frames_to_copy[-1]).save('last_' + str(time.clock()) +
                                                  '.png')
        if len(frames_to_write) > 0:
            # paste prior to copy
            for copy_frame in frames_to_copy:
                out_video.write(copy_frame)
            for write_frame in frames_to_write:
                out_video.write(write_frame)
        else:
            # paste after to copy
            frame = None
            while (paste_time_manager.isBeforeTime() and cap.grab()):
                ret, frame = cap.retrieve()
                elapsed_time = float(cap.get(cv2api_delegate.prop_pos_msec))
                paste_time_manager.updateToNow(elapsed_time)
                if paste_time_manager.isBeforeTime():
                    out_video.write(frame)
                    write_count += 1
            for copy_frame in frames_to_copy:
                out_video.write(copy_frame)
            if frame is not None:
                out_video.write(frame)
        while (cap.grab()):
            ret, frame = cap.retrieve()
            out_video.write(frame)
    finally:
        cap.release()
        out_video.release()
    return write_count