Example #1
def create_captcha(text, shear=0.2, size=(20, 20)):
    '''
    Create a black-and-white (mode 'L') image containing the given text and apply a shear to it.
    :param text:  str, the captcha text to render
    :param shear: shear factor applied to the image
    :param size:  image size as (width, height)
    :return: the sheared image as a float array normalized to [0, 1]
    '''
    im = Image.new('L', size, color='black')
    draw = ImageDraw.Draw(im)
    draw.text(xy=(1, 1),
              text=text,
              fill=1,
              font=ImageFont.truetype(r'Coval-Black.otf', 16))
    image = np.array(im)

    # Apply the shear effect
    image = tf.warp(image, tf.AffineTransform(shear=shear))

    # Save the training image
    t = time.time()
    name = str(text) + str(shear) + str(t) + '.png'
    if not os.path.exists('Image'):
        os.mkdir('Image')

    im = Image.fromarray(np.uint8(image))
    im.save(os.path.join('Image', name))

    return image / image.max()  # normalize the image so the value range stays bounded
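
A minimal usage sketch (assuming PIL's Image/ImageDraw/ImageFont, numpy as np, skimage.transform as tf, time and os are imported as the snippet implies, and that Coval-Black.otf is available in the working directory):

captcha = create_captcha("GO", shear=0.2)  # (20, 20) float array normalized to [0, 1]
# a PNG named "<text><shear><timestamp>.png" is also written to the Image/ directory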
Example #2
def random_affine(img, img_mask):
    flat_sum_mask = sum(img_mask.flatten())
    (row, col) = img_mask.shape
    angle = shear_deg = 0
    zoom = 1
    center_shift = np.array((1000, 1000)) / 2. - 0.5
    tform_center = transform.SimilarityTransform(translation=-center_shift)
    tform_uncenter = transform.SimilarityTransform(translation=center_shift)
    big_img = np.zeros((1000, 1000, 3), dtype=np.uint8)
    big_mask = np.zeros((1000, 1000), dtype=np.uint8)
    big_img[190:(190 + row), 144:(144 + col)] = img
    big_mask[190:(190 + row), 144:(144 + col)] = img_mask
    affine = random.choice(["rotate", "zoom", "shear"])
    if affine == "rotate":
        angle = random.uniform(-90, 90)
    if affine == "zoom":
        zoom = random.uniform(0.5, 1.5)
    if affine == "shear":
        shear_deg = random.uniform(-25, 25)

    # pdb.set_trace()
    tform_aug = transform.AffineTransform(rotation=np.deg2rad(angle),
                                          scale=(1 / zoom, 1 / zoom),
                                          shear=np.deg2rad(shear_deg),
                                          translation=(0, 0))
    tform = tform_center + tform_aug + tform_uncenter
    # pdb.set_trace()
    img_tr = transform.warp((big_img), tform)
    mask_tr = transform.warp((big_mask), tform)
    # pdb.set_trace()
    masktemp = cv2.cvtColor(
        (img_tr * 255).astype(np.uint8), cv2.COLOR_BGR2GRAY) > 20
    img_tr = img_tr[np.ix_(masktemp.any(1), masktemp.any(0))]
    mask_tr = mask_tr[np.ix_(masktemp.any(1), masktemp.any(0))]
    return (img_tr * 255).astype(np.uint8), (mask_tr * 255).astype(np.uint8)
Example #3
    def calculate_coordinates(self, type='Proj'):
        """ In laboratory spectroscopy the transformation
        should only be affine (Rotation,Translation and Shear).
        But in general cases, the transformation could
        also have deformation and the transformation is
        a projection.
        See Mathematical definition of affine
        transformation and Projection (Homography) for more details. """
        self.repere_init_array_ = self.input_data.loc[
            self.input_data['Type'].str.contains('ef')].to_numpy()[:, 1:]
        self.repere_final_array_ = self.output_data.loc[
            self.output_data['Type'].str.contains('ef')].to_numpy()[:, 1:]
        self.mesures_init_array_ = self.input_data.loc[
            self.input_data['Type'].str.contains('easure')].to_numpy()[:, 1:]

        if type == 'Affine':
            self.transformation_ = tf.AffineTransform()
            self.transformation_.estimate(self.repere_init_array_,
                                          self.repere_final_array_)
            self.mesures_final_array_ = self.transformation_(
                self.mesures_init_array_)

        if type == 'Proj':
            self.transformation_ = tf.ProjectiveTransform()
            self.transformation_.estimate(self.repere_init_array_,
                                          self.repere_final_array_)
            self.mesures_final_array_ = self.transformation_(
                self.mesures_init_array_)
Example #4
    def __call__(self, sample):
        """
            sample (dict): sample with 'image', 'labels' and 'warp_boxes' entries.

        Returns:
            dict: the sample with the affine-transformed image and labels and the warped boxes.
        """
        img, labels = sample['image'], sample['labels']
        warp_boxes = sample['warp_boxes']

        ret = self.get_params(self.degrees, self.translate, self.scale, self.shear, img.size)
        img = TF.affine(img, *ret, resample=self.resample, fillcolor=self.fillcolor)
        labels = TF.affine(labels, *ret, resample=self.resample, fillcolor=self.fillcolor)
        orig_box = warp_boxes * 256. + 256.

        # Affine boxes
        center = (img.size[0] * 0.5 + 0.5, img.size[1] * 0.5 + 0.5)
        matrix = np.array(TF._get_inverse_affine_matrix(center, *ret)).reshape(2, 3)
        matrix = np.vstack([matrix, np.eye(3)[2]])
        assert matrix.shape == (3, 3)
        affine_trans = trans.AffineTransform(matrix=matrix)
        new_boxes = affine_trans.inverse(orig_box.reshape(-1, 2)) * (1. / 256.) - 1
        new_boxes = torch.from_numpy(new_boxes.reshape(-1, 4).astype(np.float32))

        sample.update({'image': img,
                       'labels': labels,
                       'warp_boxes': new_boxes
                       })
        return sample
Example #5
def points_matcher(src, dst):
    model = transform.AffineTransform()

    # estimate the affine transform from src to dst (linear least squares)
    model.estimate(src=src, dst=dst)

    return model.params
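
A minimal usage sketch with hypothetical corresponding points (assuming numpy as np and skimage.transform imported as `transform`, as the snippet implies):

src = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]])
dst = src + np.array([2., 3.])  # the same points after a pure translation
M = points_matcher(src, dst)    # 3x3 homogeneous matrix; the translation ends up in the last column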
Example #6
def deformation(image):
    random_shear_angl = np.random.random() * np.pi / 6 - np.pi / 12
    random_rot_angl = np.random.random() * np.pi / 7 - np.pi / 12 - random_shear_angl
    random_x_scale = np.random.random() * .4 + .8
    random_y_scale = np.random.random() * .4 + .8
    random_x_trans = np.random.random() * image.shape[0] / 4 - image.shape[0] / 8
    random_y_trans = np.random.random() * image.shape[1] / 4 - image.shape[1] / 8

    dx = image.shape[0]/2. \
            - random_x_scale * image.shape[0]/2 * np.cos(random_rot_angl)\
            + random_y_scale * image.shape[1]/2 * np.sin(random_rot_angl + random_shear_angl)

    dy = image.shape[1]/2. \
            - random_x_scale * image.shape[0]/2 * np.sin(random_rot_angl)\
            - random_y_scale * image.shape[1]/2 * np.cos(random_rot_angl + random_shear_angl)

    trans_mat = tf.AffineTransform(rotation=random_rot_angl,
                                   translation=(dx + random_x_trans,
                                                dy + random_y_trans),
                                   shear=random_shear_angl,
                                   scale=(random_x_scale, random_y_scale))

    return tf.warp(image, trans_mat.inverse, output_shape=image.shape)
Example #7
def shear_image(img: np.ndarray, shear_factor: float) -> np.ndarray:
    # Shear the image by the given factor and return the result as a uint8 array.
    # Create affine transform
    afine_tf = transform.AffineTransform(shear=shear_factor)
    # Apply transform to image data
    modified_img = transform.warp(img, inverse_map=afine_tf)
    return (modified_img * 255.).astype('uint8')
Example #8
def shear(name, img_arr):
    shear_angle = random.uniform(0, MAX_SHEAR_ANGLE)
    affine_tf = tf.AffineTransform(shear=shear_angle, rotation=-shear_angle)
    sheared = tf.warp(img_arr, affine_tf, mode=FILL_MODE)
    sheared_inv = tf.warp(img_arr, affine_tf.inverse, mode=FILL_MODE)
    io.imsave(name + "_sheared" + IMG_TYPE, sheared)
    io.imsave(name + "_sheared_inverse" + IMG_TYPE, sheared_inv)
Example #9
def composeSkimage(foreground, mask, background, transX=0, transY=0):
    """
    Composite a foreground onto a background image of the same size.
    @param
    foreground: original image containing the object to be inserted
    mask: mask of the object within the original image
    background: background image to composite onto
    transX: horizontal translation in pixels, positive to the right
    transY: vertical translation in pixels, positive downward
    """
    # Translate the foreground image
    tform = transform.AffineTransform(translation=(transX, transY))
    foreAug = transform.warp(foreground, tform.inverse)
    foreAug = img_as_ubyte(foreAug)
    # foreAug = np.roll(foreground, transX, axis=1)
    # foreAug = np.roll(foreAug, transY, axis=0)

    # Translate the mask
    new_mask = np.roll(mask, transX, axis=1)  # shift right
    new_mask = np.roll(new_mask, transY, axis=0)  # shift down

    # subtract the foreground area from the background
    background = background * (
        1 - new_mask.reshape(mask.shape[0], mask.shape[1], 1))
    # extract the object from the foreground
    foreAug = foreAug * new_mask.reshape(mask.shape[0], mask.shape[1], 1)

    composed_image = background + foreAug
    composed_image = composed_image.astype(np.uint8)
    io.imshow(composed_image)
    plt.show()
    return composed_image
Example #10
def scale_down(image):
    scale_x = random.uniform(1.1, 1.15)
    scale_y = scale_x
    c = (image.shape[0] / 2 * (1 - scale_x), image.shape[1] / 2 * (1 - scale_y))
    transform = imgtf.AffineTransform(scale=(scale_x, scale_y), translation=c)
    image = imgtf.warp(image, transform, mode='wrap', preserve_range=True)
    return np.clip(image, 0, 255).astype(np.uint8)
Example #11
    def align_face(
            self,
            image,
            face_rect, *,
            dim=96,
            border=0,
            mask=FaceAlignMask.INNER_EYES_AND_BOTTOM_LIP
    ):
        mask = np.array(mask.value)

        landmarks = self.get_landmarks(image, face_rect)
        proper_landmarks = border + dim * self.face_template[mask]
        A = np.hstack([landmarks[mask], np.ones((3, 1))]).astype(np.float64)
        B = np.hstack([proper_landmarks, np.ones((3, 1))]).astype(np.float64)
        T = np.linalg.solve(A, B).T

        return tr.warp(
            image,
            tr.AffineTransform(T).inverse,
            output_shape=(dim + 2 * border, dim + 2 * border),
            order=3,
            mode='constant',
            cval=0,
            clip=True,
            preserve_range=True
        )
Example #12
def data_aug(image, label, angle=30, resize_rate=0.9):
    flip = random.randint(0, 1)
    size = image.shape[0]
    rsize = random.randint(np.floor(resize_rate * size), size)
    w_s = random.randint(0, size - rsize)
    h_s = random.randint(0, size - rsize)
    sh = random.random()/2 - 0.25
    rotate_angle = random.random()/180*np.pi*angle
    
    # Create affine transform
    afine_tf = transform.AffineTransform(shear=sh,rotation=rotate_angle)
    
    # Apply transform to image data
    image = transform.warp(image, inverse_map=afine_tf,mode='edge')
    label = transform.warp(label, inverse_map=afine_tf,mode='edge')
    
    # Randomly cropping image frame
    image = image[w_s:w_s+size, h_s:h_s+size,:]
    label = label[w_s:w_s+size, h_s:h_s+size]
    
    # Randomly flip frame
    if flip:
        image = image[:,::-1,:]
        label = label[:,::-1]
        
    return image, label
Example #13
def apply_transformation(Tstack, trans_matrix):
    """
    only applies a transformation matrix calculated from other slices

    Parameters
    ----------
    Tstack : uint16 3D array [x,y,t]
        slice to be registered in time
    trans_matrix : 
        transformation matrix

    Returns
    -------
    outStack : uint16 3D array [x,y,t]
        in time registered slice

    """
    Tstack = np.transpose(Tstack, (2, 0, 1))
    outStack = np.zeros(Tstack.shape).astype(float)
    for t in range(Tstack.shape[0]):
        tform = tf.AffineTransform(matrix=trans_matrix[t, :, :])
        outStack[t, :, :] = tf.warp(Tstack[t, :, :], tform)
    outStack = np.uint16(outStack * 2**16)
    outStack = np.transpose(outStack, (1, 2, 0))

    return outStack
Example #14
    def augment_img(self, img):
        """
        Performs augmentation of the image if required and returns a tensor
        """
        # logging.debug('Shape of the array: {}'.format(img.shape))
        if self.aug_img is False:
            # logging.debug('No augmentation max before image transform: {}'.format(np.max(np.max(img))))
            return self.transform(img)
        if random.random() < self.aug_dict['prob']:
            img_transform = skitransform.AffineTransform(
                scale=tuple([
                    random.uniform(self.aug_dict['lscale'],
                                   self.aug_dict['uscale'])
                ] * 2),
                rotation=random.uniform(-self.aug_dict['rot'],
                                        self.aug_dict['rot']),
                translation=(random.uniform(-self.aug_dict['trans'],
                                            self.aug_dict['trans']),
                             random.uniform(-self.aug_dict['trans'],
                                            self.aug_dict['trans'])))
            img = skitransform.warp(img, img_transform)
            if random.random() < self.aug_dict['flip_lr']:
                img = np.fliplr(img)
            # logging.debug('Augmentation max before image transform: {}'.format(np.max(np.max(img))))
            return self.transform(np.uint8(img * 255))

        else:
            # logging.debug('Augmentation max before image transform (no aug): {}'.format(np.max(np.max(img))))
            return self.transform(img)
Example #15
def get_overlay(fifo):
    # get the whole FIFO
    ir_raw = fifo.read()
    # trim to 128 bytes
    ir_trimmed = ir_raw[0:128]
    # go all numpy on it
    ir = np.frombuffer(ir_trimmed, np.uint16)
    # set the array shape to the sensor shape (16x4)
    ir = ir.reshape((16, 4))[::-1, ::-1]
    ir = img_as_float(ir)
    # stretch contrast on our heat map
    p2, p98 = np.percentile(ir, (2, 98))
    ir = exposure.rescale_intensity(ir, in_range=(p2, p98))
    # increase even further? (optional)
    # ir = exposure.equalize_hist(ir)

    # turn our array into pretty colors
    cmap = plt.get_cmap('spectral')
    rgba_img = cmap(ir)
    rgb_img = np.delete(rgba_img, 3, 2)

    # align the IR array with the camera
    tform = transform.AffineTransform(scale=SCALE,
                                      rotation=ROT,
                                      translation=OFFSET)
    ir_aligned = transform.warp(rgb_img,
                                tform.inverse,
                                mode='constant',
                                output_shape=im.shape)
    # turn it back into a ubyte so it'll display on the preview overlay
    ir_byte = img_as_ubyte(ir_aligned)
    # return buffer
    return np.getbuffer(ir_byte)
Example #16
def scale(x, severity=3):
    c = [(1 / .9, 1 / .9), (1 / .8, 1 / .8), (1 / .7, 1 / .7),
         (1 / .6, 1 / .6), (1 / .5, 1 / .5)][severity - 1]

    aff = transform.AffineTransform(scale=c)

    a1, a2 = aff.params[0, :2]
    b1, b2 = aff.params[1, :2]
    a3 = 13.5 * (1 - a1 - a2)
    b3 = 13.5 * (1 - b1 - b2)
    aff = transform.AffineTransform(scale=c, translation=[a3, b3])

    x = np.array(x) / 255.
    x = transform.warp(x, inverse_map=aff)
    x = np.clip(x, 0, 1) * 255
    return x.astype(np.float32)
Example #17
def do_shear(img_X,img_Y,sh,horz):

  if len(img_X) > 0:
    h,w = img_X.shape[:2]
  else:
    h,w = img_Y.shape[:2]

  matrix = np.zeros((3,3))
  matrix[0,0] = 1.0
  matrix[1,1] = 1.0
  matrix[2,2] = 1.0

  if horz:
    matrix[0,1] = sh
    matrix[0,2] = -np.sin(sh)* w / 2
  else:
    matrix[1,0] = sh
    matrix[1,2] = -np.sin(sh)* h / 2

  # Create affine transform
  afine_tf = transform.AffineTransform(matrix)
  #print(afine_tf.params)
  # Apply transform to image data
  if len(img_X) > 0:
    img_X = transform.warp(img_X, inverse_map=afine_tf,mode='edge', order=3)
  if len(img_Y) > 0:
    img_Y = transform.warp(img_Y, inverse_map=afine_tf,mode='edge', order=0)

  return img_X, img_Y
Example #18
    def process_LK(self, sparse_flag='sd', f_tail='_lksd'):

        # the frame triplet is needed in both branches and for the warp below,
        # so define it before branching
        input_data = [self.prev_frame2, self.prev_frame1, self.now_frame]
        # print(np.array(input_data,np.uint8).shape)

        if sparse_flag == 'sd':
            pts_source, pts_target_container = sparse_sd(
                np.array(input_data, np.uint8), lead_steps=self.pre_end + 1)
        else:
            pts_source, pts_target_container = sparse_linear(
                np.array(input_data, np.uint8), lead_steps=self.pre_end + 1)

        trf = sktf.AffineTransform()

        nowcasts = []
        print(self.pre_end + 1)
        for lead_step in range(self.pre_step, self.pre_end + 1, self.pre_step):

            print('Predict:::::', lead_step, ' hours later with lk')

            pts_target = pts_target_container[lead_step]
            # estimate transformation matrix
            # based on source and target points
            trf.estimate(pts_source, pts_target)

            # make a nowcast
            nowcst_frame = sktf.warp(input_data[-1] / 255, trf.inverse)
            nowcst_frame = nowcst_frame * 255.0

            # out_pic = self.out_path + self.fn_head + self.fn_mid1 + str(self.start_ind + lead_step) + \
            #         self.fn_mid2 + str(self.ch).zfill(2) + f_tail + '.jpg'
            # cv2.imwrite(out_pic, nowcst_frame)

            nowcasts.append(nowcst_frame)

        return np.array(nowcasts)
Example #19
def augment_transform(image, label):
    '''
    This function randomly:
        - translates the input image by +-width_range and +-height_range (percentage).
        - scales the image by y_scaling and x_scaling (percentage)
        - shears the image by shearing_factor (radians)
    '''

    ty = random.uniform(-random_y_translation, random_y_translation)
    tx = random.uniform(-random_x_translation, random_x_translation)

    sx = random.uniform(1. - random_y_scaling, 1. + random_y_scaling)
    sy = random.uniform(1. - random_x_scaling, 1. + random_x_scaling)

    s = random.uniform(-random_shearing, random_shearing)

    gamma = random.uniform(0.001, 2)
    image = exposure.adjust_gamma(image, gamma)
    st = skimage_tf.AffineTransform(scale=(sx, sy),
                                    shear=s,
                                    translation=(tx * image.shape[1],
                                                 ty * image.shape[0]))
    augmented_image = skimage_tf.warp(image, st, cval=1.0)

    return transform(augmented_image * 255., label)
Example #20
def rescale(img_as_array,min_xy_scale_factor):
    """Randomly downscales the x and y ranges of an image while zero-padding
       image to retain the input image size shape.  Newly downscaled image
       occupies the upper-left most region of the output image.
       Please note this function is not commutative.  See augment() function.
       
      Args:
        img_as_array: image as 3D numpy array.
        min_xy_scale_factor: lower bound of downscaling factor for x and y
                             values.
        
      Returns:
        image: rescaled image as 3D numpy uint8 array.
        scale_xy: randomly determined downscale factors as tuple of x and y
                  factors.
    """

    scale_x = random.uniform(min_xy_scale_factor,1.0)
    scale_y = random.uniform(min_xy_scale_factor,1.0)
    scale_xy = (scale_x, scale_y)
    scale_xy_inverted = (1/scale_x, 1/scale_y)
    
    affine_transform = transform.AffineTransform(scale=scale_xy_inverted)

    return transform.warp(img_as_array, affine_transform, map_args={},
                          output_shape=None, order=1, mode='constant',
                          cval=140.0, clip=True,
                          preserve_range=True).astype(np.uint8), \
           scale_xy
Example #21
def transform_point(point_list, transform_parameters):
    scale_x, scale_y = transform_parameters['zy'], transform_parameters['zx']
    translate_x_px, translate_y_px = transform_parameters[
        'ty'], transform_parameters['tx']
    rotate = transform_parameters['theta']
    shear = transform_parameters['shear']
    flip_horizontal = transform_parameters.get('flip_horizontal', False)
    flip_vertical = transform_parameters.get('flip_vertical', False)
    if scale_x != 1.0 or scale_y != 1.0 or translate_x_px != 0 or translate_y_px != 0 or rotate != 0 \
            or shear != 0:
        matrix_to_topleft = skimage_tf.SimilarityTransform(
            translation=[-0.5, -0.5])
        matrix_transforms = skimage_tf.AffineTransform(
            scale=(scale_x, scale_y),
            translation=(translate_x_px, translate_y_px),
            rotation=math.radians(rotate),
            shear=math.radians(shear))
        matrix_to_center = skimage_tf.SimilarityTransform(
            translation=[0.5, 0.5])
        matrix = (matrix_to_topleft + matrix_transforms + matrix_to_center)
        point_list = skimage_tf.matrix_transform(point_list, matrix.params)
    if flip_horizontal or flip_vertical:
        matrix_to_topleft = skimage_tf.SimilarityTransform(
            translation=[-0.5, -0.5])
        point_list = skimage_tf.matrix_transform(point_list,
                                                 matrix_to_topleft.params)
        if flip_horizontal:
            point_list = [(-x, y) for x, y in point_list]
        if flip_vertical:
            point_list = [(x, -y) for x, y in point_list]
        matrix_to_center = skimage_tf.SimilarityTransform(
            translation=[0.5, 0.5])
        point_list = skimage_tf.matrix_transform(point_list,
                                                 matrix_to_center.params)
    return point_list
Example #22
def translate(img_as_array,output_size,scale_xy):
    """Translates an image and pads with zeros.  The provided x and y
       rescaling factors from a previous rescaling operation ensures that a
       randomly chosen translation does not cause the output image to lose any
       pixels. Please note this function is not commutative.  See augment()
       function.

      Args:
        img_as_array: image as 3D numpy array.
        output_size: tuple of height, width values indicating desired output
                     image size.
        scale_xy: tuple of x and y downscale factors from a previous rescale()
                  call, used to bound the random translation.

      Returns:
        image: image the size of output_size as 3D numpy uint8 array.
        translation_xy: randomly determined x and y translations of the image
                        as tuple.
    """

    scaled_width = (int(output_size[0]*scale_xy[0]))
    scaled_height = (int(output_size[1]*scale_xy[1]))

    translation_x = random.randint(0, (output_size[0]-scaled_width))
    translation_y = random.randint(0, (output_size[1]-scaled_height))
    translation_xy = (-translation_x, -translation_y)

    affine_transform = transform.AffineTransform(translation=(translation_xy))
    
    return transform.warp(img_as_array, affine_transform, map_args={},
                          output_shape=None, order=1, mode='constant',
                          cval=140.0, clip=True,
                          preserve_range=True).astype(np.uint8), \
           translation_xy
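
A hedged sketch of how rescale() and translate() might be chained; the augment() function that both docstrings mention is not part of this listing, and `img` is a hypothetical H x W x 3 uint8 array:

scaled, scale_xy = rescale(img, min_xy_scale_factor=0.7)
shifted, translation_xy = translate(scaled, output_size=img.shape[:2], scale_xy=scale_xy)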
Example #23
def shift(image, vector):
    vector = (-vector[0], -vector[1])
    shifted = transform.warp(image,
                             transform.AffineTransform(translation=vector),
                             mode='wrap',
                             preserve_range=True)
    return shifted.astype(image.dtype)
Example #24
def transform_vis_im_to_IR_coordinate_system(im):
    T = transform.AffineTransform(T_IR2v)
    im = transform.warp(im, T, output_shape=(hIR, wIR))
    imNew = np.zeros_like(im)

    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(K, dist, (wIR, hIR), 1,
                                                      (wIR, hIR))
    mapx, mapy = cv2.initUndistortRectifyMap(K, dist, None, newcameramtx,
                                             (wIR, hIR), 5)
    SCALE = 1.15
    mapx = cv2.resize(mapx,
                      None,
                      fx=SCALE,
                      fy=SCALE,
                      interpolation=cv2.INTER_LINEAR)
    mapy = cv2.resize(mapy,
                      None,
                      fx=SCALE,
                      fy=SCALE,
                      interpolation=cv2.INTER_LINEAR)

    for x in range(mapx.shape[1]):
        for y in range(mapx.shape[0]):
            try:
                x2 = int(mapx[y, x])
                y2 = int(mapy[y, x])
                imNew[y2, x2, :] = im[int(y / SCALE), int(x / SCALE), :]
            except:
                no_match = True  #just do nothing

    return imNew
Example #25
def warpAffine(img,M,shape=None):
    if shape is None:
        shape=img.shape
    if M.shape[0] == 2:  # M may be a 2x3 OpenCV-style matrix; pad it to 3x3
        M = np.concatenate((M,np.array([[0.0,0.0,1.0]])),axis=0)
    T = transform.AffineTransform(M)
    return transform.warp(img,T,output_shape=shape)
Example #26
def random_shear(image, gt_image, std=3.5,
                 lower=-10, upper=10, expand=True):

    assert lower < upper
    assert std > 0

    angle = truncated_normal(mean=0, std=std, lower=lower,
                             upper=upper)

    pi_angle = angle * np.pi / 360

    afine_tf = tf.AffineTransform(shear=pi_angle)

    image_r = (tf.warp(image / 255, inverse_map=afine_tf) * 255 + 0.4)\
        .astype(int)
    gt_image_r = tf.warp(gt_image / 255, inverse_map=afine_tf,
                         order=0)

    gt_image_r = ((255 * gt_image_r) + 0.4).astype(int)

    gt_image[10, 10] = 255
    if DEBUG:
        if not np.all(np.unique(gt_image_r) == np.unique(gt_image)):
            logging.info("np.unique(gt_image_r): {}".format(
                np.unique(gt_image_r)))
            logging.info("np.unique(gt_image): {}".format(np.unique(gt_image)))

            assert(False)

    return image_r, gt_image_r
Example #27
    def _estimate_single(self, predicted, measured):
        assert predicted.shape == self.shape
        assert measured.shape == self.shape
        flow = optical_flow_tvl1(predicted, measured)
        flow[[1,0],] = flow[[0,1],]
        xy_flow = self.xy_lin - flow
        _Afunc_coord_warp = lambda transform_vec: self._coordinate_warp(transform_vec, self.xy_lin, xy_flow)    

        #estimate transform matrix from optical flow
        results = sop.fmin_l_bfgs_b(_Afunc_coord_warp, np.array([0.0,0,0]))
        transform_final = results[0]
        if results[2]["warnflag"]:
            transform_final *= 0.0
            print("Transform estimation not converged")

        #inverse warp measured image
        transform_mat = np.array([np.cos(transform_final[0]), \
                                  -np.sin(transform_final[0]), \
                                  np.sin(transform_final[0]), \
                                  np.cos(transform_final[0]), \
                                  transform_final[1], \
                                  transform_final[2]])        
        aff_mat = np.array([transform_mat[[0,1,4]], transform_mat[[2,3,5]],[0,0,1]])
        tform = transform.AffineTransform(matrix = aff_mat)
        measured_warp = transform.warp(measured, tform.inverse, cval = 1.0)

        return measured_warp, transform_final
Example #28
def build_augmentation_transform(zoom=1.0, rotation=0, shear=0, translation=(0, 0)):
    tform_augment = transform.AffineTransform(scale=(1/zoom, 1/zoom),
                                              rotation=np.deg2rad(rotation),
                                              shear=np.deg2rad(shear),
                                              translation=translation)
    tform = tform_center + tform_augment + tform_uncenter  # shift to center, augment, shift back (for the rotation/shearing)
    return tform
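
build_augmentation_transform relies on the module-level tform_center and tform_uncenter; a minimal sketch of how they are typically defined, following the centering pattern from Example #2 (`image` here is an assumption standing in for the array being augmented):

rows, cols = image.shape[:2]
center_shift = np.array((cols, rows)) / 2. - 0.5  # (x, y) order, matching skimage's coordinate convention
tform_center = transform.SimilarityTransform(translation=-center_shift)
tform_uncenter = transform.SimilarityTransform(translation=center_shift)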
Example #29
def convert_affine_to_transform(D, shape):
    """Converts an affine transform on a diffraction pattern to a suitable
    form for skimage.transform.warp()

    Parameters
    ----------
    D : np.array
        Affine transform to be applied
    shape : tuple
        Shape tuple in form (y,x) for the diffraction pattern

    Returns
    -------
    transformation : np.array
        3x3 numpy array of the transformation to be applied.

    """

    shift_x = (shape[1] - 1) / 2
    shift_y = (shape[0] - 1) / 2

    tf_shift = tf.SimilarityTransform(translation=[-shift_x, -shift_y])
    tf_shift_inv = tf.SimilarityTransform(translation=[shift_x, shift_y])

    # This defines the transform you want to perform
    distortion = tf.AffineTransform(matrix=D)

    # skimage transforms can be added like this, does matrix multiplication,
    # hence the need for the brackets. (Note tf.warp takes the inverse)
    transformation = (tf_shift + (distortion + tf_shift_inv)).inverse

    return transformation
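
A minimal usage sketch (assuming skimage.transform is imported as `tf`, `dp` is a 2-D diffraction pattern array, and `D` is a hypothetical 3x3 affine matrix):

inverse_map = convert_affine_to_transform(D, dp.shape)
corrected = tf.warp(dp, inverse_map)  # warp accepts the returned callable as its inverse map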
Example #30
def affine_transformation(z, matrix, order, **kwargs):
    """Apply an affine transformation to a 2-dimensional array.

    Parameters
    ----------
    z : np.array
        Array to be transformed
    matrix : np.array
        3x3 numpy array specifying the affine transformation to be applied.
    order : int
        Interpolation order.
    kwargs :
        To be passed to skimage.transform.warp

    Returns
    -------
    trans : array
        Affine transformed diffraction pattern.
    """

    # These three lines account for the transformation center not being (0,0)
    shift_y, shift_x = np.array(z.shape[:2]) / 2.
    tf_shift = tf.SimilarityTransform(translation=[-shift_x, -shift_y])
    tf_shift_inv = tf.SimilarityTransform(translation=[shift_x, shift_y])

    # This defines the transform you want to perform
    transformation = tf.AffineTransform(matrix=matrix)

    #skimage transforms can be added like this, actually matrix multiplication,
    #hence the need for the brackets. (Note tf.warp takes the inverse)
    trans = tf.warp(z, (tf_shift + (transformation + tf_shift_inv)).inverse,
                    order=order,
                    **kwargs)

    return trans