Example #1
 def __init__(self, res, path, size=util.MAX_THUMBNAIL_SIZE):
     super(Face, self).__init__()
     self.path = path
     img = util.rotate_image(path)  # the helper returns a wx.Image loaded from the path
     self.bmp = img.ConvertToBitmap()
     self.name = None
     if res.get('faceId'):
         self.id = res['faceId']
     if res.get('persistedFaceId'):
         self.persisted_id = res['persistedFaceId']
     # Crop the thumbnail to the detected face rectangle, if one was returned.
     if res.get('faceRectangle'):
         self.rect = Rect(res['faceRectangle'])
         self.bmp = self.bmp.GetSubBitmap(
             wx.Rect(
                 self.rect.left,
                 self.rect.top,
                 self.rect.width,
                 self.rect.height,
             ))
     if res.get('faceAttributes'):
         self.attr = Attribute(res['faceAttributes'])
     # Scale the (possibly cropped) bitmap down to thumbnail size.
     self.bmp = util.scale_image(
         self.bmp.ConvertToImage(),
         size=size,
     ).ConvertToBitmap()
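Rect and Attribute are small helpers that are not shown in this snippet. Assuming the 'faceRectangle' dict uses left, top, width and height keys (the same names the code reads back from self.rect), Rect might look roughly like this; the class body below is a sketch, not the project's actual code:

class Rect(object):
    """Value object for a face bounding box (sketch, not the original)."""

    def __init__(self, rect):
        # rect is the 'faceRectangle' dict from the detection response.
        self.left = rect['left']
        self.top = rect['top']
        self.width = rect['width']
        self.height = rect['height']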
Example #2
  def render(self, card_dims, text):
    """ Generate a transparent PIL card layer with the text on it """
    # If the user has set a max width, respect that.
    # If not, we use the edge of the card.

    if (self.rotation == 0):
      maxwidth, maxheight = util.aligned_maxdims((self.x, self.y),
                                                 (self.width, self.height),
                                                 card_dims,
                                                 self.x_align,
                                                 self.y_align)
    else:
      # Due to rotation of the text, we don't fully take the card's edges into account.
      maxdim = max(card_dims)
      maxwidth = min(self.width, maxdim)
      maxheight = min(self.height, maxdim)

    # Split the text into lines, in a way that fits our width
    lines = [text]
    if (self.wordwrap):
      lines = wrap_pixel_width(text, maxwidth, self.font, linesep='\\n')

    if (lines is None):
      sys.stderr.write("Warning: Unable to wrap text label \"%s\"\n" % text)
      return None

    # Render the text, one line at a time
    label = render_lines(lines,
                         font=self.font,
                         color=self.color,
                         justify=self.justify,
                         spacing=self.spacing)

    if (label.width > maxwidth):
      sys.stderr.write("Warning: Text label overflows max width: \"%s\"\n" % text)
      return None

    if (label.height > maxheight):
      sys.stderr.write("Warning: Text label overflows max height: \"%s\"\n" % text)
      return None

    if (self.rotation != 0):
      label = util.rotate_image(label, self.rotation)

    # Figure out where to place the top-left corner of the label
    x,y = util.alignment_to_absolute((self.x, self.y), label.size, self.x_align, self.y_align)

    if (x < 0 or y < 0 or
        x + label.width > card_dims[0] or
        y + label.height > card_dims[1]):
      sys.stderr.write("Warning: Text label overflows card boundary: \"%s\"\n" % text)
      return None

    image = Image.new("RGBA", card_dims, (0,0,0,0))
    image.paste(label, (x,y), mask=label)

    return image
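wrap_pixel_width, render_lines and util.alignment_to_absolute are project helpers that are not included here. Judging from how its result is used as the paste position, alignment_to_absolute converts the configured anchor point plus an alignment mode into a top-left corner; a sketch under that reading (the 'left'/'center'/'right' and 'top'/'middle'/'bottom' keywords are guesses):

def alignment_to_absolute(anchor, size, x_align, y_align):
    # anchor: the (x, y) the user configured; size: (width, height) of the
    # rendered label. Returns the top-left corner to paste at.
    x, y = anchor
    w, h = size
    if x_align == 'center':
        x -= w // 2
    elif x_align == 'right':
        x -= w
    if y_align == 'middle':
        y -= h // 2
    elif y_align == 'bottom':
        y -= h
    return x, y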
Example #3
 def set_path(self, path):
     """Set the image path."""
     img = util.rotate_image(path)
     width = img.GetWidth()
     img = util.scale_image(img, size=self.size)
     new_width = img.GetWidth()
     self.scale = 1.0 * new_width / width
     self.bmp = img.ConvertToBitmap()
     self.bitmap.SetBitmap(self.bmp)
     self.sizer.Layout()
Example #5
def stitch(directory,
           n_x: int,
           n_y: int,
           x_size: int,
           y_size: int,
           rotation: float,
           filter: int = 0,
           unsharp=0):
    out = zeros((n_x * x_size, n_y * y_size, 3), dtype=uint8)

    # Filenames don't start at zero, picXXXX.jpg
    filenames = [
        filename for filename in os.listdir(directory)
        if filename.endswith(".jpg")
    ]
    filename_offset = int(sorted(filenames)[0][3:7])

    layout = layouter(n_x, n_y, True, 1)

    for i in range(n_x):
        for j in range(n_y):

            #a = n_x - i - 1
            a = i
            #b = n_y - j - 1
            b = j

            fname = directory + ("/pic%04i.jpg" %
                                 (layout(a, b) + filename_offset))
            #print("Reading '%s'"%fname)

            image = cv.imread(fname)

            if filter != 0:
                image = cv.GaussianBlur(image,
                                        (2 * filter + 1, 2 * filter + 1),
                                        filter)

            #if unsharp != 0:
            #    blurred = cv.GaussianBlur(image, (2*unsharp+1, 2*unsharp+1), unsharp)
            #    image = cv.subtract(image, blurred)

            image = rotate_image(image, rotation)
            centre_image = get_centre(image, x_size, y_size)

            out[i * x_size:(i + 1) * x_size,
                j * y_size:(j + 1) * y_size, :] = centre_image

    return out
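get_centre is not part of the snippet; given that its result must fill an x_size by y_size tile of out, it presumably crops a centred window from the rotated frame. A minimal sketch under that assumption (the helper body is a guess):

def get_centre(image, x_size, y_size):
    # Crop a centred window of x_size rows by y_size columns so the result
    # matches the tile shape written into `out` above.
    h, w = image.shape[:2]
    top = (h - x_size) // 2
    left = (w - y_size) // 2
    return image[top:top + x_size, left:left + y_size]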
Example #6
def test_rotate_image(image_path, item_size=(256, 256), scale=0.2, item_num=6):
    image_list = util.load_images(image_path)
    image = util.build_image(image_list, item_size, scale, item_num)
    pixels = image.reshape(-1, image.shape[2])
    unique_elements, counts_elements = np.unique(pixels, axis=0, return_counts=True)
    counts_elements = np.sort(counts_elements)[::-1]
    angles = [0, 90, 180, 270]
    while True:
        angle = angles[np.random.randint(0, len(angles))]
        rot_image = np.clip(image * 255, 0, 255).astype(np.uint8)
        rot_image = util.rotate_image(rot_image, angle)
        util.draw_text(rot_image, 'angle: %d' % angle, color=(255, 0, 0))
        cv2.imshow('image', rot_image)
        key = cv2.waitKey(1000000) & 0xFF
        if key == 27:  # Esc key
            break
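Note that the rotate_image exercised here takes an in-memory array and an angle in degrees, unlike the path-based wx helper in Examples #1 and #3. For the right-angle values used in this test, one possible implementation is a thin np.rot90 wrapper; this is a sketch only, the rotation direction is an assumption, and util.rotate_image may well do more:

import numpy as np

def rotate_right_angle(image, angle):
    # Hypothetical stand-in for util.rotate_image at multiples of 90 degrees.
    # np.rot90 rotates counter-clockwise; angle is expected in {0, 90, 180, 270}.
    return np.rot90(image, k=(angle // 90) % 4)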
Example #7
  def render(self, card_dims, image):
    """ Generate a transparent PIL card layer with the image on it """

    # Since images aren't wrapped, we simply accept the user's scaling 
    # settings or (if they are 0), use the image's own dimensions.
    # If the image falls outside the card boundaries, we warn but allow it.

    w, h = image.size
    aspect = w/h

    if (self.width == 0 and self.height == 0):
      # No scaling, use image as-is
      pass
    else:
      scalew,scaleh = self.width, self.height

      if (scalew == 0):
        # Proportional scaling to given height
        scalew = round(scaleh * aspect)

      if (scaleh == 0):
        scaleh = round(scalew / aspect)

      image = image.resize((scalew, scaleh), Image.ANTIALIAS)


    if (self.rotation != 0):
      image = util.rotate_image(image, self.rotation)

    # Figure out where to place the top-left corner of the label
    x,y = util.alignment_to_absolute((self.x, self.y), image.size, self.x_align, self.y_align)

    if (x < 0 or y < 0 or
        x + image.width > card_dims[0] or
        y + image.height > card_dims[1]):
      sys.stderr.write("Warning: Image label overflows card boundary")

    card = Image.new("RGBA", card_dims, (0,0,0,0))
    card.paste(image, (x,y), mask=image)

    return card
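Worked example for the proportional branch: a 300x200 source image (aspect 1.5) with height set to 100 and width left at 0 resizes to 150x100, since scalew = round(scaleh * aspect) = round(100 * 1.5). Note that Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the equivalent resampling filter in current versions.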
Example #8
 def __init__(self, res, path, size=util.MAX_THUMBNAIL_SIZE):
     super(Face, self).__init__()
     self.path = path
     img = util.rotate_image(path)
     self.bmp = img.ConvertToBitmap()
     self.name = None
     if res.get('faceId'):
         self.id = res['faceId']
     if res.get('persistedFaceId'):
         self.persisted_id = res['persistedFaceId']
     if res.get('faceRectangle'):
         self.rect = Rect(res['faceRectangle'])
         self.bmp = self.bmp.GetSubBitmap(wx.Rect(
             self.rect.left,
             self.rect.top,
             self.rect.width,
             self.rect.height,
         ))
     if res.get('faceAttributes'):
         self.attr = Attribute(res['faceAttributes'])
     self.bmp = util.scale_image(
         self.bmp.ConvertToImage(),
         size=size,
     ).ConvertToBitmap()
Example #9
for filepath in glob.glob(os.path.join(map_images_dir, 'D*')):
    filename = filepath.split('/')[-1]

    if test_only:
        if filename.split('.')[0] not in test_filenames:
            continue

    print(filepath)
    map_img = cv2.imread(filepath)
    original_shape = map_img.shape

    preds = []
    confs = []
    for angle in angles:
        rot_img, rot_mat, bounds = rotate_image(map_img, angle, original_shape)
        height = rot_img.shape[0]
        width = rot_img.shape[1]
        current_x = 0
        current_y = 0

        while current_y + crop_h < height:
            while current_x + crop_w < width:

                crop_img = rot_img[current_y:current_y + crop_h,
                                   current_x:current_x + crop_w]

                if do_preprocess:
                    crop_img = preprocess(crop_img, (512, 512))

                model_output = model.predict(np.array([crop_img]),
Example #10
			# print(os.path.join(work_dir, mat_name))

			print(os.path.join(data_dir, im_name))

			im = cv2.imread(os.path.join(data_dir, im_name))
			#im, translation = adjust_image_size(im, padding_amount=500)
			translation = (0, 0)
			#print(im.shape)
			
			all_boxes, all_scores, all_rotations = [], [], []
			for angle in range(0, 95, 5):
				#print("Running detection at angle: " + str(angle))
				image_center = tuple(np.array((im.shape[0],im.shape[1]))/2)
				R = cv2.getRotationMatrix2D(image_center, angle, scale=1.0)
				#rot_img = cv2.warpAffine(im, R, (im.shape[0], im.shape[1]), flags=cv2.INTER_LINEAR)
				rot_img, rot_mat, bounds = rotate_image(im, angle, im.shape)				
				
				# # Detect all object classes and regress object bounds
				timer = Timer()
				timer.tic()
				# scores, boxes = im_detect(net, im)
				scores, boxes = im_detect_sliding_crop(net, rot_img, crop_h, crop_w, step)
				
				print(np.max(scores))
				print("Boxes for angle " + str(angle) + ": " + str(boxes.shape[0]))				    
				Rinv = cv2.getRotationMatrix2D(image_center, -angle, scale=1.0)

				all_boxes.append(boxes)
				all_scores.append(scores)
				all_rotations.append( {'angle':angle, 'center':image_center, 'R':R, 'Rinv': Rinv} ) 
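The three-value rotate_image(image, angle, shape) called in the two map-detection snippets above and in the data generator below is not included here; its return values suggest a canvas-expanding rotation that also hands back the affine matrix. A minimal sketch under that assumption (the function name and returning the new (width, height) as the bounds are guesses):

import cv2

def rotate_image_with_bounds(image, angle, original_shape):
    # Hypothetical stand-in for the rotate_image helper used in these snippets.
    # Rotates around the image centre and expands the canvas so nothing is clipped.
    h, w = original_shape[:2]
    centre = (w / 2.0, h / 2.0)
    rot_mat = cv2.getRotationMatrix2D(centre, angle, 1.0)

    # Size of the bounding canvas after rotation.
    cos, sin = abs(rot_mat[0, 0]), abs(rot_mat[0, 1])
    new_w = int(h * sin + w * cos)
    new_h = int(h * cos + w * sin)

    # Shift the transform so the rotated image stays inside the new canvas.
    rot_mat[0, 2] += new_w / 2.0 - centre[0]
    rot_mat[1, 2] += new_h / 2.0 - centre[1]

    rot_img = cv2.warpAffine(image, rot_mat, (new_w, new_h))
    bounds = (new_w, new_h)
    return rot_img, rot_mat, bounds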
				    
Example #11
def tbpp_raw_generate_data(map_images_dir,
                           image_paths,
                           regions,
                           batch_size,
                           prior_util,
                           encode=True,
                           do_rotate=False,
                           do_preprocess=False):
    crop_h = 512
    crop_w = 512
    step = 400
    angles = range(-90, 95, 5) if do_rotate else [0]

    inputs, targets = [], []

    mean = np.array([104, 117, 123])

    idxs = np.arange(len(image_paths))
    np.random.shuffle(idxs)
    for _, i in enumerate(idxs):
        filepath = os.path.join(map_images_dir, image_paths[i])

        map_img = cv2.imread(filepath)
        original_shape = map_img.shape

        for angle in angles:
            rot_img, rot_mat, _ = rotate_image(map_img, angle, original_shape)
            height = rot_img.shape[0]
            width = rot_img.shape[1]
            current_x = 0
            current_y = 0

            while current_y + crop_h < height:
                while current_x + crop_w < width:

                    crop_img = rot_img[current_y:current_y + crop_h,
                                       current_x:current_x + crop_w]
                    if do_preprocess:
                        crop_img = preprocess(crop_img, (512, 512))

                    crop_boxes = []
                    for region in regions:
                        # rotate to orientation when image is not rotated
                        image_center = (original_shape[1] // 2,
                                        original_shape[0] // 2)
                        rot_mat = cv2.getRotationMatrix2D(image_center,
                                                          angle,
                                                          scale=1.0)

                        # add col for rotation
                        region = np.concatenate(
                            [region, np.ones([region.shape[0], 1])], axis=1)

                        # rotate
                        transformed_points = rot_mat.dot(region.T).T

                        pt1 = [
                            int(transformed_points[0][0]),
                            int(transformed_points[0][1])
                        ]
                        pt2 = [
                            int(transformed_points[1][0]),
                            int(transformed_points[1][1])
                        ]
                        pt3 = [
                            int(transformed_points[2][0]),
                            int(transformed_points[2][1])
                        ]
                        pt4 = [
                            int(transformed_points[3][0]),
                            int(transformed_points[3][1])
                        ]

                        region = np.array([pt1, pt2, pt3, pt4])

                        xmin = np.min(region[:, 0])
                        xmax = np.max(region[:, 0])
                        ymin = np.min(region[:, 1])
                        ymax = np.max(region[:, 1])

                        if xmin > current_x and xmax < (
                                current_x + crop_w) and ymin < (
                                    current_y + crop_h) and ymax > current_y:
                            crop_xmin = xmin - current_x
                            crop_ymin = ymin - current_y
                            crop_xmax = xmax - current_x
                            crop_ymax = ymax - current_y

                            crop_boxes.append([
                                crop_xmin, crop_ymax, crop_xmax, crop_ymax,
                                crop_xmax, crop_ymin, crop_xmin, crop_ymin
                            ])

                    crop_boxes = np.array(crop_boxes)
                    crop_boxes[:, 0::2] /= crop_img.shape[1]
                    crop_boxes[:, 1::2] /= crop_img.shape[0]

                    # append classes
                    crop_boxes = np.concatenate(
                        [crop_boxes,
                         np.ones([crop_boxes.shape[0], 1])],
                        axis=1)

                    crop_img -= mean[np.newaxis, np.newaxis, :]
                    #img = img / 25.6

                    inputs.append(crop_img)
                    targets.append(crop_boxes)

                    #if len(targets) == batch_size or j == len(idxs)-1: # last batch in epoch can be smaller then batch_size
                    if len(targets) == batch_size:
                        if encode:
                            targets = [prior_util.encode(y) for y in targets]
                            targets = np.array(targets, dtype=np.float32)
                        tmp_inputs = np.array(inputs, dtype=np.float32)
                        tmp_targets = np.array(targets, dtype=np.float32)
                        inputs, targets = [], []
                        yield tmp_inputs, tmp_targets

                    current_x += step

            current_x = 0
            current_y += step

        print('NEW epoch')
    print('EXIT generator')
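For context, tbpp_raw_generate_data is an ordinary Python generator that yields (inputs, targets) batches while making one shuffled pass over image_paths. A hedged usage sketch, assuming the directory, path list, regions and prior_util objects from the snippet are already defined:

gen = tbpp_raw_generate_data(map_images_dir,
                             image_paths,
                             regions,
                             batch_size=8,
                             prior_util=prior_util,
                             encode=True,
                             do_rotate=True)

x_batch, y_batch = next(gen)   # one batch of mean-subtracted crops and encoded boxes
print(x_batch.shape, y_batch.shape)

Because the generator returns after a single pass, multi-epoch training (for example with Keras's fit_generator) would need it re-created or wrapped in an outer loop.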