# Example 1
 def perturb_frame(frame, params):
     """Apply a random affine perturbation to a frame's image and objects.

     Args:
         frame: source Frame whose image and objects are transformed.
         params: perturbation parameters forwarded to
             RandomPerturber.generate_random_affine.

     Returns:
         A new Frame containing the transformed image and deep copies of
         only those objects whose perturbed boxes still intersect the
         image bounds (fully out-of-bounds objects are dropped).
     """
     dims = frame.image.get_hw()
     # center the random affine on the image midpoint; dims/2 assumes dims
     # supports elementwise division (presumably a numpy array) — TODO confirm
     rand_affine = RandomPerturber.generate_random_affine(dims / 2, dims, params)
     perturbed_frame = Frame(frame.image_path)
     perturbed_frame.image = rand_affine.apply_to_image(frame.image, dims)
     for obj in frame.objects:
         perturbed_obj_box = rand_affine.apply_to_box(obj.box)
         perturbed_polygons = rand_affine.apply_to_polygons(obj.polygons)
         # filter out completely out of bound objects
         if Box.intersection(perturbed_obj_box,
                             perturbed_frame.image.get_bounding_box()) is not None:
             obj_copy = copy.deepcopy(obj)
             obj_copy.box = perturbed_obj_box
             obj_copy.polygons = perturbed_polygons
             perturbed_frame.objects.append(obj_copy)
     return perturbed_frame
# Example 2
def apply_affine_to_frame(frame, affine, output_size):
    """Apply a given affine transform to a frame's image and objects.

    Args:
        frame: source Frame whose image and objects are transformed.
        affine: transform providing apply_to_image / apply_to_box /
            apply_to_polygons.
        output_size: size of the transformed output image.

    Returns:
        A new Frame with the transformed image and deep copies of only
        those objects whose transformed boxes still intersect the image
        bounds (fully out-of-bounds objects are dropped).
    """
    perturbed_frame = Frame(frame.image_path)
    perturbed_frame.image = affine.apply_to_image(frame.image, output_size)
    for obj in frame.objects:
        perturbed_obj_box = affine.apply_to_box(obj.box)
        perturbed_polygons = affine.apply_to_polygons(obj.polygons)
        # filter out completely out of bound objects
        if Box.intersection(
                perturbed_obj_box,
                perturbed_frame.image.get_bounding_box()) is not None:
            obj_copy = copy.deepcopy(obj)
            obj_copy.box = perturbed_obj_box
            obj_copy.polygons = perturbed_polygons
            perturbed_frame.objects.append(obj_copy)
    return perturbed_frame
# Example 3
 def find_negative_crop(self, frame, objects):
     """Find a random crop of self.crop_size that overlaps no object box.

     Args:
         frame: Frame to sample the crop from.
         objects: iterable of objects whose .box the crop must not
             intersect.

     Returns:
         A Box for the negative crop, or None if no non-overlapping crop
         was found within the attempt budget.
     """
     # pick a random crop, check that it does not overlap with an existing target
     # TODO, this is inefficient, fix this algorithm later
     frame_size = frame.image.get_wh()
     # use floor division: randrange requires integer bounds (a float
     # crop_size/2 raises ValueError/TypeError on modern Python 3)
     half_w = self.crop_size[0] // 2
     half_h = self.crop_size[1] // 2
     max_attempts = 10
     for _ in range(max_attempts):
         randcx = random.randrange(half_w, frame_size[0] - half_w)
         randcy = random.randrange(half_h, frame_size[1] - half_h)
         new_box = Box(randcx - half_w, randcy - half_h,
                       randcx + half_w, randcy + half_h)
         if all(Box.intersection(o.box, new_box) is None for o in objects):
             return new_box
     return None
# Example 4
    def load_train(self):
        """Sample a (positive, negative, anchor) detection training triplet.

        Repeatedly picks two random frames: a perturbed positive crop and
        an anchor crop are taken around random objects, and a negative
        crop that overlaps no object is searched for in the first frame.

        Returns:
            TripletDetectionSample with data = [pos, neg, anchor] CHW
            float tensors and target = [pos_map, neg_map, anchor].
        """
        frame1, frame2, neg_box, pos_box, anchor_box = None, None, None, None, None
        # TODO, this should probably break if never find anything for a while
        while neg_box is None:
            indices = random.sample(self.frame_ids, 2)
            frame1, frame2 = [self.source[x] for x in indices]
            # materialize as lists: random.choice needs a sequence (a
            # filter iterator raises TypeError and would be consumed
            # before frame1_objs is reused by find_negative_crop below)
            frame1_objs = [x for x in frame1.get_objects()
                           if x.obj_type in self.obj_types]
            frame2_objs = [x for x in frame2.get_objects()
                           if x.obj_type in self.obj_types]
            # resample if either frame has no matching objects
            # (random.choice on an empty list raises IndexError)
            if not frame1_objs or not frame2_objs:
                continue
            # get random pos boxes
            pos_box = random.choice(frame1_objs).box
            anchor_box = random.choice(frame2_objs).box

            # find random neg crop
            neg_box = self.find_negative_crop(frame1, frame1_objs)

        perturbed_pos_box = RandomPerturber.perturb_crop_box(
            pos_box, self.perturbations)
        affine_crop0 = crop_image_resize(frame1.image, perturbed_pos_box,
                                         self.crop_size)
        pos_crop = affine_crop0.apply_to_image(frame1.image, self.crop_size)

        affine_crop1 = crop_image_resize(frame2.image, anchor_box,
                                         self.anchor_size)
        anchor_crop = affine_crop1.apply_to_image(frame2.image,
                                                  self.anchor_size)

        affine_crop2 = crop_image_resize(frame1.image, neg_box, self.crop_size)
        neg_crop = affine_crop2.apply_to_image(frame1.image, self.crop_size)

        # every object box intersecting the perturbed positive crop,
        # mapped into crop coordinates for the response map
        intersected_boxes = [
            affine_crop0.apply_to_box(obj.box)
            for obj in frame1.get_objects()
            if obj.obj_type in self.obj_types
            and Box.intersection(obj.box, perturbed_pos_box) is not None
        ]

        def _to_tensor(image):
            # convert a crop to a CHW float tensor with values in [0, 1]
            return torch.Tensor(
                image.to_order_and_class(
                    Ordering.CHW, ValueClass.FLOAT01).get_data().astype(float))

        pos = _to_tensor(pos_crop)
        neg = _to_tensor(neg_crop)
        anchor = _to_tensor(anchor_crop)

        pos_map = torch.Tensor(
            generate_response_map_from_boxes(pos_crop.get_hw(),
                                             intersected_boxes))
        # no boxes -> an all-background response map for the negative crop
        neg_map = torch.Tensor(
            generate_response_map_from_boxes(pos_crop.get_hw()))

        data = [pos, neg, anchor]
        target = [pos_map, neg_map, anchor]
        return TripletDetectionSample(data, target)