Example #1
    def test_priorbb(self):
        prior_layer_cfg = [
            # Example:
            {
                'layer_name': 'Conv5',
                'feature_dim_hw': (38, 38),
                'bbox_size': (30, 30),
                'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, 1.0)
            },
            {
                'layer_name': 'Conv11',
                'feature_dim_hw': (19, 19),
                'bbox_size': (60, 60),
                'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, 1.0)
            },
            {
                'layer_name': 'Conv14_2',
                'feature_dim_hw': (10, 10),
                'bbox_size': (111, 111),
                'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, 1.0)
            },
            {
                'layer_name': 'Conv15_2',
                'feature_dim_hw': (5, 5),
                'bbox_size': (162, 162),
                'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, 1.0)
            },
            {
                'layer_name': 'Conv16_2',
                'feature_dim_hw': (3, 3),
                'bbox_size': (213, 213),
                'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, 1.0)
            },
            {
                'layer_name': 'Conv17_2',
                'feature_dim_hw': (1, 1),
                'bbox_size': (264, 264),
                'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, 1.0)
            }
        ]
        pp = generate_prior_bboxes(prior_layer_cfg)

        print(pp[0:1], pp[39:40])
        temp = iou(pp[0:6], pp[0:1])
        print('iou', temp)
        gt_label = torch.tensor([1])
        # print(gt_label.dim[0])
        print('matching', match_priors(pp[0:38], pp[38:39], gt_label, 0.5))
        np.set_printoptions(threshold=np.inf)
        size_bounds = [0.2, 0.9]
        img_shape = [300, 300]
        # list = self.ssd_size_bounds_to_values(size_bounds,6,img_shape)
        # print(list)
        #prior_bbox = self.ssd_anchor_one_layer((300,300),(38,38),(30,60), [2, .5, 3, 1. / 3], 3)
        #print(prior_bbox)

        self.assertEqual('foo'.upper(), 'FOO')
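Every example on this page drives the same helper, whose implementation is not shown here. The following is only a minimal sketch of a generate_prior_bboxes consistent with the configs above: one box per aspect ratio per cell, centers and sizes normalized to [0, 1]. The '1t' marker some later configs pass presumably requests the extra s'_k = sqrt(s_k * s_(k+1)) box from the SSD paper; the sketch simply skips non-numeric entries.

import numpy as np
import torch

def generate_prior_bboxes(prior_layer_cfg, img_size=300.0):
    # Sketch only; the real helper in these snippets may differ in details.
    priors = []
    for cfg in prior_layer_cfg:
        fh, fw = cfg['feature_dim_hw']
        s = cfg['bbox_size'][0] / img_size            # scale in [0, 1]
        for i in range(fh):
            for j in range(fw):
                cx = (j + 0.5) / fw                   # normalized cell center
                cy = (i + 0.5) / fh
                for ar in cfg['aspect_ratio']:
                    if not isinstance(ar, (int, float)):
                        continue                      # skip markers like '1t'
                    w = s * np.sqrt(ar)
                    h = s / np.sqrt(ar)
                    priors.append([cx, cy, w, h])
    return torch.tensor(priors, dtype=torch.float32).clamp(0.0, 1.0)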
Example #2
    def __init__(self, dataset_list):
        self.dataset_list = dataset_list

        # Prior bounding boxes (configuration supplied by bbox_helper):
        self.prior_bboxes = bbox_helper.generate_prior_bboxes(
            prior_layer_cfg=bbox_helper.prior_layer_cfg)

        # Pre-process parameters:
        self.mean = np.asarray((127, 127, 127))
        self.std = 128.0
Example #3
    def __init__(self, dataset_list, train, show):
        self.dataset_list = dataset_list

        # prior bounding box
        self.prior_bboxes = generate_prior_bboxes()

        # Pre-process parameters:
        #  Normalize: (I-self.mean)/self.std
        self.mean = np.asarray((127, 127, 127))
        self.std = 128.0
        self.train = train
        self.show = show
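Both constructors above normalize with (I - self.mean) / self.std, and the visualization tests further down invert it as img * 128 + 127. A small pair of helpers making that round trip explicit (the names are illustrative, not from the original code):

import numpy as np

MEAN = np.asarray((127.0, 127.0, 127.0))
STD = 128.0

def normalize(img):
    # HWC image array -> zero-centered float, roughly in [-1, 1]
    return (np.asarray(img, dtype=np.float32) - MEAN) / STD

def denormalize(img):
    # inverse transform, back to the [0, 255] range
    return img * STD + MEAN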
Example #4
    def __init__(self, dataset_list, n_augmented=0, debug=False):
        self.dataset_list = dataset_list

        # Prior bounding box configuration (one entry per feature map):
        prior_layer_cfg = [{
            'layer_name': 'Conv6',
            'feature_dim_hw': (19, 19),
            'bbox_size': (30, 30),
            'aspect_ratio': (1.0, 1 / 2, 1 / 4, 2.0, 4.0, np.sqrt(1.16))
        }, {
            'layer_name': 'Conv12',
            'feature_dim_hw': (10, 10),
            'bbox_size': (78, 78),
            'aspect_ratio': (1.0, 1 / 2, 1 / 4, 2.0, 4.0, np.sqrt(1.16))
        }, {
            'layer_name': 'Conv8_2',
            'feature_dim_hw': (5, 5),
            'bbox_size': (126, 126),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, np.sqrt(1.16))
        }, {
            'layer_name': 'Conv9_2',
            'feature_dim_hw': (3, 3),
            'bbox_size': (174, 174),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, np.sqrt(1.16))
        }, {
            'layer_name': 'Conv10_2',
            'feature_dim_hw': (2, 2),
            'bbox_size': (222, 222),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, np.sqrt(1.16))
        }, {
            'layer_name': 'Conv11_2',
            'feature_dim_hw': (1, 1),
            'bbox_size': (270, 270),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, np.sqrt(1.16))
        }]

        self.prior_bboxes = generate_prior_bboxes(prior_layer_cfg)

        # Pre-process parameters:
        #  Normalize: (I-self.mean)/self.std
        self.mean = np.asarray((127, 127, 127))
        self.std = 128.0
        self.n_augmented = n_augmented
        self.net_size = (300, 300)
        self.debug = debug
Example #5
    def __init__(self, dataset_list, num_worker):
        self.dataset_list = dataset_list
        self.num_prior_bbox = len(self.bounding_box_ratios) + 1
        self.prepared_index = 0
        self.num_worker = num_worker

        # Initialize variables.
        self.imgWidth, self.imgHeight, self.crop_coordinates, self.locations = None, None, None, None
        self.prior_layer_cfg = [{
            'feature_dim_hw': (75, 75),
            'bbox_size': (7.5, 7.5),
            'aspect_ratio': self.bounding_box_ratios
        }, {
            'feature_dim_hw': (38, 38),
            'bbox_size': (40.71, 40.71),
            'aspect_ratio': self.bounding_box_ratios
        }, {
            'feature_dim_hw': (19, 19),
            'bbox_size': (73.93, 73.93),
            'aspect_ratio': self.bounding_box_ratios
        }, {
            'feature_dim_hw': (10, 10),
            'bbox_size': (107.14, 107.14),
            'aspect_ratio': self.bounding_box_ratios
        }, {
            'feature_dim_hw': (5, 5),
            'bbox_size': (140.36, 140.36),
            'aspect_ratio': self.bounding_box_ratios
        }, {
            'feature_dim_hw': (3, 3),
            'bbox_size': (173.57, 173.57),
            'aspect_ratio': self.bounding_box_ratios
        }, {
            'feature_dim_hw': (2, 2),
            'bbox_size': (206.79, 206.79),
            'aspect_ratio': self.bounding_box_ratios
        }, {
            'feature_dim_hw': (1, 1),
            'bbox_size': (240, 240),
            'aspect_ratio': self.bounding_box_ratios
        }]
        self.prior_bboxes = helper.generate_prior_bboxes(
            prior_layer_cfg=self.prior_layer_cfg)  # 60552 priors in total.

        # Pre-process parameters, normalize: (I-self.mean)/self.std.
        self.mean = torch.Tensor([127, 127, 127])
        self.std = 128.0
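The trailing "# 60552" comment can be sanity-checked from the eight feature maps above: assuming num_prior_bbox = 8 (i.e. seven entries in self.bounding_box_ratios, plus one), the cell counts work out exactly.

# (75^2 + 38^2 + 19^2 + 10^2 + 5^2 + 3^2 + 2^2 + 1^2) = 7569 cells,
# and 7569 cells * 8 priors per cell = 60552 priors.
cells = sum(h * h for h in (75, 38, 19, 10, 5, 3, 2, 1))
assert cells * 8 == 60552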
Example #6
    def test_bbox2loc(self):
        prior_layer_cfg = [
            # Example:
            {
                'layer_name': 'Conv5',
                'feature_dim_hw': (38, 38),
                'bbox_size': (30, 30),
                'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, 1.0)
            },
            {
                'layer_name': 'Conv11',
                'feature_dim_hw': (19, 19),
                'bbox_size': (60, 60),
                'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, 1.0)
            },
            {
                'layer_name': 'Conv14_2',
                'feature_dim_hw': (10, 10),
                'bbox_size': (111, 111),
                'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, 1.0)
            },
            {
                'layer_name': 'Conv15_2',
                'feature_dim_hw': (5, 5),
                'bbox_size': (162, 162),
                'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, 1.0)
            },
            {
                'layer_name': 'Conv16_2',
                'feature_dim_hw': (3, 3),
                'bbox_size': (213, 213),
                'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, 1.0)
            },
            {
                'layer_name': 'Conv17_2',
                'feature_dim_hw': (1, 1),
                'bbox_size': (264, 264),
                'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, 1.0)
            }
        ]
        pp = generate_prior_bboxes(prior_layer_cfg)

        #print(pp[0:1], pp[39:40])
        print(bbox2loc(pp[0:5], pp[0:1]))

        self.assertEqual('foo'.upper(), 'FOO')
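test_bbox2loc exercises the standard SSD box encoding. Below is a minimal sketch of bbox2loc and its inverse loc2bbox under the usual (cx, cy, w, h) convention; the real helpers may additionally divide by variance terms.

import torch

def bbox2loc(bbox, priors):
    # bbox: (N, 4), priors: (N, 4) or (1, 4), both in (cx, cy, w, h) form
    return torch.cat([
        (bbox[:, :2] - priors[:, :2]) / priors[:, 2:],   # center offsets
        torch.log(bbox[:, 2:] / priors[:, 2:]),          # log size ratios
    ], dim=1)

def loc2bbox(loc, priors):
    # inverse of bbox2loc: decode predicted offsets back into boxes
    return torch.cat([
        priors[:, :2] + loc[:, :2] * priors[:, 2:],
        priors[:, 2:] * torch.exp(loc[:, 2:]),
    ], dim=1)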
Example #7
    def test_corner2(self):
        prior_layer_cfg = [{
            'layer_name': 'Conv5',
            'feature_dim_hw': (19, 19),
            'bbox_size': (60, 60),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }, {
            'layer_name': 'Conv11',
            'feature_dim_hw': (10, 10),
            'bbox_size': (105, 105),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }, {
            'layer_name': 'Conv14_2',
            'feature_dim_hw': (5, 5),
            'bbox_size': (150, 150),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }, {
            'layer_name': 'Conv15_2',
            'feature_dim_hw': (3, 3),
            'bbox_size': (195, 195),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }, {
            'layer_name': 'Conv16_2',
            'feature_dim_hw': (2, 2),
            'bbox_size': (240, 240),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }, {
            'layer_name': 'Conv17_2',
            'feature_dim_hw': (1, 1),
            'bbox_size': (285, 285),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }]
        pp = generate_prior_bboxes(prior_layer_cfg)
        print('original', pp[0])
        test = center2corner(pp[0])
        print('corner', test)
        test = corner2center(test)
        print('center', test)
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        print('Pytorch CUDA Enabled?:', torch.cuda.is_available())
        b = 0.5 * torch.eye(3)
        b_gpu = b.cuda()
        print(b_gpu)
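test_corner2 round-trips between the two box parameterizations. Minimal versions of the converters, assuming (cx, cy, w, h) on one side and (x_min, y_min, x_max, y_max) on the other:

import torch

def center2corner(boxes):
    # (cx, cy, w, h) -> (x_min, y_min, x_max, y_max); works on 1-D or 2-D input
    return torch.cat([boxes[..., :2] - boxes[..., 2:] / 2,
                      boxes[..., :2] + boxes[..., 2:] / 2], dim=-1)

def corner2center(boxes):
    # (x_min, y_min, x_max, y_max) -> (cx, cy, w, h)
    return torch.cat([(boxes[..., :2] + boxes[..., 2:]) / 2,
                      boxes[..., 2:] - boxes[..., :2]], dim=-1)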
Example #8
    def __init__(self, img_dir_list, json_dir_list, transform, mode='train', augmentation_ratio=50):
        self.transform = transform
        self.mode = mode
        # Implement prior bounding box
        self.prior_bboxes = generate_prior_bboxes(prior_layer_cfg=
                                                  [{'layer_name': '1', 'feature_dim_hw': (19, 19), 'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, 1.0)},
                                                   {'layer_name': '2', 'feature_dim_hw': (10, 10), 'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, 1.0)},
                                                   {'layer_name': '3', 'feature_dim_hw': (5, 5),'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, 1.0)},
                                                   {'layer_name': '4', 'feature_dim_hw': (3, 3),'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, 1.0)},
                                                   {'layer_name': '5', 'feature_dim_hw': (3, 3),'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, 1.0)},
                                                   {'layer_name': '6', 'feature_dim_hw': (1, 1),'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, 1.0)}
                                                   ])

        # Pre-process parameters:
        # Normalize: (I-self.mean)/self.std
        self.mean = np.asarray((127, 127, 127)).reshape(3, 1, 1)
        self.img_dir_list = img_dir_list
        self.json_dir_list = json_dir_list
        self.original_len = len(self.img_dir_list)
        self.std = 128.0
Example #9
    def __init__(self, dataset_list):
        self.dataset_list = dataset_list
        prior_layer_cfg = [
            {'layer_name': 'Conv5', 'feature_dim_hw': (19, 19), 'bbox_size': (60, 60),
             'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')},
            {'layer_name': 'Conv11', 'feature_dim_hw': (10, 10), 'bbox_size': (105, 105),
             'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')},
            {'layer_name': 'Conv14_2', 'feature_dim_hw': (5, 5), 'bbox_size': (150, 150),
             'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')},
            {'layer_name': 'Conv15_2', 'feature_dim_hw': (3, 3), 'bbox_size': (195, 195),
             'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')},
            {'layer_name': 'Conv16_2', 'feature_dim_hw': (2, 2), 'bbox_size': (240, 240),
             'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')},
            {'layer_name': 'Conv17_2', 'feature_dim_hw': (1, 1), 'bbox_size': (285, 285),
             'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')}
        ]
        self.prior_bboxes = generate_prior_bboxes(prior_layer_cfg)

        # Pre-process parameters:
        #  Normalize: (I-self.mean)/self.std
        self.mean = np.asarray((127, 127, 127))
        self.std = 128.0
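These dataset constructors only build the priors; matching them against ground truth typically happens per sample, as in the match_priors call of Example #1. A hedged sketch of iou and match_priors, assuming both arguments arrive in the same normalized (cx, cy, w, h) space:

import torch

def iou(boxes, box):
    # boxes: (N, 4), box: (1, 4), both (cx, cy, w, h); returns (N,) overlaps
    a = torch.cat([boxes[:, :2] - boxes[:, 2:] / 2,
                   boxes[:, :2] + boxes[:, 2:] / 2], dim=1)
    b = torch.cat([box[:, :2] - box[:, 2:] / 2,
                   box[:, :2] + box[:, 2:] / 2], dim=1)
    lt = torch.max(a[:, :2], b[:, :2])                 # intersection corners
    rb = torch.min(a[:, 2:], b[:, 2:])
    wh = (rb - lt).clamp(min=0)
    inter = wh[:, 0] * wh[:, 1]
    union = boxes[:, 2] * boxes[:, 3] + box[:, 2] * box[:, 3] - inter
    return inter / union

def match_priors(priors, gt_bboxes, gt_labels, iou_threshold):
    # Label each prior with the class of any ground truth it overlaps
    # beyond the threshold; 0 is background.
    labels = torch.zeros(priors.shape[0], dtype=torch.long)
    for k in range(gt_bboxes.shape[0]):
        overlap = iou(priors, gt_bboxes[k:k + 1])
        labels[overlap > iou_threshold] = int(gt_labels[k])
    return labels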
Example #10
    def __init__(self, dataset_list):
        self.dataset_list = dataset_list
        self.image_size = 300

        # Prior bounding box configuration:
        """
            Generate prior bounding boxes for each feature map level. This function is used in 'cityscape_dataset.py'.
            Using VGG-SSD 300x300 as an example, the feature map dimensions of the output layers are:
               Layer     | Map Dim (h, w) | Single bbox size covered in the original image
            1. Conv6     | (38x38)        | (30x30) (unit: pixels)
            2. Conv11    | (19x19)        | (60x60)
            3. Conv13    | (10x10)        | (113x113)
            4. Conv14_2  | (5x5)          | (165x165)
            5. Conv15_2  | (3x3)          | (218x218)
            6. Conv16_2  | (1x1)          | (270x270)
            7. Conv17_2  | (1x1)          | (264x264)
            NOTE: The settings may differ for MobileNet v3; provide your own configuration.
            Tip: see 'Choosing scales and aspect ratios for default boxes' on page 5 of the original paper.
            :param prior_layer_cfg: configuration for each feature layer; see the 'example_prior_layer_cfg' below.
            :return: prior bounding boxes of the form (cx, cy, w, h), with values in [0, 1], dim (1, num_priors, 4)
            """
        prior_layer_cfg = [{
            'layer_name': 'Conv6',
            'feature_dim_hw': (38, 38),
            'bbox_size': (30, 30),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0)
        }, {
            'layer_name': 'Conv11',
            'feature_dim_hw': (19, 19),
            'bbox_size': (60, 60),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0)
        }, {
            'layer_name': 'Conv13',
            'feature_dim_hw': (10, 10),
            'bbox_size': (102, 102),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0)
        }, {
            'layer_name': 'Conv14_2',
            'feature_dim_hw': (5, 5),
            'bbox_size': (144, 144),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0)
        }, {
            'layer_name': 'Conv15_2',
            'feature_dim_hw': (3, 3),
            'bbox_size': (186, 186),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0)
        }, {
            'layer_name': 'Conv16_2',
            'feature_dim_hw': (2, 2),
            'bbox_size': (228, 228),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0)
        }, {
            'layer_name': 'Conv17_2',
            'feature_dim_hw': (1, 1),
            'bbox_size': (270, 270),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0)
        }]
        self.prior_bound_boxes = generate_prior_bboxes(prior_layer_cfg)

        # Pre-process parameters:
        #  Normalize: (I-self.mean)/self.std
        self.mean = np.asarray((127, 127, 127))
        self.std = 128.0
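The bbox sizes in this config are consistent with the scale rule the docstring points to ('Choosing scales and aspect ratios for default boxes', SSD paper p. 5): s_k = s_min + (s_max - s_min)(k - 1)/(m - 1). With s_min = 0.2 and s_max = 0.9 (the size_bounds of Example #1), m = 6 and a 300x300 input, the six upper layers get exactly 60, 102, 144, 186, 228 and 270 pixels; the extra 30-pixel Conv6 layer sits below s_min, as in the original paper.

def ssd_scales(s_min=0.2, s_max=0.9, m=6, img_size=300):
    # Evenly spaced scales between s_min and s_max, in pixels.
    return [round(img_size * (s_min + (s_max - s_min) * k / (m - 1)))
            for k in range(m)]

print(ssd_scales())  # [60, 102, 144, 186, 228, 270]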
Example #11
        'layer_name': 'Conv15_2',
        'feature_dim_hw': (3, 3),
        'bbox_size': (186, 186),
        'aspect_ratio': [2]
    }, {
        'layer_name': 'Conv16_2',
        'feature_dim_hw': (2, 2),
        'bbox_size': (228, 228),
        'aspect_ratio': [2]
    }, {
        'layer_name': 'Conv17_2',
        'feature_dim_hw': (1, 1),
        'bbox_size': (270, 270),
        'aspect_ratio': [2]
    }]
    prior_bboxes = bbox_helper.generate_prior_bboxes(prior_layer_cfg)
    prior_bboxes = prior_bboxes.unsqueeze(0).cpu()

    test_img = np.array(test_img)
    test_img = np.subtract(test_img, [127, 127, 127])
    test_img = np.divide(test_img, 128)
    test_img = np.transpose(test_img, (2, 0, 1))  # HWC -> CHW (reshape would scramble pixels)
    test_img = torch.Tensor(test_img)
    test_img = test_img.unsqueeze(0).cpu()

    images = Variable(test_img)  # Use Variable(*) to allow gradient flow
    conf_preds, loc_preds = net.forward(images)  # Forward once
    print(conf_preds.shape)
    print(loc_preds.shape)
Example #12
    def test_random2(self):
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        torch.set_printoptions(precision=10)
        prior_layer_cfg = [{
            'layer_name': 'Conv5',
            'feature_dim_hw': (19, 19),
            'bbox_size': (60, 60),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }, {
            'layer_name': 'Conv11',
            'feature_dim_hw': (10, 10),
            'bbox_size': (105, 105),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }, {
            'layer_name': 'Conv14_2',
            'feature_dim_hw': (5, 5),
            'bbox_size': (150, 150),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }, {
            'layer_name': 'Conv15_2',
            'feature_dim_hw': (3, 3),
            'bbox_size': (195, 195),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }, {
            'layer_name': 'Conv16_2',
            'feature_dim_hw': (2, 2),
            'bbox_size': (240, 240),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }, {
            'layer_name': 'Conv17_2',
            'feature_dim_hw': (1, 1),
            'bbox_size': (285, 285),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }]
        pp = generate_prior_bboxes(prior_layer_cfg)

        # test_list = load_data('../Debugimage', '../Debuglabel')
        test_list = load_data('../cityscapes_samples',
                              '../cityscapes_samples_labels')
        #print(test_list)

        test_dataset = CityScapeDataset(test_list)
        test_data_loader = torch.utils.data.DataLoader(test_dataset,
                                                       batch_size=1,
                                                       shuffle=True,
                                                       num_workers=0)
        lfw_dataset_dir = '../'
        test_net = ssd_net.SSD(3)
        test_net_state = torch.load(
            os.path.join(lfw_dataset_dir, 'ssd_net.pth'))
        test_net.load_state_dict(test_net_state)
        idx, (img, bbox, label) = next(enumerate(test_data_loader))
        pred_cof, pred_loc = test_net.forward(img)
        print(pred_loc.shape)
        import torch.nn.functional as F
        pred_loc = pred_loc.detach()
        bbox_center = loc2bbox(pred_loc[0], pp)
        pred_cof = F.softmax(pred_cof[0], dim=1).detach()
        ind = np.where(pred_cof.cpu().numpy() > 0.7)
        # pred_cof = F.softmax(pred_cof[ind[0]])
        bbox_center = bbox_center[ind[0]]
        print(ind, pred_cof)
        img = img[0].cpu().numpy()
        img = np.transpose(img, (1, 2, 0))  # CHW -> HWC (reshape would scramble pixels)
        img = (img * 128 + np.asarray([[127, 127, 127]])) / 255
        fig, ax = plt.subplots(1)
        imageB_array = resize(img, (600, 1200), anti_aliasing=True)
        ax.imshow(imageB_array, cmap='brg')

        bbox_corner = center2corner(bbox_center)

        for i in range(0, bbox_corner.shape[0]):
            # print('i point', bbox_corner[i, 0]*600, bbox_corner[i, 1]*300,(bbox_corner[i, 2]-bbox_corner[i, 0])*600, (bbox_corner[i, 3]-bbox_corner[i, 1])*300)
            rect = patches.Rectangle(
                (bbox_corner[i, 0] * 1200, bbox_corner[i, 1] * 600),
                (bbox_corner[i, 2] - bbox_corner[i, 0]) * 1200,
                (bbox_corner[i, 3] - bbox_corner[i, 1]) * 600,
                linewidth=2,
                edgecolor='r',
                facecolor='none')  # Create a Rectangle patch
            ax.add_patch(rect)  # Add the patch to the Axes
        plt.show()
Example #13
    def test_dataLoad(self):
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        torch.set_printoptions(precision=10)
        prior_layer_cfg = [{
            'layer_name': 'Conv5',
            'feature_dim_hw': (19, 19),
            'bbox_size': (60, 60),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }, {
            'layer_name': 'Conv11',
            'feature_dim_hw': (10, 10),
            'bbox_size': (105, 105),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }, {
            'layer_name': 'Conv14_2',
            'feature_dim_hw': (5, 5),
            'bbox_size': (150, 150),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }, {
            'layer_name': 'Conv15_2',
            'feature_dim_hw': (3, 3),
            'bbox_size': (195, 195),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }, {
            'layer_name': 'Conv16_2',
            'feature_dim_hw': (2, 2),
            'bbox_size': (240, 240),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }, {
            'layer_name': 'Conv17_2',
            'feature_dim_hw': (1, 1),
            'bbox_size': (285, 285),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }]
        pp = generate_prior_bboxes(prior_layer_cfg)

        #test_list = load_data('../Debugimage', '../Debuglabel')
        test_list = load_data('../cityscapes_samples',
                              '../cityscapes_samples_labels')
        print(test_list)
        gt_bbox = np.asarray(test_list[0]['label'][1]) * [
            300 / 2048, 300 / 1024, 300 / 2048, 300 / 1024
        ]
        print('ground truth from file:', test_list[0]['label'][0])
        test_dataset = CityScapeDataset(test_list)
        test_data_loader = torch.utils.data.DataLoader(test_dataset,
                                                       batch_size=1,
                                                       shuffle=True,
                                                       num_workers=0)
        idx, (img, bbox, label) = next(enumerate(test_data_loader))
        bbox = bbox[0]
        label = label[0]
        print(bbox.shape, label.shape)

        print('matched label', label[np.where(label > 0)], np.where(label > 0),
              label.shape)
        print('first bbox from data_set:', bbox[0], label[0])
        bbox_center = loc2bbox(bbox, pp)
        bbox_corner = center2corner(bbox_center)
        img = img[0].cpu().numpy()
        img = np.transpose(img, (1, 2, 0))  # CHW -> HWC (reshape would scramble pixels)
        img = (img * 128 + np.asarray([[127, 127, 127]])) / 255
        # for i in range(0, bbox.shape[0]):
        #     cv2.rectangle(img, (bbox[i,0], bbox[i,1]), (bbox[i,2], bbox[i,3]), (0, 255, 0), 3)
        #cv2.imshow("img", img)
        # Create figure and axes
        fig, ax = plt.subplots(1)
        imageB_array = resize(img, (300, 300), anti_aliasing=True)
        ax.imshow(imageB_array, cmap='brg')
        bbox_corner = bbox_corner.cpu().numpy()
        bbox_corner = bbox_corner[np.where(label > 0)]
        temp_lab = label[np.where(label > 0)]
        print('matched bbox ======', bbox_corner)
        pp = center2corner(pp)
        pp = pp[np.where(label > 0)]
        print('864 tensor: ', pp)
        for i in range(0, bbox_corner.shape[0]):
            if temp_lab[i] == 1:
                # print('i point', bbox_corner[i, 0]*600, bbox_corner[i, 1]*300,(bbox_corner[i, 2]-bbox_corner[i, 0])*600, (bbox_corner[i, 3]-bbox_corner[i, 1])*300)
                rect = patches.Rectangle(
                    (bbox_corner[i, 0] * 300, bbox_corner[i, 1] * 300),
                    (bbox_corner[i, 2] - bbox_corner[i, 0]) * 300,
                    (bbox_corner[i, 3] - bbox_corner[i, 1]) * 300,
                    linewidth=2,
                    edgecolor='r',
                    facecolor='none')  # Create a Rectangle patch
                ax.add_patch(rect)  # Add the patch to the Axes
            else:
                rect = patches.Rectangle(
                    (bbox_corner[i, 0] * 300, bbox_corner[i, 1] * 300),
                    (bbox_corner[i, 2] - bbox_corner[i, 0]) * 300,
                    (bbox_corner[i, 3] - bbox_corner[i, 1]) * 300,
                    linewidth=2,
                    edgecolor='y',
                    facecolor='none')  # Create a Rectangle patch
                ax.add_patch(rect)  # Add the patch to the Axes
        for i in range(0, pp.shape[0]):
            rect = patches.Rectangle(
                (pp[i, 0] * 300, pp[i, 1] * 300), (pp[i, 2] - pp[i, 0]) * 300,
                (pp[i, 3] - pp[i, 1]) * 300,
                linewidth=1,
                edgecolor='blue',
                facecolor='none')  # Create a Rectangle patch
            ax.add_patch(rect)  # Add the patch to the Axes

        # for i in range(0, gt_bbox.shape[0]):
        #     rect = patches.Rectangle((gt_bbox[i][0], gt_bbox[i][1]),
        #                              (gt_bbox[i][2] - gt_bbox[i][0]),
        #                              (gt_bbox[i][3] - gt_bbox[i][1]), linewidth=1, edgecolor='g',
        #                              facecolor='none')  # Create a Rectangle patch
        #     ax.add_patch(rect)  # Add the patch to the Axes

        plt.show()
Example #14
        plt.text(left, top + height, texts[classes[i]],
                 horizontalalignment='left',
                 fontsize=20, color=colors[classes[i]],
                 verticalalignment='top')
    plt.show()



path_to_trained_model = 'ssd_net.pth'

img_file_path = sys.argv[1]  # argv[0] is the script name ('eval.py'); argv[1] is the image path
img = Image.open(img_file_path)
img_norm = (img - IMG_MEAN) / IMG_STD
img_np = np.asarray([img_norm], dtype="float32")
img_tensor = torch.from_numpy(img_np)
prior_bboxes = generate_prior_bboxes(prior_layer_cfg = prior_layer_cfg)

if WILL_TEST:

    if USE_GPU:
        test_net_state = torch.load(path_to_trained_model)
    else:
        test_net_state = torch.load(path_to_trained_model, map_location='cpu')
    test_net = SSD(num_classes=3)
    test_net.load_state_dict(test_net_state)
    test_net.eval()

    test_image_permuted = img_tensor.permute(0, 3, 1, 2)
    test_image_permuted = Variable(test_image_permuted.float())

    test_conf_preds, test_loc_preds = test_net.forward(test_image_permuted)
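Example #14 converts NHWC to NCHW with .permute, which is the safe way to reorder axes; reshape or view cannot substitute for it, because they keep the flat memory order and scramble pixels across channels. A quick demonstration:

import torch

hwc = torch.arange(2 * 2 * 3).reshape(2, 2, 3)   # tiny 2x2, 3-channel image
chw_ok = hwc.permute(2, 0, 1)                    # axes actually move
chw_bad = hwc.reshape(3, 2, 2)                   # same bytes, wrong layout
print(torch.equal(chw_ok, chw_bad))              # False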
Example #15
def main():
    torch.set_default_tensor_type('torch.cuda.FloatTensor')

    prior_layer_cfg = [{
        'layer_name': 'Conv5',
        'feature_dim_hw': (19, 19),
        'bbox_size': (60, 60),
        'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
    }, {
        'layer_name': 'Conv11',
        'feature_dim_hw': (10, 10),
        'bbox_size': (105, 105),
        'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
    }, {
        'layer_name': 'Conv14_2',
        'feature_dim_hw': (5, 5),
        'bbox_size': (150, 150),
        'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
    }, {
        'layer_name': 'Conv15_2',
        'feature_dim_hw': (3, 3),
        'bbox_size': (195, 195),
        'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
    }, {
        'layer_name': 'Conv16_2',
        'feature_dim_hw': (2, 2),
        'bbox_size': (240, 240),
        'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
    }, {
        'layer_name': 'Conv17_2',
        'feature_dim_hw': (1, 1),
        'bbox_size': (285, 285),
        'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
    }]
    prior_bboxes = generate_prior_bboxes(prior_layer_cfg)

    # loading the test image
    img_file_path = sys.argv[1]
    # img_file_path = 'image.png'
    img = Image.open(img_file_path)
    img = img.resize((300, 300))
    plot_img = img.copy()
    img_array = np.asarray(img)[:, :, :3]
    mean = np.asarray((127, 127, 127))
    std = 128.0
    img_array = (img_array - mean) / std
    img_tensor = torch.Tensor(img_array)
    test_input = img_tensor.permute(2, 0, 1).unsqueeze(0)  # HWC -> NCHW (view would scramble pixels)

    # # loading test input to run test on
    # test_data_loader = torch.utils.data.DataLoader(test_input,
    #                                                 batch_size=1,
    #                                                 shuffle=True,
    #                                                 num_workers=0)
    # idx, (img) = next(enumerate(test_data_loader))
    # # Setting model to evaluate mode
    net = SSD(2)
    test_net_state = torch.load('ssd_net.pth')
    net.load_state_dict(test_net_state)
    # net.eval()
    net.cuda()
    # Forward
    test_input = Variable(test_input.cuda())
    test_cof, test_loc = net.forward(test_input)

    test_loc = test_loc.detach()
    test_loc_clone = test_loc.clone()

    # normalizing the loss to add up to 1 (for probability)
    test_cof_score = F.softmax(test_cof[0], dim=1)
    # print(test_cof_score.shape)
    # print(test_cof_score)

    # running NMS
    sel_idx = nms_bbox1(test_loc_clone[0],
                        prior_bboxes,
                        test_cof_score.detach(),
                        overlap_threshold=0.5,
                        prob_threshold=0.24)

    test_loc = loc2bbox(test_loc[0], prior_bboxes)
    test_loc = center2corner(test_loc)

    sel_bboxes = test_loc[sel_idx]

    # plotting the output
    plot_output(plot_img, sel_bboxes.cpu().detach().numpy())
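The nms_bbox1 helper used above is not shown on this page. Purely as an assumption of what it might do given its call signature, here is a minimal class-agnostic sketch: decode the location deltas against the priors, keep boxes whose foreground probability clears prob_threshold, then greedily suppress overlaps.

import torch

def nms_bbox1(loc, priors, scores, overlap_threshold=0.5, prob_threshold=0.24):
    # Hypothetical sketch; returns indices of kept detections.
    centers = torch.cat([priors[:, :2] + loc[:, :2] * priors[:, 2:],
                         priors[:, 2:] * torch.exp(loc[:, 2:])], dim=1)
    corners = torch.cat([centers[:, :2] - centers[:, 2:] / 2,
                         centers[:, :2] + centers[:, 2:] / 2], dim=1)
    conf = scores[:, 1]                            # assume class 1 = object
    idx = (conf > prob_threshold).nonzero(as_tuple=True)[0]
    idx = idx[conf[idx].argsort(descending=True)]  # high confidence first
    keep = []
    while idx.numel() > 0:
        best = idx[0]
        keep.append(best.item())
        if idx.numel() == 1:
            break
        rest = corners[idx[1:]]
        lt = torch.max(corners[best, :2], rest[:, :2])
        rb = torch.min(corners[best, 2:], rest[:, 2:])
        wh = (rb - lt).clamp(min=0)
        inter = wh[:, 0] * wh[:, 1]
        area_best = (corners[best, 2] - corners[best, 0]) * \
                    (corners[best, 3] - corners[best, 1])
        area_rest = (rest[:, 2] - rest[:, 0]) * (rest[:, 3] - rest[:, 1])
        overlap = inter / (area_best + area_rest - inter)
        idx = idx[1:][overlap <= overlap_threshold]
    return torch.tensor(keep, dtype=torch.long)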