def main():
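    # Make newly created tensors live on the GPU by default; the 'spawn' start
    # method is required so DataLoader worker processes can safely use CUDA.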
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    torch.multiprocessing.set_start_method('spawn')
    data_list = load_data(cityscape_img_dir, cityscape_label_dir)
    random.shuffle(data_list)
    num_total_items = len(data_list)
    net = SSD(5)
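    # SSD head with 5 output classes (the exact class list depends on how SSD
    # and the Cityscapes label groups are defined in this project).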

    # Training set, ratio: 80%
    num_train_sets = 0.8 * num_total_items
    train_set_list = data_list[:int(num_train_sets)]
    validation_set_list = data_list[int(num_train_sets):]

    # Create dataloaders for training and validation
    train_dataset = CityScapeDataset(train_set_list)
    train_data_loader = torch.utils.data.DataLoader(train_dataset,
                                                    batch_size=8,
                                                    shuffle=True,
                                                    num_workers=0)
    print('Total training items',
          len(train_dataset), ', Total training mini-batches in one epoch:',
          len(train_data_loader))

    validation_dataset = CityScapeDataset(validation_set_list)
    validation_data_loader = torch.utils.data.DataLoader(validation_dataset,
                                                         batch_size=8,
                                                         shuffle=True,
                                                         num_workers=0)
    print('Total validation items:', len(validation_dataset))
    if Tuning:
        net_state = torch.load(os.path.join(pth_path, 'ssd_net.pth'))
        print('Loading trained model: ', os.path.join(pth_path, 'ssd_net.pth'))
        net.load_state_dict(net_state)
    train(net, train_data_loader, validation_data_loader)
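
# Note: because the 'spawn' start method is used above, this module is
# presumably run behind the usual entry-point guard, e.g.:
#
#     if __name__ == '__main__':
#         main()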
Example #2
    def test_random2(self):
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        torch.set_printoptions(precision=10)
        prior_layer_cfg = [{
            'layer_name': 'Conv5',
            'feature_dim_hw': (19, 19),
            'bbox_size': (60, 60),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }, {
            'layer_name': 'Conv11',
            'feature_dim_hw': (10, 10),
            'bbox_size': (105, 105),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }, {
            'layer_name': 'Conv14_2',
            'feature_dim_hw': (5, 5),
            'bbox_size': (150, 150),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }, {
            'layer_name': 'Conv15_2',
            'feature_dim_hw': (3, 3),
            'bbox_size': (195, 195),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }, {
            'layer_name': 'Conv16_2',
            'feature_dim_hw': (2, 2),
            'bbox_size': (240, 240),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }, {
            'layer_name': 'Conv17_2',
            'feature_dim_hw': (1, 1),
            'bbox_size': (285, 285),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }]
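        # With 6 aspect ratios per cell, this configuration yields
        # 6 * (19^2 + 10^2 + 5^2 + 3^2 + 2^2 + 1^2) = 3000 prior boxes in total
        # (assuming generate_prior_bboxes emits one prior per listed ratio).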
        pp = generate_prior_bboxes(prior_layer_cfg)

        # test_list = load_data('../Debugimage', '../Debuglabel')
        test_list = load_data('../cityscapes_samples',
                              '../cityscapes_samples_labels')
        #print(test_list)

        test_dataset = CityScapeDataset(test_list)
        test_data_loader = torch.utils.data.DataLoader(test_dataset,
                                                       batch_size=1,
                                                       shuffle=True,
                                                       num_workers=0)
        lfw_dataset_dir = '../'
        test_net = ssd_net.SSD(3)
        test_net_state = torch.load(
            os.path.join(lfw_dataset_dir, 'ssd_net.pth'))
        test_net.load_state_dict(test_net_state)
        idx, (img, bbox, label) = next(enumerate(test_data_loader))
        pred_cof, pred_loc = test_net.forward(img)
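        # pred_cof: per-prior class scores, pred_loc: per-prior encoded box offsets
        # (layout assumed to be (N, num_priors, num_classes) and (N, num_priors, 4)).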
        print(pred_loc.shape)
        import torch.nn.functional as F
        pred_loc = pred_loc.detach()
        bbox_center = loc2bbox(pred_loc[0], pp)
        pred_cof = F.softmax(pred_cof[0], dim=-1).detach()
        # indices of priors with any class confidence above 0.7
        ind = np.where(pred_cof.cpu().numpy() > 0.7)
        # pred_cof = F.softmax(pred_cof[ind[0]])
        bbox_center = bbox_center[ind[0]]
        print(ind, pred_cof)
        img = img[0].cpu().numpy().transpose(1, 2, 0)  # C, H, W -> H, W, C
        img = (img * 128 + np.asarray([[127, 127, 127]])) / 255  # undo the (x - 127) / 128 style normalization, back to [0, 1]
        fig, ax = plt.subplots(1)
        imageB_array = resize(img, (600, 1200), anti_aliasing=True)
        ax.imshow(imageB_array, cmap='brg')

        bbox_corner = center2corner(bbox_center)

        for i in range(0, bbox_corner.shape[0]):
            # print('i point', bbox_corner[i, 0]*600, bbox_corner[i, 1]*300,(bbox_corner[i, 2]-bbox_corner[i, 0])*600, (bbox_corner[i, 3]-bbox_corner[i, 1])*300)
            rect = patches.Rectangle(
                (bbox_corner[i, 0] * 1200, bbox_corner[i, 1] * 600),
                (bbox_corner[i, 2] - bbox_corner[i, 0]) * 1200,
                (bbox_corner[i, 3] - bbox_corner[i, 1]) * 600,
                linewidth=2,
                edgecolor='r',
                facecolor='none')  # Create a Rectangle patch
            ax.add_patch(rect)  # Add the patch to the Axes
        plt.show()
Example #3
    def test_dataLoad(self):
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        torch.set_printoptions(precision=10)
        prior_layer_cfg = [{
            'layer_name': 'Conv5',
            'feature_dim_hw': (19, 19),
            'bbox_size': (60, 60),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }, {
            'layer_name': 'Conv11',
            'feature_dim_hw': (10, 10),
            'bbox_size': (105, 105),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }, {
            'layer_name': 'Conv14_2',
            'feature_dim_hw': (5, 5),
            'bbox_size': (150, 150),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }, {
            'layer_name': 'Conv15_2',
            'feature_dim_hw': (3, 3),
            'bbox_size': (195, 195),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }, {
            'layer_name': 'Conv16_2',
            'feature_dim_hw': (2, 2),
            'bbox_size': (240, 240),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }, {
            'layer_name': 'Conv17_2',
            'feature_dim_hw': (1, 1),
            'bbox_size': (285, 285),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }]
        pp = generate_prior_bboxes(prior_layer_cfg)

        #test_list = load_data('../Debugimage', '../Debuglabel')
        test_list = load_data('../cityscapes_samples',
                              '../cityscapes_samples_labels')
        print(test_list)
        gt_bbox = np.asarray(test_list[0]['label'][1]) * [
            300 / 2048, 300 / 1024, 300 / 2048, 300 / 1024
        ]
        print('ground truth from file:', test_list[0]['label'][0])
        test_dataset = CityScapeDataset(test_list)
        test_data_loader = torch.utils.data.DataLoader(test_dataset,
                                                       batch_size=1,
                                                       shuffle=True,
                                                       num_workers=0)
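        # Each sample is assumed to be (image tensor, per-prior encoded bbox targets,
        # per-prior class labels) produced by the matching step in CityScapeDataset.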
        idx, (img, bbox, label) = next(enumerate(test_data_loader))
        bbox = bbox[0]
        label = label[0].cpu().numpy()  # as numpy, since it is only used with np.where below
        print(bbox.shape, label.shape)

        print('matched label', label[np.where(label > 0)], np.where(label > 0),
              label.shape)
        print('first bbox from data_set:', bbox[0], label[0])
        bbox_center = loc2bbox(bbox, pp)
        bbox_corner = center2corner(bbox_center)
        img = img[0].cpu().numpy().transpose(1, 2, 0)  # C, H, W -> H, W, C
        img = (img * 128 + np.asarray([[127, 127, 127]])) / 255  # undo the (x - 127) / 128 style normalization, back to [0, 1]
        # for i in range(0, bbox.shape[0]):
        #     cv2.rectangle(img, (bbox[i,0], bbox[i,1]), (bbox[i,2], bbox[i,3]), (0, 255, 0), 3)
        #cv2.imshow("img", img)
        # Create figure and axes
        fig, ax = plt.subplots(1)
        imageB_array = resize(img, (300, 300), anti_aliasing=True)
        ax.imshow(imageB_array, cmap='brg')
        bbox_corner = bbox_corner.cpu().numpy()
        bbox_corner = bbox_corner[np.where(label > 0)]
        temp_lab = label[np.where(label > 0)]
        print('matched bbox ======', bbox_corner)
        pp = center2corner(pp)
        pp = pp[np.where(label > 0)]
        print('864 tensor: ', pp)
        for i in range(0, bbox_corner.shape[0]):
            if temp_lab[i] == 1:
                # print('i point', bbox_corner[i, 0]*600, bbox_corner[i, 1]*300,(bbox_corner[i, 2]-bbox_corner[i, 0])*600, (bbox_corner[i, 3]-bbox_corner[i, 1])*300)
                rect = patches.Rectangle(
                    (bbox_corner[i, 0] * 300, bbox_corner[i, 1] * 300),
                    (bbox_corner[i, 2] - bbox_corner[i, 0]) * 300,
                    (bbox_corner[i, 3] - bbox_corner[i, 1]) * 300,
                    linewidth=2,
                    edgecolor='r',
                    facecolor='none')  # Create a Rectangle patch
                ax.add_patch(rect)  # Add the patch to the Axes
            else:
                rect = patches.Rectangle(
                    (bbox_corner[i, 0] * 300, bbox_corner[i, 1] * 300),
                    (bbox_corner[i, 2] - bbox_corner[i, 0]) * 300,
                    (bbox_corner[i, 3] - bbox_corner[i, 1]) * 300,
                    linewidth=2,
                    edgecolor='y',
                    facecolor='none')  # Create a Rectangle patch
                ax.add_patch(rect)  # Add the patch to the Axes
        for i in range(0, pp.shape[0]):
            rect = patches.Rectangle(
                (pp[i, 0] * 300, pp[i, 1] * 300), (pp[i, 2] - pp[i, 0]) * 300,
                (pp[i, 3] - pp[i, 1]) * 300,
                linewidth=1,
                edgecolor='blue',
                facecolor='none')  # Create a Rectangle patch
            ax.add_patch(rect)  # Add the patch to the Axes

        # for i in range(0, gt_bbox.shape[0]):
        #     rect = patches.Rectangle((gt_bbox[i][0], gt_bbox[i][1]),
        #                              (gt_bbox[i][2] - gt_bbox[i][0]),
        #                              (gt_bbox[i][3] - gt_bbox[i][1]), linewidth=1, edgecolor='g',
        #                              facecolor='none')  # Create a Rectangle patch
        #     ax.add_patch(rect)  # Add the patch to the Axes

        plt.show()
Example #4
    test_net = SSD(len(class_labels))

    test_net_state = torch.load(results_path)
    test_net.load_state_dict(test_net_state)

    if torch.cuda.is_available():
        test_net.cuda()

    test_net.eval()

    # 3. Run Forward -------------------------------------------------------------------
    with torch.no_grad():
        pred_scores_tensor, pred_bbox_tensor = test_net.forward(
            img_tensor.unsqueeze(0))  # N C H W

    prior = CityScapeDataset([])
    prior_bbox = prior.get_prior_bbox()

    pred_scores_tensor = F.softmax(
        pred_scores_tensor,
        dim=2)  # apply softmax here: it is disabled inside the network in eval/test mode
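    # loc2bbox decodes the regressed offsets, relative to the prior boxes,
    # back into absolute box coordinates (assumed (cx, cy, w, h) here).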
    pred_bbox_tensor = loc2bbox(pred_bbox_tensor, prior_bbox.unsqueeze(0))
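    # nms_bbox presumably performs class-wise non-maximum suppression and
    # returns the surviving detections.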
    pred_picked = nms_bbox(pred_bbox_tensor[0], pred_scores_tensor[0])

    # 4. plot result
    test_image = img_tensor.cpu().numpy().astype(
        np.float32).transpose(1, 2, 0).copy()  # C, H, W -> H, W, C
    test_image = (test_image + 1) / 2  # map [-1, 1] back to [0, 1] for display
Example #5
    batch_size = 32
    num_workers = 0

# Read all json file into data_list and randomly shuffle data
data_list = readJson(common.getJsonList()[0:10])
total_items = len(data_list)

# Divide data into train, validate and test lists
n_train_sets = 0.6 * total_items
train_set_list = data_list[:int(n_train_sets)]
n_valid_sets = 0.3 * total_items
valid_set_list = data_list[int(n_train_sets):int(n_train_sets + n_valid_sets)]
test_set_list = data_list[int(n_train_sets + n_valid_sets):]
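# i.e. roughly 60% train / 30% validation / 10% test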

# Load train and validate data into dataloader
train_dataset = CityScapeDataset(train_set_list)
train_data_loader = DataLoader(train_dataset,
                               batch_size=batch_size,
                               shuffle=True,
                               num_workers=num_workers)
print('Total training items', len(train_dataset),
      ', Total training mini-batches in one epoch:', len(train_data_loader))
valid_set = CityScapeDataset(valid_set_list)
valid_data_loader = DataLoader(valid_set,
                               batch_size=int(batch_size / 2),
                               shuffle=True,
                               num_workers=num_workers)
print('Total validation set:', len(valid_set))
test_set = CityScapeDataset(test_set_list)
test_data_loader = DataLoader(test_set,
                              batch_size=1,
Example #6
        sample_path = '/home/datasets/full_dataset/train_extra/'
        label_path = '/home/datasets/full_dataset_labels/train_extra'
    else:
        sample_path = '../cityscapes_samples/'
        label_path = '../cityscapes_samples_labels/'

    dataset_list = load_dataset_list(sample_path, label_path,
                                     config['label_groups'])

    # Define dataset/dataloader -------------------------------------------
    num_train = int(0.6 * len(dataset_list))
    num_valid = int(0.2 * len(dataset_list))
    if config['is_train']:
        train_dataset = CityScapeDataset(dataset_list[:num_train],
                                         n_augmented=config['n_aug'],
                                         debug=config['debug'])
        train_loader = DataLoader(train_dataset,
                                  batch_size=config['n_batch'],
                                  shuffle=True,
                                  num_workers=config['n_worker'])
        print('Total training items: ', len(train_dataset))
        print('Total training mini-batches in one epoch: ', len(train_loader))

        valid_dataset = CityScapeDataset(dataset_list[num_train:(num_train +
                                                                 num_valid)],
                                         debug=config['debug'])
        valid_loader = DataLoader(valid_dataset,
                                  batch_size=config['n_batch'],
                                  shuffle=True,
                                  num_workers=config['n_worker'])