Example #1
    def test_nn(self):
        torch.set_default_tensor_type('torch.cuda.FloatTensor')

        # model = mobilenet.MobileNet()
        # module_util.summary_layers(model,(3,300,300))
        model = ssd_net.SSD(2)  # build the SSD network with 2 output classes
        module_util.summary_layers(model, (3, 600, 300))  # print a per-layer summary for a (3, 600, 300) input
        self.assertEqual('foo'.upper(), 'FOO')
Example #2
import os
import sys

import matplotlib.patches as patches
import matplotlib.pyplot as plt
import torch
import torch.optim
from PIL import Image
from torch.autograd import Variable

import ssd_net

# torch.set_default_tensor_type('torch.cuda.FloatTensor')
current_directory = os.getcwd()  # current working directory

if __name__ == '__main__':

    img_file_path = sys.argv[1]

    test_img = Image.open(img_file_path)

    net = ssd_net.SSD(num_classes=6)

    model_path = os.path.join(current_directory, 'SSDnet_crop1.pth')

    net_state = torch.load(model_path)

    net.load_state_dict(net_state)

    net.cpu()

    net.eval()

    prior_layer_cfg = [{
        'layer_name': 'Conv11',
        'feature_dim_hw': (19, 19),
        'bbox_size': (60, 60),
Example #3
    def test_random2(self):
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        torch.set_printoptions(precision=10)
        prior_layer_cfg = [{
            'layer_name': 'Conv5',
            'feature_dim_hw': (19, 19),
            'bbox_size': (60, 60),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }, {
            'layer_name': 'Conv11',
            'feature_dim_hw': (10, 10),
            'bbox_size': (105, 105),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }, {
            'layer_name': 'Conv14_2',
            'feature_dim_hw': (5, 5),
            'bbox_size': (150, 150),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }, {
            'layer_name': 'Conv15_2',
            'feature_dim_hw': (3, 3),
            'bbox_size': (195, 195),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }, {
            'layer_name': 'Conv16_2',
            'feature_dim_hw': (2, 2),
            'bbox_size': (240, 240),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }, {
            'layer_name': 'Conv17_2',
            'feature_dim_hw': (1, 1),
            'bbox_size': (285, 285),
            'aspect_ratio': (1.0, 1 / 2, 1 / 3, 2.0, 3.0, '1t')
        }]
        pp = generate_prior_bboxes(prior_layer_cfg)

        # test_list = load_data('../Debugimage', '../Debuglabel')
        test_list = load_data('../cityscapes_samples',
                              '../cityscapes_samples_labels')
        #print(test_list)

        test_dataset = CityScapeDataset(test_list)
        test_data_loader = torch.utils.data.DataLoader(test_dataset,
                                                       batch_size=1,
                                                       shuffle=True,
                                                       num_workers=0)
        lfw_dataset_dir = '../'
        test_net = ssd_net.SSD(3)
        test_net_state = torch.load(
            os.path.join(lfw_dataset_dir, 'ssd_net.pth'))
        test_net.load_state_dict(test_net_state)
        idx, (img, bbox, label) = next(enumerate(test_data_loader))
        pred_cof, pred_loc = test_net.forward(img)
        print(pred_loc.shape)
        import torch.nn.functional as F
        pred_loc = pred_loc.detach()
        bbox_center = loc2bbox(pred_loc[0], pp)
        pred_cof = F.softmax(pred_cof[0], dim=-1).detach().cpu()  # per-prior class probabilities
        ind = np.where(pred_cof > 0.7)
        # pred_cof = F.softmax(pred_cof[ind[0]])
        bbox_center = bbox_center[ind[0]]
        print(ind, pred_cof)
        img = img[0].cpu().numpy()
        img = img.transpose(1, 2, 0)  # CHW -> HWC for display
        img = (img * 128 + np.asarray([[127, 127, 127]])) / 255  # undo the dataset normalization
        fig, ax = plt.subplots(1)
        imageB_array = resize(img, (600, 1200), anti_aliasing=True)
        ax.imshow(imageB_array, cmap='brg')

        bbox_corner = center2corner(bbox_center)

        for i in range(0, bbox_corner.shape[0]):
            # print('i point', bbox_corner[i, 0]*600, bbox_corner[i, 1]*300,(bbox_corner[i, 2]-bbox_corner[i, 0])*600, (bbox_corner[i, 3]-bbox_corner[i, 1])*300)
            rect = patches.Rectangle(
                (bbox_corner[i, 0] * 1200, bbox_corner[i, 1] * 600),
                (bbox_corner[i, 2] - bbox_corner[i, 0]) * 1200,
                (bbox_corner[i, 3] - bbox_corner[i, 1]) * 600,
                linewidth=2,
                edgecolor='r',
                facecolor='none')  # Create a Rectangle patch
            ax.add_patch(rect)  # Add the patch to the Axes
        plt.show()
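
Note: the snippet above relies on a project helper, center2corner, to turn (cx, cy, w, h) boxes into (x_min, y_min, x_max, y_max) corners before drawing. That helper is not shown in these examples; the sketch below only illustrates what such a conversion typically looks like, assuming the boxes arrive as an (N, 4) array in normalized center-size form.

import numpy as np


def center2corner_sketch(boxes):
    # Hypothetical stand-in for the project's center2corner helper.
    # boxes: (N, 4) array laid out as (cx, cy, w, h);
    # returns the same boxes as (x_min, y_min, x_max, y_max).
    boxes = np.asarray(boxes)
    corners = np.empty_like(boxes)
    corners[:, 0] = boxes[:, 0] - boxes[:, 2] / 2  # x_min
    corners[:, 1] = boxes[:, 1] - boxes[:, 3] / 2  # y_min
    corners[:, 2] = boxes[:, 0] + boxes[:, 2] / 2  # x_max
    corners[:, 3] = boxes[:, 1] + boxes[:, 3] / 2  # y_max
    return corners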
Example #4
import argparse

import chainer
import matplotlib.pyplot as plt
import numpy as np
import skimage
import skimage.io
from chainer import serializers
from skimage.transform import resize

import ssd_net

labelmap = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
            'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
            'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']

parser = argparse.ArgumentParser(
    description='Learning convnet from ILSVRC2012 dataset')
parser.add_argument('path', help='Path to training image-label list file')
args = parser.parse_args()
mean = np.array([104, 117, 123])  # BGR channel means used by the VOC-trained SSD model
image = skimage.img_as_float(
    skimage.io.imread(args.path, as_gray=False)).astype(np.float32)

img = resize(image, (300, 300))
img = img * 255 - mean[::-1]  # back to the 0-255 range, subtract the mean (RGB order)
img = img.transpose(2, 0, 1)[::-1]  # HWC RGB -> CHW BGR

model = ssd_net.SSD()
serializers.load_npz("ssd.model", model)
x = chainer.Variable(np.array([img], dtype=np.float32))
model(x, 1)
a = model.detection()
plt.imshow(image)
currentAxis = plt.gca()
colors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist()
for i in a:
    label, conf, x1, y1, x2, y2 = i
    label = int(label) - 1
    x1 = int(round(x1 * image.shape[1]))
    x2 = int(round(x2 * image.shape[1]))
    y1 = int(round(y1 * image.shape[0]))
    y2 = int(round(y2 * image.shape[0]))
    label_name = labelmap[int(label)]
Example #5
import json
import numpy as np
import ssd_net
# with open('/home/vbatvia/VC/Lab/Object detection/cityscapes_samples_labels/konigswinter/konigswinter_000000_000000_gtCoarse_polygons.json','r') as f:
# 	frame_info = json.load(f)
# print(frame_info['objects'][0]['polygon'])
#
# polygons = np.asarray(frame_info['objects'][0]['polygon'],dtype = np.float32)
# left_top = np.min(polygons , axis = 0)
# right_bottom = np.max(polygons , axis = 0)
# print(left_top)
# print(right_bottom)

net = ssd_net.SSD(1)
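
The commented-out lines in this example outline how a Cityscapes polygon annotation is reduced to an axis-aligned bounding box: take the per-axis minimum and maximum of its points. Below is a minimal sketch of that conversion as a helper function; the name polygon_to_bbox is hypothetical, while the 'objects'/'polygon' JSON layout follows the commented code above.

import json

import numpy as np


def polygon_to_bbox(polygon_points):
    # polygon_points: list of (x, y) pairs, e.g. frame_info['objects'][i]['polygon']
    polygons = np.asarray(polygon_points, dtype=np.float32)
    left_top = np.min(polygons, axis=0)        # (x_min, y_min)
    right_bottom = np.max(polygons, axis=0)    # (x_max, y_max)
    return np.concatenate((left_top, right_bottom))  # (x_min, y_min, x_max, y_max)


# usage sketch (the file name is illustrative):
# with open('some_city_gtCoarse_polygons.json', 'r') as f:
#     frame_info = json.load(f)
# bbox = polygon_to_bbox(frame_info['objects'][0]['polygon'])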
Example #6
import json
import os
import random
from glob import glob

import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.optim
from PIL import Image
from torch.autograd import Variable

import bbox_loss
import cityscape_dataset
import ssd_net


class Training():
    # Set the default tensor type; 'torch.cuda.FloatTensor' is the GPU-based FloatTensor
    torch.set_default_tensor_type('torch.cuda.FloatTensor')

    current_directory = os.getcwd()  # current working directory
    training_ratio = 0.8

    if __name__ == '__main__':
        poly_init_fol_name = "cityscapes_samples_labels"
        init_img_path = "cityscapes_samples"

        compl_poly_path = os.path.join(current_directory, poly_init_fol_name,
                                       "*", "*_polygons.json")

        polygon_folder = glob(compl_poly_path)

        polygon_folder = np.array(polygon_folder)

        img_label_list = []

        for file in polygon_folder:
            with open(file, "r") as f:
                frame_info = json.load(f)
                obj_length = len(frame_info['objects'])
                file_path = file
                image_name = file_path.split("/")[-1][:-23]
                for i in range(obj_length):
                    label = frame_info['objects'][i]['label']
                    if label == "ego vehicle":
                        break
                    polygon = np.array(frame_info['objects'][i]['polygon'],
                                       dtype=np.float32)
                    left_top = np.min(polygon, axis=0)
                    right_bottom = np.max(polygon, axis=0)
                    concat = np.concatenate((left_top, right_bottom))
                    img_label_list.append({
                        'image_name': image_name,
                        'file_path': file_path,
                        'label': label,
                        'bbox': concat
                    })

        label_length = len(img_label_list)

        # get images list

        img_path = os.path.join(init_img_path, "*", "*")

        images = glob(img_path)

        images = np.array(images)

        train_datalist = []
        for i in range(0, len(images)):
            img_folder = images[i].split('/')[-2]
            img_name = images[i].split('/')[-1]
            img_class = img_name[:-16]
            image_path = os.path.join(init_img_path, img_folder, img_name)
            #print(image_path)
            bound_boxes = []
            labels = []
            for j in range(label_length):
                if img_label_list[j]["image_name"] == img_class:
                    bbox = img_label_list[j]['bbox']
                    bound_boxes.append(bbox)
                    if img_label_list[j]['label'] in ('car', 'cargroup'):
                        label = 1
                    elif img_label_list[j]['label'] in ('person',
                                                        'persongroup'):
                        label = 2
                    elif img_label_list[j]['label'] == 'traffic sign':
                        label = 3
                    else:
                        label = 0
                    labels.append(label)
            train_datalist.append({
                'image_path': image_path.rstrip(),
                'labels': labels,
                'bound_boxes': bound_boxes
            })

        random.shuffle(train_datalist)
        #print(train_datalist)
        n_train_sets = 0.8 * len(train_datalist)

        train_sets = train_datalist[:int(n_train_sets)]

        im = np.array(Image.open(train_sets[1]['image_path']), dtype=np.uint8)

        # Create figure and axes
        fig, ax = plt.subplots(1)

        # Display the image
        ax.imshow(im)

        # Create a Rectangle patch for each labeled object; each bbox is stored
        # as (x_min, y_min, x_max, y_max), so width and height are the differences
        for i in range(0, len(train_sets[1]['labels'])):
            if train_sets[1]['labels'][i] != 0:
                bbox = train_sets[1]['bound_boxes'][i]
                rect = patches.Rectangle((bbox[0], bbox[1]),
                                         bbox[2] - bbox[0],
                                         bbox[3] - bbox[1],
                                         linewidth=1,
                                         edgecolor='r',
                                         facecolor='none')

                # Add the patch to the Axes
                ax.add_patch(rect)

        plt.show()

        train_dataset = cityscape_dataset.CityScapeDataset(train_sets)
        train_data_loader = torch.utils.data.DataLoader(train_dataset,
                                                        batch_size=20,
                                                        shuffle=True,
                                                        num_workers=0)
        print('Total training items', len(train_dataset),
              ', Total training mini-batches in one epoch:',
              len(train_data_loader))

        n_valid_sets = 0.2 * len(train_datalist)
        valid_sets = train_datalist[int(n_train_sets):int(n_train_sets +
                                                          n_valid_sets)]

        valid_set = cityscape_dataset.CityScapeDataset(valid_sets)
        valid_data_loader = torch.utils.data.DataLoader(valid_set,
                                                        batch_size=20,
                                                        shuffle=True,
                                                        num_workers=0)
        print('Total validation items:', len(valid_set),
              ', Total validation mini-batches in one epoch:',
              len(valid_data_loader))

        ssd = ssd_net.SSD().cuda()
        #print(ssd)

        criterion = bbox_loss.MultiboxLoss((0.1, 0.2))

        optimizer = torch.optim.Adam(ssd.classifier.parameters(), lr=0.01)

        train_losses = []

        max_epochs = 1
        itr = 0
        #print(train_data_loader)
        for epoch_idx in range(0, max_epochs):
            for img_tensor, train_input, train_label in train_data_loader:

                itr += 1

                # Run the network on the GPU when one is available
                device = torch.device(
                    "cuda:0" if torch.cuda.is_available() else "cpu")
                ssd = ssd.to(device)
                # print(device)

                # switch to train mode
                ssd.train()

                # zero the parameter gradients
                optimizer.zero_grad()

                # Forward pass
                # use Variable(*) to allow gradient flow
                train_input = Variable(img_tensor.cuda())
                #print(train_input.dim())
                confidence, train_out = ssd.forward(train_input)  # forward once
                #print(train_out)

                # compute loss
                train_label = Variable(train_label.cuda().float())
                loss = criterion.forward(confidence, train_out, train_label,
                                         train_input)

                # do the backward and compute gradients
                loss.backward()
                #
                # update the parameters (Adam optimizer step)
                optimizer.step()

                train_losses.append((itr, loss.item()))

                if itr % 200 == 0:
                    print('Epoch: %d Itr: %d Loss: %f' %
                          (epoch_idx, itr, loss.item()))

        train_losses = np.asarray(train_losses)
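
Example #6 ends after converting train_losses, a list of (iteration, loss) pairs, into a NumPy array. A minimal follow-up sketch at the same scope for plotting that curve, assuming matplotlib.pyplot is imported as plt as in the other examples (the axis labels are illustrative):

        # train_losses has shape (num_iterations, 2): column 0 is the iteration, column 1 the loss
        plt.plot(train_losses[:, 0], train_losses[:, 1])
        plt.xlabel('Iteration')
        plt.ylabel('Training loss')
        plt.show()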