Beispiel #1
0
def single():
    """Segment a single example image and display the colour-coded result.

    Loads an ERFNet checkpoint from ``args.save_dir``/``args.name``, runs it
    on ``test_content/example_01.png`` and shows the prediction next to the
    input via ``utils.imshow_batch``. Also prints rough FPS numbers.
    """
    print('Mode: Single')
    img = Image.open('test_content/example_01.png').convert('RGB')

    # Cityscapes-style mapping from class name to RGB colour; its length
    # defines the number of output classes of the network.
    class_encoding = OrderedDict([
        ('unlabeled', (0, 0, 0)), ('road', (128, 64, 128)),
        ('sidewalk', (244, 35, 232)), ('building', (70, 70, 70)),
        ('wall', (102, 102, 156)), ('fence', (190, 153, 153)),
        ('pole', (153, 153, 153)), ('traffic_light', (250, 170, 30)),
        ('traffic_sign', (220, 220, 0)), ('vegetation', (107, 142, 35)),
        ('terrain', (152, 251, 152)), ('sky', (70, 130, 180)),
        ('person', (220, 20, 60)), ('rider', (255, 0, 0)),
        ('car', (0, 0, 142)), ('truck', (0, 0, 70)), ('bus', (0, 60, 100)),
        ('train', (0, 80, 100)), ('motorcycle', (0, 0, 230)),
        ('bicycle', (119, 11, 32))
    ])

    num_classes = len(class_encoding)
    model = ERFNet(num_classes)
    model_path = os.path.join(args.save_dir, args.name)
    print('Loading model at:', model_path)
    checkpoint = torch.load(model_path)
    model = model.cuda()
    model.load_state_dict(checkpoint['state_dict'])
    img = img.resize((args.width, args.height), Image.BILINEAR)
    start = time.time()
    # ToTensor yields (C, H, W); unsqueeze adds the batch dimension.
    # (A discarded `torch.reshape(...)` call — a no-op with swapped
    # width/height arguments — was removed here.)
    images = transforms.ToTensor()(img).unsqueeze(0)
    with torch.no_grad():
        images = images.cuda()
        predictions = model(images)
        end = time.time()
        print('model speed:', int(1 / (end - start)), "FPS")
        # Per-pixel argmax over the class dimension -> LongTensor label map.
        _, predictions = torch.max(predictions.data, 1)
        label_to_rgb = transforms.Compose(
            [utils.LongTensorToRGBPIL(class_encoding),
             transforms.ToTensor()])
        color_predictions = utils.batch_transform(predictions.cpu(),
                                                  label_to_rgb)
        end = time.time()
        print('model+transform:', int(1 / (end - start)), "FPS")
        utils.imshow_batch(images.data.cpu(), color_predictions)
Beispiel #2
0
def lane_detect(im_tensor):
    """Detect and classify lanes in a single image tensor.

    Runs ERFNet lane segmentation followed by per-lane classification and
    returns an (H, W) int64 map where 0 is background and values >= 1 are
    lane class ids (class index + 1).

    :param im_tensor: image tensor of shape (C, H, W)
    :return: int64 tensor of shape (H, W) with per-pixel lane classes
    """
    _, height, width = im_tensor.shape
    im_tensor = im_tensor.unsqueeze(0)  # add batch dimension

    # Build both networks and load their pretrained weights.
    seg_net = ERFNet(NUM_CLASSES_SEGMENTATION)
    cls_net = LCNet(NUM_CLASSES_CLASSIFICATION, DESCRIPTOR_SIZE,
                    DESCRIPTOR_SIZE)
    seg_net.load_state_dict(
        torch.load(path + 'pretrained/erfnet_tusimple.pth',
                   map_location=map_location))
    model_path = path + 'pretrained/classification_{}_{}class.pth'.format(
        DESCRIPTOR_SIZE, NUM_CLASSES_CLASSIFICATION)
    cls_net.load_state_dict(torch.load(model_path,
                                       map_location=map_location))

    # Inference mode.
    seg_net = seg_net.eval()
    cls_net = cls_net.eval()

    use_gpu = torch.cuda.is_available()
    if use_gpu:
        seg_net = seg_net.cuda()
        cls_net = cls_net.cuda()
        im_tensor = im_tensor.cuda()

    # Per-pixel argmax over class scores -> segmentation label map.
    out_segmentation = seg_net(im_tensor).max(dim=1)[1]
    out_segmentation_np = out_segmentation.cpu().numpy()[0]

    # One descriptor per detected lane instance; classify each descriptor.
    descriptors, index_map = extract_descriptors(out_segmentation, im_tensor)
    classes = cls_net(descriptors).max(1)[1]

    # Paint each lane instance with its predicted class (+1 so 0 = none).
    lane_map = torch.zeros(height, width, dtype=torch.int64)
    if use_gpu:
        lane_map = lane_map.cuda()
    for i, lane_index in index_map.items():
        lane_map[out_segmentation_np == lane_index] = classes[i] + 1

    return lane_map
Beispiel #3
0
    def __init__(self, options, model=None):
        """Set up validation: labels, dataset, logging and the model.

        If ``model`` is None, standalone validation is carried out: an
        ERFNet is created and its weights are loaded from a checkpoint.
        Otherwise the passed (training) model is validated directly.

        :param options: parsed command line options
        :param model: optional network for validate-while-training mode
        """

        if __name__ == "__main__":
            print(" -> Executing script", os.path.basename(__file__))

        self.opt = options
        self.device = torch.device("cpu" if self.opt.no_cuda else "cuda")

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           LABELS
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        assert self.opt.train_set in {1, 2, 3, 12, 123}, "Invalid train_set!"
        assert self.opt.task_to_val in {0, 1, 2, 3, 12, 123}, "Invalid task!"
        keys_to_load = ['color', 'segmentation']

        # Labels
        labels = self._get_labels_cityscapes()

        # Train IDs
        # trainId 255 is the void/ignore id and is excluded.
        self.train_ids = set([labels[i].trainId for i in range(len(labels))])
        self.train_ids.remove(255)
        self.train_ids = sorted(list(self.train_ids))

        self.num_classes_model = len(self.train_ids)

        # Task handling
        # task_to_val != 0 restricts scoring to the contiguous trainId range
        # [task_low, task_high) of that task; 0 scores all model classes.
        if self.opt.task_to_val != 0:
            labels_task = self._get_task_labels_cityscapes()
            train_ids_task = set(
                [labels_task[i].trainId for i in range(len(labels_task))])
            train_ids_task.remove(255)
            self.task_low = min(train_ids_task)
            self.task_high = max(train_ids_task) + 1
            labels = labels_task
            self.train_ids = sorted(list(train_ids_task))
        else:
            self.task_low = 0
            self.task_high = self.num_classes_model
            self.opt.task_to_val = self.opt.train_set

        # Number of classes for the SegmentationRunningScore
        self.num_classes_score = self.task_high - self.task_low

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           DATASET DEFINITIONS
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        # Data augmentation
        # Deterministic validation pipeline: resize, tensor conversion and
        # zero-mean normalisation only (no random augmentation).
        test_data_transforms = [
            mytransforms.CreateScaledImage(),
            mytransforms.Resize((self.opt.height, self.opt.width),
                                image_types=['color']),
            mytransforms.ConvertSegmentation(),
            mytransforms.CreateColoraug(new_element=True,
                                        scales=self.opt.scales),
            mytransforms.RemoveOriginals(),
            mytransforms.ToTensor(),
            mytransforms.NormalizeZeroMean(),
        ]

        # If hyperparameter search, only load the respective validation set. Else, load the full validation set.
        if self.opt.hyperparameter:
            trainvaltest_split = 'train'
            folders_to_load = CitySet.get_city_set(-1)
        else:
            trainvaltest_split = 'validation'
            folders_to_load = None

        test_dataset = CityscapesDataset(dataset='cityscapes',
                                         split=self.opt.dataset_split,
                                         trainvaltest_split=trainvaltest_split,
                                         video_mode='mono',
                                         stereo_mode='mono',
                                         scales=self.opt.scales,
                                         labels_mode='fromid',
                                         labels=labels,
                                         keys_to_load=keys_to_load,
                                         data_transforms=test_data_transforms,
                                         video_frames=self.opt.video_frames,
                                         folders_to_load=folders_to_load)

        self.test_loader = DataLoader(dataset=test_dataset,
                                      batch_size=self.opt.batch_size,
                                      shuffle=False,
                                      num_workers=self.opt.num_workers,
                                      pin_memory=True,
                                      drop_last=False)

        print(
            "++++++++++++++++++++++ INIT VALIDATION ++++++++++++++++++++++++")
        print("Using dataset\n  ", self.opt.dataset, "with split",
              self.opt.dataset_split)
        print("There are {:d} validation items\n  ".format(len(test_dataset)))
        print("Validating classes up to train set\n  ", self.opt.train_set)

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           LOGGING OPTIONS
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        # If no model is passed, standalone validation is to be carried out. The log_path needs to be set before
        # self.load_model() is invoked.
        if model is None:
            self.opt.validate = False
            self.opt.model_name = self.opt.load_model_name

        path_getter = GetPath()
        log_path = path_getter.get_checkpoint_path()
        self.log_path = os.path.join(log_path, 'erfnet', self.opt.model_name)

        # All outputs will be saved to save_path
        self.save_path = self.log_path

        # Create output path for standalone validation
        if not self.opt.validate:
            save_dir = 'eval_{}'.format(self.opt.dataset)

            if self.opt.hyperparameter:
                save_dir = save_dir + '_hyper'

            save_dir = save_dir + '_task_to_val{}'.format(self.opt.task_to_val)

            self.save_path = os.path.join(self.log_path, save_dir)

            if not os.path.exists(self.save_path):
                os.makedirs(self.save_path)

        # Copy this file to save_path
        shutil.copy2(__file__, self.save_path)

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           MODEL DEFINITION
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        # Standalone validation
        if not self.opt.validate:
            # Create a conventional ERFNet
            self.model = ERFNet(self.num_classes_model, self.opt)
            self.load_model()
            self.model.to(self.device)

        # Validate while training
        else:
            self.model = model

        self.model.eval()

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           LOGGING OPTIONS II
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        # self.called is used to decide which file mode shall be used when writing metrics to disk.
        self.called = False

        self.metric_model = SegmentationRunningScore(self.num_classes_score)

        # Metrics are only saved if val_frequency > 0!
        if self.opt.val_frequency != 0:
            print("Saving metrics to\n  ", self.save_path)

        # Set up colour output. Coloured images are only output if standalone validation is carried out!
        if not self.opt.validate and self.opt.save_pred_to_disk:
            # Output path
            self.img_path = os.path.join(
                self.save_path, 'output_{}'.format(self.opt.weights_epoch))

            if self.opt.pred_wout_blend:
                self.img_path += '_wout_blend'

            if not os.path.exists(self.img_path):
                os.makedirs(self.img_path)
            print("Saving prediction images to\n  ", self.img_path)
            print("Save frequency\n  ", self.opt.pred_frequency)

            # Get the colours from dataset.
            # trainIds are shifted by task_low so the colour keys start at 0;
            # key 255 stays the black void class.
            colors = [
                (label.trainId - self.task_low, label.color)
                for label in labels
                if label.trainId != 255 and label.trainId in self.train_ids
            ]
            colors.append((255, (0, 0, 0)))  # void class
            self.id_color = dict(colors)
            self.id_color_keys = [key for key in self.id_color.keys()]
            self.id_color_vals = [val for val in self.id_color.values()]

            # Ongoing index to name the outputs
            self.img_idx = 0

        # Set up probability output. Probabilities are only output if standalone validation is carried out!
        if not self.opt.validate and self.opt.save_probs_to_disk:
            # Output path
            self.logit_path = os.path.join(
                self.save_path,
                'probabilities_{}'.format(self.opt.weights_epoch))
            if not os.path.exists(self.logit_path):
                os.makedirs(self.logit_path)
            print("Saving probabilities to\n  ", self.logit_path)
            print("Save frequency\n  ", self.opt.probs_frequency)

            # Ongoing index to name the probability outputs
            self.probs_idx = 0

        print(
            "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")

        # Save all options to disk and print them to stdout
        self._print_options()
        self._save_opts(len(test_dataset))
Beispiel #4
0
    def __init__(self, options):
        """Set up the single-image segmentation demo.

        Validates the options, builds and loads the ERFNet model, derives
        the output file names and prepares the trainId -> colour mapping.

        :param options: parsed command line options
        """
        self.opt = options
        self.device = torch.device("cpu" if self.opt.no_cuda else "cuda")

        # Assertions
        assert os.path.isfile(self.opt.image), "Invalid image!"
        # Normalise path separators to the host convention.
        # Bug fix: str.replace returns a new string; the original discarded
        # the result, so the normalisation never took effect.
        self.opt.image = self.opt.image.replace('/', os.sep)
        self.opt.image = self.opt.image.replace('\\', os.sep)
        self.image_name = self.opt.image.split(os.sep)[-1]

        if self.opt.model_stage == 1:
            assert self.opt.task in {1}, "Invalid task!"
            assert not self.opt.with_weights, "Weights for stage 1 not available"
        elif self.opt.model_stage == 2:
            assert self.opt.task in {1, 2, 12}, "Invalid task!"
        elif self.opt.model_stage == 3:
            assert self.opt.task in {1, 2, 3, 12, 123}, "Invalid task!"

        # Model and task set-up: number of classes per incremental stage and
        # the trainId interval [task_low, task_high) covered by the task.
        self.num_classes_model = {1: 5, 2: 11, 3: 19}[self.opt.model_stage]
        self.task_low, self.task_high = {1: (0, 5), 2: (5, 11), 3: (11, 19),
                                         12: (0, 11), 123: (0, 19)}[self.opt.task]

        # Create a conventional ERFNet
        self.model = ERFNet(self.num_classes_model, self.opt)
        self._load_model()
        self.model.to(self.device)
        self.model.eval()

        # Ground truth: metrics are only computed when a GT file is given.
        self.metrics = False
        if self.opt.ground_truth:
            assert os.path.isfile(self.opt.ground_truth), "Invalid ground truth!"
            self.metrics = True
            self.num_classes_score = self.task_high - self.task_low
            self.metric_model = SegmentationRunningScore(self.num_classes_score)

        # Output directory
        if self.opt.output_path:
            if not os.path.isdir(self.opt.output_path):
                os.makedirs(self.opt.output_path)
        else:
            # Bug fix: the original called os.path.join() with a list
            # argument, which raises TypeError. Default to the directory
            # containing the input image.
            self.opt.output_path = os.path.dirname(self.opt.image)
        image_extension_idx = self.image_name.rfind('.')
        segmentation_name = self.image_name[:image_extension_idx] + \
                            "_seg_stage_{}_task_{}".format(self.opt.model_stage, self.opt.task) + \
                            self.image_name[image_extension_idx:]
        self.output_image = os.path.join(self.opt.output_path, segmentation_name)
        ground_truth_name = self.image_name[:image_extension_idx] + \
                            "_gt_stage_{}_task_{}".format(self.opt.model_stage, self.opt.task) + \
                            self.image_name[image_extension_idx:]
        self.output_gt = os.path.join(self.opt.output_path, ground_truth_name)

        # stdout output
        print("++++++++++++++++++++++ INIT DEMO ++++++++++++++++++++++++")
        print("Image:\t {}".format(self.opt.image))
        print("GT:\t {}".format(self.opt.ground_truth))
        print("Output:\t {}".format(self.opt.output_path))
        print("Stage:\t {}".format(self.opt.model_stage))
        print("Weights: {}".format(self.opt.with_weights))
        print("Task:\t {}".format(self.opt.task))
        print("!!! MIND THAT THE MODELS WERE TRAINED USING AN IMAGE RESOLUTION OF 1024x512px !!!")

        # Class colours: map each trainId (shifted by task_low so keys start
        # at 0) to its RGB colour; key 255 stays the black void class.
        labels = labels_cityscape_seg_train3_eval.getlabels()
        colors = [(label.trainId - self.task_low, label.color)
                  for label in labels
                  if label.trainId != 255 and label.trainId in range(0, 19)]
        colors.append((255, (0, 0, 0)))  # void class
        self.id_color = dict(colors)
        self.id_color_keys = list(self.id_color.keys())
        self.id_color_vals = list(self.id_color.values())
    def __init__(self, options):
        """Set up ERFNet training: datasets, loaders, logging, model,
        optimizer, learning-rate schedule, loss and metric.

        :param options: parsed command line options
        """

        print(" -> Executing script", os.path.basename(__file__))

        self.opt = options
        self.device = torch.device("cpu" if self.opt.no_cuda else "cuda")

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           LABELS AND CITIES
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        assert self.opt.train_set in {123, 1}, "Invalid train_set!"
        keys_to_load = ['color', 'segmentation']

        # Labels
        if self.opt.train_set == 1:
            labels = labels_cityscape_seg_train1.getlabels()
        else:
            labels = labels_cityscape_seg_train3_eval.getlabels()

        # Train IDs
        # trainId 255 is the void/ignore id and is excluded from the count.
        self.train_ids = set([labels[i].trainId for i in range(len(labels))])
        self.train_ids.remove(255)

        self.num_classes = len(self.train_ids)

        # Apply city filter
        folders_to_train = CitySet.get_city_set(0)
        if self.opt.city:
            folders_to_train = CitySet.get_city_set(self.opt.train_set)

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           DATASET DEFINITIONS
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        # Data augmentation
        # Training pipeline: random flip/rescale/crop plus colour jitter,
        # then tensor conversion and zero-mean normalisation.
        train_data_transforms = [
            mytransforms.RandomHorizontalFlip(),
            mytransforms.CreateScaledImage(),
            mytransforms.Resize((self.opt.height, self.opt.width),
                                image_types=keys_to_load),
            mytransforms.RandomRescale(1.5),
            mytransforms.RandomCrop(
                (self.opt.crop_height, self.opt.crop_width)),
            mytransforms.ConvertSegmentation(),
            mytransforms.CreateColoraug(new_element=True,
                                        scales=self.opt.scales),
            mytransforms.ColorJitter(brightness=0.2,
                                     contrast=0.2,
                                     saturation=0.2,
                                     hue=0.1,
                                     gamma=0.0),
            mytransforms.RemoveOriginals(),
            mytransforms.ToTensor(),
            mytransforms.NormalizeZeroMean(),
        ]

        train_dataset = CityscapesDataset(
            dataset="cityscapes",
            trainvaltest_split='train',
            video_mode='mono',
            stereo_mode='mono',
            scales=self.opt.scales,
            labels_mode='fromid',
            labels=labels,
            keys_to_load=keys_to_load,
            data_transforms=train_data_transforms,
            video_frames=self.opt.video_frames,
            folders_to_load=folders_to_train,
        )

        self.train_loader = DataLoader(dataset=train_dataset,
                                       batch_size=self.opt.batch_size,
                                       shuffle=True,
                                       num_workers=self.opt.num_workers,
                                       pin_memory=True,
                                       drop_last=True)

        # Deterministic validation pipeline (no random augmentation).
        val_data_transforms = [
            mytransforms.CreateScaledImage(),
            mytransforms.Resize((self.opt.height, self.opt.width),
                                image_types=keys_to_load),
            mytransforms.ConvertSegmentation(),
            mytransforms.CreateColoraug(new_element=True,
                                        scales=self.opt.scales),
            mytransforms.RemoveOriginals(),
            mytransforms.ToTensor(),
            mytransforms.NormalizeZeroMean(),
        ]

        # NOTE(review): validation uses the 'train' split restricted to the
        # held-out city set (get_city_set(-1)) — presumably deliberate city
        # hold-out validation; confirm against CitySet's definition.
        val_dataset = CityscapesDataset(
            dataset=self.opt.dataset,
            trainvaltest_split="train",
            video_mode='mono',
            stereo_mode='mono',
            scales=self.opt.scales,
            labels_mode='fromid',
            labels=labels,
            keys_to_load=keys_to_load,
            data_transforms=val_data_transforms,
            video_frames=self.opt.video_frames,
            folders_to_load=CitySet.get_city_set(-1))

        self.val_loader = DataLoader(dataset=val_dataset,
                                     batch_size=self.opt.batch_size,
                                     shuffle=False,
                                     num_workers=self.opt.num_workers,
                                     pin_memory=True,
                                     drop_last=True)

        self.val_iter = iter(self.val_loader)

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           LOGGING OPTIONS
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        print(
            "++++++++++++++++++++++ INIT TRAINING ++++++++++++++++++++++++++")
        print("Using dataset:\n  ", self.opt.dataset, "with split",
              self.opt.dataset_split)
        print(
            "There are {:d} training items and {:d} validation items\n".format(
                len(train_dataset), len(val_dataset)))

        path_getter = GetPath()
        log_path = path_getter.get_checkpoint_path()
        self.log_path = os.path.join(log_path, 'erfnet', self.opt.model_name)

        # One tensorboard writer per phase.
        self.writers = {}
        for mode in ["train", "validation"]:
            self.writers[mode] = SummaryWriter(
                os.path.join(self.log_path, mode))

        # Copy this file to log dir
        shutil.copy2(__file__, self.log_path)

        print("Training model named:\n  ", self.opt.model_name)
        print("Models and tensorboard events files are saved to:\n  ",
              self.log_path)
        print("Training is using:\n  ", self.device)
        print("Training takes place on train set:\n  ", self.opt.train_set)
        print(
            "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           MODEL DEFINITION
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        # Instantiate model
        self.model = ERFNet(self.num_classes, self.opt)
        self.model.to(self.device)
        self.parameters_to_train = self.model.parameters()

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           OPTIMIZER SET-UP
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        self.model_optimizer = optim.Adam(params=self.parameters_to_train,
                                          lr=self.opt.learning_rate,
                                          weight_decay=self.opt.weight_decay)
        # Polynomial learning-rate decay with power 0.9.
        # NOTE(review): (epoch - 1) makes the factor slightly greater than 1
        # at epoch 0 — confirm this offset is intended.
        lambda1 = lambda epoch: pow((1 -
                                     ((epoch - 1) / self.opt.num_epochs)), 0.9)
        self.model_lr_scheduler = optim.lr_scheduler.LambdaLR(
            self.model_optimizer, lr_lambda=lambda1)

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           LOSSES
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        self.crossentropy = CrossEntropyLoss(ignore_background=True,
                                             device=self.device)
        self.crossentropy.to(self.device)

        self.metric_model = SegmentationRunningScore(self.num_classes)

        # Save all options to disk and print them to stdout
        self.save_opts(len(train_dataset), len(val_dataset))
        self._print_options()

        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        #                           EVALUATOR DEFINITION
        # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        if self.opt.validate:
            self.evaluator = Evaluator(self.opt, self.model)
Beispiel #6
0
def train(train_loader, val_loader, circ_S):
    """Train an ERFNet reconstruction model and return it.

    Trains with Adam + StepLR decay, validates every epoch, keeps the
    checkpoint with the best validation SNR and optionally plots loss/SNR
    curves via visdom.

    :param train_loader: DataLoader yielding training batches
    :param val_loader: DataLoader yielding validation batches
    :param circ_S: circulant sensing matrix passed to ReconsLoss
    :return: the trained model (weights from the final epoch)
    """
    print("\nTraining...\n")

    model = ERFNet(1).to(device).double()
    criterion = ReconsLoss(circ_S)

    optimizer = optim.Adam(model.parameters(),
                           lr=args.learning_rate,
                           weight_decay=args.weight_decay)

    # Learning rate decay scheduler
    lr_updater = lr_scheduler.StepLR(optimizer, args.lr_decay_epochs,
                                     args.lr_decay)

    # Optionally resume from a checkpoint
    if args.resume:
        model, optimizer, start_epoch, best_loss, best_snr = utils.load_checkpoint(
            model, optimizer, args.save_dir, args.name)
        # Bug fix: the original reused placeholder {1} for both fields and
        # never passed best_loss, so the loss column printed the SNR value.
        print("Resuming from model: Start epoch = {0} "
              "| Best mean loss = {1:.4f} Best mean snr = {2:.4f}".format(
                  start_epoch, best_loss, best_snr))
    else:
        start_epoch = 0
        best_loss = 0
        best_snr = 0

    if args.visdom:
        vis = visdom.Visdom()

        loss_win = vis.line(X=np.column_stack(
            (np.array(start_epoch), np.array(start_epoch))),
                            Y=np.column_stack(
                                (np.array(best_loss), np.array(best_loss))),
                            opts=dict(legend=['train', 'test'],
                                      xlabel='epoch',
                                      ylabel='loss',
                                      title='Loss'))
        snr_win = vis.line(X=np.column_stack(
            (np.array(start_epoch), np.array(start_epoch))),
                           Y=np.column_stack((np.array(0.), np.array(0.))),
                           opts=dict(legend=['train', 'test'],
                                     xlabel='epoch',
                                     ylabel='snr',
                                     title='SNR'))

    # Start Training
    print()
    # Renamed from `train`/`val`: the original locals shadowed this very
    # function and made recursive/readable use impossible.
    trainer = Train(model, train_loader, optimizer, criterion, device)
    validator = Test(model, val_loader, criterion, device)
    for epoch in range(start_epoch, args.epochs):
        print(">>>> [Epoch: {0:d}] Training".format(epoch))

        epoch_loss, epoch_snr = trainer.run_epoch(lr_updater, args.print_step)
        lr_updater.step()
        print(
            ">>>> [Epoch: {0:d}] Avg. loss: {1:.4f} Avg. snr: {2:.4f} Lr: {3:f}"
            .format(epoch, epoch_loss, epoch_snr,
                    lr_updater.get_lr()[0]))

        if (epoch + 1) % 1 == 0 or epoch + 1 == args.epochs:
            print(">>>> [Epoch: {0:d}] Validation".format(epoch))

            loss, snr = validator.run_epoch(args.print_step)

            print(">>>> [Epoch: {0:d}] Avg. loss: {1:.4f} Avg. snr: {2:.4f}".
                  format(epoch, loss, snr))

            # Save the model if it's the best thus far
            if snr > best_snr:
                print("\nBest model thus far. Saving...\n")
                best_snr = snr
                # Bug fix: record the validation loss of the best model; the
                # original saved the stale initial/loaded best_loss forever.
                best_loss = loss
                utils.save_checkpoint(model, optimizer, epoch + 1, best_loss,
                                      best_snr, args)
        if args.visdom:
            # NOTE(review): `loss`/`snr` come from the validation branch;
            # with the current cadence ((epoch + 1) % 1 == 0) it runs every
            # epoch, but if that cadence changes these values may be stale.
            vis.line(X=np.column_stack((np.array(epoch), np.array(epoch))),
                     Y=np.column_stack((np.array(epoch_loss), np.array(loss))),
                     win=loss_win,
                     update='append')

            vis.line(X=np.column_stack((np.array(epoch), np.array(epoch))),
                     Y=np.column_stack((np.array(epoch_snr), np.array(snr))),
                     win=snr_win,
                     update='append')

    return model
Beispiel #7
0
    # Fail fast if the dataset directory doesn't exist
    assert os.path.isdir(
        args.dataset_dir), "The directory \"{0}\" doesn't exist.".format(
            args.dataset_dir)

    # Fail fast if the saving directory doesn't exist
    assert os.path.isdir(
        args.save_dir), "The directory \"{0}\" doesn't exist.".format(
            args.save_dir)

    # Build the data loaders and the circulant sensing matrix.
    train_loader, val_loader, test_loader = load_dataset(dataset)
    circ_S, _ = hadamard_s(args.matrix_size)

    # Mode 'full' runs training followed by testing.
    if args.mode.lower() in {'train', 'full'}:
        model = train(train_loader, val_loader, circ_S)

    if args.mode.lower() in {'test', 'full'}:
        if args.mode.lower() == 'test':
            # Initialize a fresh ERFNet model (no trained weights yet)
            model = ERFNet(1).to(device).double()

        # Initialize an optimizer just so we can retrieve the model from the
        # checkpoint (load_checkpoint requires one)
        optimizer = optim.Adam(model.parameters())

        # Load the previously saved model state into the ERFNet model;
        # load_checkpoint returns a tuple whose first element is the model.
        model = utils.load_checkpoint(model, optimizer, args.save_dir,
                                      args.name)[0]

        test(model, test_loader, circ_S)
Beispiel #8
0
# Load the input image and show it for visual inspection.
# im = Image.open('images/test.jpg')
im = Image.open('/aimldl-dat/samples/lanenet/4.jpg')
print(im)
# ipynb visualization
# get_ipython().run_line_magic('matplotlib', 'inline')
imgplot = imshow(np.asarray(im))
show()

# Resize to the network input resolution before tensor conversion.
im = im.resize((WIDTH, HEIGHT))

# Convert to a float tensor and add the batch dimension -> (1, C, H, W).
im_tensor = ToTensor()(im)
im_tensor = im_tensor.unsqueeze(0)

# We also need to load the weights of the CNNs. We simply load it using pytorch methods.
# Creating CNNs and loading pretrained models
segmentation_network = ERFNet(NUM_CLASSES_SEGMENTATION)
classification_network = LCNet(NUM_CLASSES_CLASSIFICATION, DESCRIPTOR_SIZE,
                               DESCRIPTOR_SIZE)

# map_location lets GPU-trained weights load on CPU-only machines.
segmentation_network.load_state_dict(
    torch.load('pretrained/erfnet_tusimple.pth', map_location=map_location))
model_path = 'pretrained/classification_{}_{}class.pth'.format(
    DESCRIPTOR_SIZE, NUM_CLASSES_CLASSIFICATION)
classification_network.load_state_dict(
    torch.load(model_path, map_location=map_location))

# Switch both networks to inference mode.
segmentation_network = segmentation_network.eval()
classification_network = classification_network.eval()

if torch.cuda.is_available():
    segmentation_network = segmentation_network.cuda()
    # NOTE(review): classification_network is presumably moved to CUDA in
    # the lines following this chunk — confirm in the full file.
Beispiel #9
0
def video():
    """Segment every 1000th frame of a test video and display the results.

    Optionally removes fisheye distortion first (args.rmdistort). The
    checkpoint, class encoding and undistortion maps are loop-invariant and
    are therefore built once, before the frame loop (the original rebuilt
    and reloaded everything for every processed frame).
    """
    print('testing from video')
    cameraWidth = 1920
    cameraHeight = 1080
    cameraMatrix = np.matrix([[1.3878727764994030e+03, 0, cameraWidth / 2],
                              [0, 1.7987055172413220e+03, cameraHeight / 2],
                              [0, 0, 1]])

    distCoeffs = np.matrix([
        -5.8881725390917083e-01, 5.8472404395779809e-01,
        -2.8299599929891900e-01, 0
    ])

    # Undistortion maps depend only on the constant camera intrinsics;
    # compute them once instead of per frame.
    if args.rmdistort:
        P = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(
            cameraMatrix, distCoeffs, (cameraWidth, cameraHeight), None)
        map1, map2 = cv2.fisheye.initUndistortRectifyMap(
            cameraMatrix, distCoeffs, np.eye(3), P, (1920, 1080),
            cv2.CV_16SC2)

    # Cityscapes-style class -> RGB colour mapping.
    class_encoding = OrderedDict([
        ('unlabeled', (0, 0, 0)), ('road', (128, 64, 128)),
        ('sidewalk', (244, 35, 232)), ('building', (70, 70, 70)),
        ('wall', (102, 102, 156)), ('fence', (190, 153, 153)),
        ('pole', (153, 153, 153)), ('traffic_light', (250, 170, 30)),
        ('traffic_sign', (220, 220, 0)),
        ('vegetation', (107, 142, 35)), ('terrain', (152, 251, 152)),
        ('sky', (70, 130, 180)), ('person', (220, 20, 60)),
        ('rider', (255, 0, 0)), ('car', (0, 0, 142)),
        ('truck', (0, 0, 70)), ('bus', (0, 60, 100)),
        ('train', (0, 80, 100)), ('motorcycle', (0, 0, 230)),
        ('bicycle', (119, 11, 32))
    ])
    num_classes = len(class_encoding)

    # Load the checkpoint once instead of once per processed frame.
    model_path = os.path.join(args.save_dir, args.name)
    checkpoint = torch.load(model_path)
    model = ERFNet(num_classes)
    model = model.cuda()
    model.load_state_dict(checkpoint['state_dict'])
    label_to_rgb = transforms.Compose([
        utils.LongTensorToRGBPIL(class_encoding),
        transforms.ToTensor()
    ])

    vidcap = cv2.VideoCapture('test_content/massachusetts.mp4')
    success = True
    i = 0
    while success:
        success, img = vidcap.read()
        # Bug fix: on the final failed read img is None; the original would
        # still try to process it when i happened to be a multiple of 1000.
        if not success:
            break
        if i % 1000 == 0:
            print("frame: ", i)
            if args.rmdistort:
                img = cv2.remap(img, map1, map2, cv2.INTER_LINEAR)
            # OpenCV delivers BGR; convert for PIL/the model.
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = Image.fromarray(img)
            img = img.resize((args.width, args.height), Image.BILINEAR)
            start = time.time()
            # ToTensor yields (C, H, W); unsqueeze adds the batch dim.
            # (A discarded `torch.reshape(...)` no-op was removed here.)
            images = transforms.ToTensor()(img).unsqueeze(0)
            with torch.no_grad():
                images = images.cuda()
                predictions = model(images)
                end = time.time()
                print('model speed:', int(1 / (end - start)), "FPS")
                _, predictions = torch.max(predictions.data, 1)
                color_predictions = utils.batch_transform(
                    predictions.cpu(), label_to_rgb)
                end = time.time()
                print('model+transform:', int(1 / (end - start)), "FPS")
                utils.imshow_batch(images.data.cpu(), color_predictions)
        i += 1
Beispiel #10
0
def train(train_loader, val_loader, class_weights, class_encoding):
    """Train an ERFNet model, validating every 10 epochs.

    Args:
        train_loader: DataLoader yielding training batches.
        val_loader: DataLoader yielding validation batches.
        class_weights: per-class weight tensor for the cross-entropy loss.
        class_encoding: OrderedDict mapping class name -> RGB color; its
            length defines the number of output classes.

    Returns:
        Tuple of (model, train_loss, train_miou, val_loss, val_miou),
        where the last four are per-epoch history lists.
    """
    print("Training...")
    num_classes = len(class_encoding)
    model = ERFNet(num_classes)
    criterion = nn.CrossEntropyLoss(weight=class_weights)
    optimizer = optim.Adam(model.parameters(),
                           lr=args.learning_rate,
                           weight_decay=args.weight_decay)
    # Learning rate decay scheduler
    lr_updater = lr_scheduler.StepLR(optimizer, args.lr_decay_epochs,
                                     args.lr_decay)

    # Evaluation metric; optionally exclude the 'unlabeled' class from IoU.
    if args.ignore_unlabeled:
        ignore_index = list(class_encoding).index('unlabeled')
    else:
        ignore_index = None

    metric = IoU(num_classes, ignore_index=ignore_index)

    if use_cuda:
        model = model.cuda()
        criterion = criterion.cuda()

    # Optionally resume from a checkpoint
    if args.resume:
        (model, optimizer, start_epoch, best_miou, val_miou, train_miou,
         val_loss, train_loss) = utils.load_checkpoint(
             model, optimizer, args.save_dir, args.name, True)
        print(
            "Resuming from model: Start epoch = {0} | Best mean IoU = {1:.4f}".
            format(start_epoch, best_miou))
    else:
        start_epoch = 0
        best_miou = 0
        val_miou = []
        train_miou = []
        val_loss = []
        train_loss = []

    # Distinct names so the enclosing function's own name is not shadowed.
    trainer = Train(model, train_loader, optimizer, criterion, metric,
                    use_cuda)
    validator = Test(model, val_loader, criterion, metric, use_cuda)

    for epoch in range(start_epoch, args.epochs):
        print(">> [Epoch: {0:d}] Training".format(epoch))
        epoch_loss, (iou, miou) = trainer.run_epoch(args.print_step)
        # PyTorch >= 1.1 requires scheduler.step() AFTER the epoch's
        # optimizer steps; stepping before would skip the initial LR.
        lr_updater.step()
        print(
            ">> [Epoch: {0:d}] Avg. loss: {1:.4f} | Mean IoU: {2:.4f}".format(
                epoch, epoch_loss, miou))
        train_loss.append(epoch_loss)
        train_miou.append(miou)

        # Perform a validation pass every 10 epochs and on the final epoch.
        if (epoch + 1) % 10 == 0 or epoch + 1 == args.epochs:
            print(">>>> [Epoch: {0:d}] Validation".format(epoch))
            loss, (iou, miou) = validator.run_epoch(args.print_step)
            print(">>>> [Epoch: {0:d}] Avg. loss: {1:.4f} | Mean IoU: {2:.4f}".
                  format(epoch, loss, miou))
            val_loss.append(loss)
            val_miou.append(miou)
            # Print per class IoU on last epoch or if best iou
            if epoch + 1 == args.epochs or miou > best_miou:
                for key, class_iou in zip(class_encoding.keys(), iou):
                    print("{0}: {1:.4f}".format(key, class_iou))
            # Save the model if it's the best thus far
            if miou > best_miou:
                print("Best model thus far. Saving...")
                best_miou = miou
                utils.save_checkpoint(model, optimizer, epoch + 1, best_miou,
                                      val_miou, train_miou, val_loss,
                                      train_loss, args)

    return model, train_loss, train_miou, val_loss, val_miou
Beispiel #11
0
        # Load dataset splits, per-class loss weights, and the color encoding.
        loaders, w_class, class_encoding = load_dataset(dataset)
        train_loader, val_loader, test_loader = loaders

        if args.mode.lower() in {'train'}:
            # Train the model, then plot the loss/mIoU history curves.
            model, tl, tmiou, vl, vmiou = train(train_loader, val_loader,
                                                w_class, class_encoding)
            plt.plot(tl, label="train loss")
            plt.plot(tmiou, label="train miou")
            plt.plot(vl, label="val loss")
            plt.plot(vmiou, label="val miou")
            plt.legend()
            plt.xlabel("Epoch")
            plt.ylabel("loss/accuracy")
            plt.grid(True)
            plt.xticks()
            # NOTE(review): assumes a ./plots directory already exists.
            plt.savefig('./plots/train.png')
        elif args.mode.lower() == 'test':
            # Rebuild the network and restore the checkpointed weights
            # before evaluating on the test split.
            num_classes = len(class_encoding)
            #model = ENet(num_classes)
            model = ERFNet(num_classes)
            if use_cuda:
                model = model.cuda()
            # Optimizer is only needed to satisfy load_checkpoint's signature.
            optimizer = optim.Adam(model.parameters())
            model = utils.load_checkpoint(model, optimizer, args.save_dir,
                                          args.name)[0]
            test(model, test_loader, w_class, class_encoding)
        else:
            raise RuntimeError(
                "\"{0}\" is not a valid choice for execution mode.".format(
                    args.mode))