Example #1
def main():
    # Seed all sources of randomness to 0 for reproducibility
    np.random.seed(0)
    torch.manual_seed(0)
    torch.cuda.manual_seed(0)
    random.seed(0)

    opt = Opts().parse()

    # Create data loaders
    train_loader, test_loader = create_data_loaders(opt)

    # Create nn
    model, criterion_hm, criterion_paf, latest_inx = create_model(opt)
    model = model.cuda()
    criterion_hm = criterion_hm.cuda()
    criterion_paf = criterion_paf.cuda()

    # Create optimizer
    optimizer = create_optimizer(opt, model)

    # Other params
    n_epochs = opt.nEpoch
    to_train = opt.train
    drop_lr = opt.dropLR
    val_interval = opt.valInterval
    learn_rate = opt.LR
    visualize_out = opt.vizOut

    # Train / test
    train_net(train_loader, test_loader, model, criterion_hm, criterion_paf,
              optimizer, n_epochs, val_interval, learn_rate, drop_lr,
              opt.saveDir, visualize_out, latest_inx)
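
The seeding block at the top of this example is repeated verbatim in every example below; a small helper could factor it out. The sketch below is an editorial addition, not part of the project shown here, and it assumes random, numpy and torch are the only randomness sources involved.

import random

import numpy as np
import torch


def set_seed(seed=0):
    # Seed Python, NumPy and PyTorch (CPU and, when present, every GPU)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)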
Example #2
def main():
    # Seed all sources of randomness to 0 for reproducibility
    np.random.seed(0)
    torch.manual_seed(0)
    torch.cuda.manual_seed(0)
    random.seed(0)

    opt = Opts().parse()

    # Create data loaders
    train_loader, test_loader = create_data_loaders(opt)

    # Create nn
    model, criterion_hm, criterion_paf, latest_inx = create_model(opt)
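    # The CUDA transfers below are intentionally commented out, so this variant runs on the CPU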
    # model = model.cuda()
    # criterion_hm = criterion_hm.cuda()
    # criterion_paf = criterion_paf.cuda()

    # Create optimizer
    optimizer = create_optimizer(opt, model)

    # Other params
    n_epochs = opt.nEpoch
    to_train = opt.train
    drop_lr = opt.dropLR
    val_interval = opt.valInterval
    learn_rate = opt.LR
    visualize_out = opt.vizOut

    # Instead of training, fetch a single raw test sample for inspection
    img, heat_map, paf, ignore_mask, keypoints = test_loader.dataset.get_item_raw(0, False)
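
This variant stops after fetching one raw sample, so a quick way to inspect it is to plot the image next to its combined keypoint heatmap. The snippet below is only a sketch: it assumes img is an HxWx3 array and heat_map is a KxHxW stack of per-keypoint maps, which get_item_raw is not shown to guarantee.

import matplotlib.pyplot as plt

fig, axes = plt.subplots(1, 2, figsize=(10, 5))
axes[0].imshow(img)
axes[0].set_title('image')
axes[1].imshow(heat_map.max(axis=0))  # collapse the keypoint channels into one map
axes[1].set_title('combined heatmap')
plt.show()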
Example #3
def main():
    # Seed all sources of randomness to 0 for reproducibility
    np.random.seed(0)
    torch.manual_seed(0)
    torch.cuda.manual_seed(0)
    random.seed(0)

    opt = Opts().parse()
    os.environ["CUDA_VISIBLE_DEVICES"] = opt["env"]["device"]
    print("Using GPU: {}".format(opt["env"]["device"]))

    # Create data loaders
    train_loader, test_loader, _ = create_data_loaders(opt)
    # Create nn
    model, criterion_hm, criterion_paf = create_model(opt)
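    # Wrap the model in DataParallel when several comma-separated GPU indices are given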
    model = torch.nn.DataParallel(model, device_ids=[int(index) for index in opt["env"]["device"].split(",")]).cuda() \
        if "," in opt["env"]["device"] else model.cuda()
    if opt["env"]["loadModel"] is not None and opt["typ"] == 'cpr':
        model.load_state_dict(torch.load(opt["env"]["loadModel"]))
        print('Loaded model from ' + opt["env"]["loadModel"])
    criterion_hm = criterion_hm.cuda()
    criterion_paf = criterion_paf.cuda()

    # Create optimizer
    optimizer = create_optimizer(opt, model)

    # Other params
    to_train = opt["to_train"]
    visualize_out = opt["viz"]["vizOut"]

    # Train / test
    processor = process(model)
    if to_train:
        processor.train_net(train_loader, test_loader, criterion_hm, criterion_paf, optimizer, opt, viz_output=visualize_out)
    else:
        processor.validate_net(test_loader, criterion_hm, criterion_paf, save_dir=opt["env"]["saveDir"], viz_output=visualize_out)
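
One caveat with the load_state_dict call in this example: a checkpoint saved from a DataParallel-wrapped model has a 'module.' prefix on every key and will not load into an unwrapped model. A minimal sketch of the usual workaround, assuming the checkpoint is a plain state dict (which this example does not confirm):

state = torch.load(opt["env"]["loadModel"], map_location='cpu')
# Strip the 'module.' prefix that DataParallel adds to parameter names
state = {k[len('module.'):] if k.startswith('module.') else k: v
         for k, v in state.items()}
model.load_state_dict(state)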
Example #4
def main():
    # Seed all sources of randomness to 0 for reproducibility
    torch.manual_seed(0)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(0)
    random.seed(0)
    
    # Set cudnn.benchmark to True to speed up training
    if torch.cuda.is_available():
        torch.backends.cudnn.enabled = True
        torch.backends.cudnn.benchmark = True
    opt = Opts().opt

    # Create data loaders
    train_loader, test_loader = create_data_loaders(opt)

    # Create nn
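    # device() is assumed to be a project helper returning the torch.device to use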
    model = create_model(opt).to(device())
    
    # Create loss criterion
    if opt.onlyAutoEncoder:
        criterion = create_criterion(opt.criterionAutoEncoder).to(device())
    else:
        criterion = create_criterion(opt.criterionClassifier).to(device())
    
    # Choose to train or to test the model
    if opt.toTrain:
        # Create optimizer
        optimizer = create_optimizer(opt, model)
        train_net(opt, train_loader, test_loader, model, criterion, optimizer, opt.nEpoch, opt.valInterval, opt.LR, opt.dropLR)
    
    # Test classifier or AutoEncoder
    if not opt.onlyAutoEncoder:
        # Testing classifier
        predictList = []
        labelsList = []
        model.eval()
        with torch.no_grad():
            for data in test_loader:
                images, labels = data
                images = images.to(device())
                labels = labels.to(device())
                outputs = model(images)
                _, predicted = torch.max(outputs, 1)

                predictList.extend(predicted.cpu().numpy().tolist())
                labelsList.extend(labels.cpu().numpy().tolist())
        # ===================confusionMatrixGeneration====================
        confusionMatrix = confusion_matrix(labelsList, predictList)
        print('ConfusionMatrix:\n{}\n{}'.format(list(CLASSES.values()), confusionMatrix))
        np.save(os.path.join(opt.saveDir, 'confusionMatrix.npy'), confusionMatrix)
        # ===================classificationReport: Precision, Recall and f1 score====================
        classReport = classification_report(labelsList, predictList, digits=2)
        df = pandas.DataFrame(classification_report(labelsList, predictList, digits=2, output_dict=True)).transpose()
        df.to_csv(os.path.join(opt.saveDir, 'my_csv_file.csv'))
        print(classReport)
        # ===================heatmapGeneration====================
        if opt.toGenerateHeatmap:
            model.to(torch.device('cpu'))
            # Currently 33 test images in the test_images folder are chosen for heatmap generation
            target_example = 33
            for t in range(target_example):
                # Get params for heatmap generation
                (original_image, prep_img, target_class, file_name_to_export) = get_example_params(t)
                # Grad cam, choose which block and which layer in the block for heatmap generation
                grad_cam = GradCam(model, target_block=2, target_layer=9)
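                # Note: constructing GradCam inside the loop re-hooks the model on
                # every image; it could equally be created once before the loop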
                # Generate cam mask
                cam = grad_cam.generate_cam(prep_img)
                # Save mask
                save_class_activation_images(opt, original_image, cam, file_name_to_export)
                print('Grad cam completed')       
    else:
        # Test AutoEncoder to plot original input images and reconstructed images
        model.eval()
        dataiter = iter(test_loader)
        # Generate images for the first 8 iterations
        for i in range(8):
            images, labels = next(dataiter)
            print('GroundTruth: ', ' '.join('%5s' % CLASSES[labels[j].item()] for j in range(opt.batchSize)))
            # ===================showGroundTruthImages====================
            imshow(torchvision.utils.make_grid(images))
            images_ = images.to(device())
            # ===================forward=====================
            decoded_imgs = model(images_)
            # ===================showReconstructedImages====================
            imshow(torchvision.utils.make_grid(decoded_imgs.data))        
Example #5
def main():
    # Seed all sources of randomness to 0 for reproducibility
    torch.manual_seed(0)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(0)
    random.seed(0)

    # Set cudnn.benchmark to True to speed up training
    if torch.cuda.is_available():
        torch.backends.cudnn.enabled = True
        torch.backends.cudnn.benchmark = True
    opt = Opts().opt

    # Create train and val data loaders
    train_loader, val_loader = create_data_loaders(
        opt, 'train'), create_data_loaders(opt, 'val')

    # Create nn
    model = create_model(opt)
    if opt.toCuda:
        model = model.to(device())
    # Choose to train or to test the model
    if opt.toTrain:
        # Create optimizer
        optimizer = create_optimizer(opt, model)
        train_net(opt, train_loader, val_loader, model, optimizer, opt.nEpoch,
                  opt.valInterval, opt.LR, opt.dropLR)

    else:
        # Change ASPP image pooling
        output_stride = 32
        train_crop_h, train_crop_w = (1025, 2049)
        scale = 1. / output_stride
        pool_h = int((float(train_crop_h) - 1.0) * scale + 1.0)
        pool_w = int((float(train_crop_w) - 1.0) * scale + 1.0)
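        # e.g. for the 1025x2049 training crop with output_stride 32:
        # pool_h = (1025 - 1) / 32 + 1 = 33, pool_w = (2049 - 1) / 32 + 1 = 65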

        model.set_image_pooling((pool_h, pool_w))

        # Create test data loaders, change batch size to 1
        opt.batchSize = 1
        test_loader = create_data_loaders(opt, 'test')

        panoptic_metric = CityscapesPanopticEvaluator(
            output_dir=os.path.join(opt.saveDir, 'panoptic'),
            train_id_to_eval_id=test_loader.dataset.train_id_to_eval_id(),
            label_divisor=test_loader.dataset.label_divisor,
            void_label=test_loader.dataset.label_divisor *
            test_loader.dataset.ignore_label,
            gt_dir=opt.data,
            split=test_loader.dataset.split,
            num_classes=test_loader.dataset.num_classes)

        image_filename_list = [
            os.path.splitext(os.path.basename(ann))[0]
            for ann in test_loader.dataset.img_list
        ]

        debug_out_dir = os.path.join(opt.saveDir, 'debug_test')
        PathManager.mkdirs(debug_out_dir)

        model.eval()
        with torch.no_grad():
            for i, data in enumerate(test_loader):
                if opt.toCuda:
                    data = to_cuda(data, device())
                image = data.pop('image')
                out_dict = model(image)

                # post-processing
                semantic_pred = get_semantic_segmentation(out_dict['semantic'])

                if 'foreground' in out_dict:
                    foreground_pred = get_semantic_segmentation(
                        out_dict['foreground'])
                else:
                    foreground_pred = None

                panoptic_pred, center_pred = get_panoptic_segmentation(
                    semantic_pred,
                    out_dict['center'],
                    out_dict['offset'],
                    thing_list=test_loader.dataset.thing_list,
                    label_divisor=test_loader.dataset.label_divisor,
                    stuff_area=2048,
                    void_label=(test_loader.dataset.label_divisor *
                                test_loader.dataset.ignore_label),
                    threshold=0.1,
                    nms_kernel=7,
                    top_k=200,
                    foreground_mask=foreground_pred)

                # save predictions
                semantic_pred = semantic_pred.squeeze(0).cpu().numpy()
                panoptic_pred = panoptic_pred.squeeze(0).cpu().numpy()

                # Crop padded regions.
                image_size = data['size'].squeeze(0).cpu().numpy()
                panoptic_pred = panoptic_pred[:image_size[0], :image_size[1]]

                # Resize back to the raw image size.
                raw_image_size = data['raw_size'].squeeze(0).cpu().numpy()
                if raw_image_size[0] != image_size[0] or raw_image_size[
                        1] != image_size[1]:
                    # np.float was removed from NumPy; plain float is equivalent here
                    semantic_pred = cv2.resize(
                        semantic_pred.astype(float),
                        (raw_image_size[1], raw_image_size[0]),
                        interpolation=cv2.INTER_NEAREST).astype(np.int32)
                    panoptic_pred = cv2.resize(
                        panoptic_pred.astype(float),
                        (raw_image_size[1], raw_image_size[0]),
                        interpolation=cv2.INTER_NEAREST).astype(np.int32)

                # Optional: evaluates panoptic segmentation.
                image_id = '_'.join(image_filename_list[i].split('_')[:3])
                panoptic_metric.update(panoptic_pred,
                                       image_filename=image_filename_list[i],
                                       image_id=image_id)

                # Processed outputs (optional debug dumps, disabled):
                # save_annotation(semantic_pred, debug_out_dir, 'semantic_pred_%d' % i,
                #                 add_colormap=True, colormap=test_loader.dataset.create_label_colormap())
                # pan_to_sem = panoptic_pred // test_loader.dataset.label_divisor
                # save_annotation(pan_to_sem, debug_out_dir, 'pan_to_sem_pred_%d' % i,
                #                 add_colormap=True, colormap=test_loader.dataset.create_label_colormap())
                # ins_id = panoptic_pred % test_loader.dataset.label_divisor
                # pan_to_ins = panoptic_pred.copy()
                # pan_to_ins[ins_id == 0] = 0
                # save_instance_annotation(pan_to_ins, debug_out_dir, 'pan_to_ins_pred_%d' % i)

                save_panoptic_annotation(
                    panoptic_pred,
                    debug_out_dir,
                    'panoptic_pred_%d' % i,
                    label_divisor=test_loader.dataset.label_divisor,
                    colormap=test_loader.dataset.create_label_colormap())
            results = panoptic_metric.evaluate()
            print(results)