Example no. 1
        print('Restored model, epoch {}, step {}'.format(epoch, step))
    else:
        print("No trained model detected")
        raise OSError

    # Validation
    feature_descriptor_model.eval()
    feature_descriptor_model = feature_descriptor_model.module
    feature_descriptor_model = feature_descriptor_model.cuda(gpu_id)
    # Summary network architecture
    if display_architecture:
        torchsummary.summary(feature_descriptor_model,
                             input_size=(3, height, width))

    total_query = 0
    folder_list = utils.get_parent_folder_names(testing_data_root,
                                                id_range=testing_patient_id)

    mean_accuracy_1 = None
    mean_accuracy_2 = None
    mean_accuracy_3 = None

    for patient_id in testing_patient_id:
        data_root = Path(testing_data_root) / "{:d}".format(patient_id)
        sub_folders = list(data_root.glob("*/"))
        sub_folders.sort()
        for folder in sub_folders:
            # Get color image filenames
            test_filenames = utils.get_file_names_in_sequence(
                sequence_root=folder)
            if len(test_filenames) == 0:
                print("Sequence {} does not have relevant files".format(
Example no. 2
        currentDT.month,
        currentDT.day,
        currentDT.hour,
        currentDT.minute,
        testing_patient_id)
    if not log_root.exists():
        log_root.mkdir()
    writer = SummaryWriter(logdir=str(log_root))
    print("Tensorboard visualization at {}".format(str(log_root)))

    # Get color image filenames
    train_filenames, val_filenames, test_filenames = utils.get_color_file_names_by_bag(training_data_root,
                                                                                       validation_patient_id=validation_patient_id,
                                                                                       testing_patient_id=testing_patient_id,
                                                                                       id_range=id_range)
    folder_list = utils.get_parent_folder_names(training_data_root,
                                                id_range=id_range)

    # Build training and validation dataset
    train_dataset = dataset.SfMDataset(image_file_names=train_filenames,
                                       folder_list=folder_list,
                                       adjacent_range=adjacent_range, transform=training_transforms,
                                       downsampling=input_downsampling,
                                       network_downsampling=network_downsampling, inlier_percentage=inlier_percentage,
                                       use_store_data=load_intermediate_data,
                                       store_data_root=training_data_root,
                                       phase="train", is_hsv=is_hsv,
                                       num_pre_workers=num_workers, visible_interval=30, rgb_mode="rgb")
    validation_dataset = dataset.SfMDataset(image_file_names=val_filenames,
                                            folder_list=folder_list,
                                            adjacent_range=adjacent_range,
                                            transform=None,
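
# A minimal sketch of wrapping the two datasets above in DataLoaders, mirroring the
# torch.utils.data.DataLoader usage seen in the next example (hedged: batch_size,
# num_workers, and the shuffle settings are assumed to be hyperparameters defined
# earlier in the script; train_dataset and validation_dataset come from the snippet).
import torch

train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size,
                                           shuffle=True, num_workers=num_workers)
validation_loader = torch.utils.data.DataLoader(dataset=validation_dataset, batch_size=batch_size,
                                                shuffle=False, num_workers=num_workers)
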
Example no. 3
    # Create output folders for trained models and results, ignoring the error
    # if they already exist
    model_root = root / "models"
    try:
        model_root.mkdir(mode=0o777, parents=True)
    except OSError:
        pass
    results_root = root / "results"
    try:
        results_root.mkdir(mode=0o777, parents=True)
    except OSError:
        pass

    # Get color image filenames
    train_filenames, val_filenames, test_filenames = utils.get_color_file_names_by_bag(training_data_root,
                                                                                       which_bag=which_bag,
                                                                                       split_ratio=(0.5, 0.5))
    training_folder_list, val_folder_list = utils.get_parent_folder_names(training_data_root, which_bag=which_bag)
    # Build training and validation dataset
    train_dataset = dataset.SfMDataset(image_file_names=train_filenames,
                                       folder_list=training_folder_list + val_folder_list,
                                       adjacent_range=adjacent_range, to_augment=True, transform=training_transforms,
                                       downsampling=downsampling,
                                       net_depth=teacher_depth, inlier_percentage=inlier_percentage,
                                       use_store_data=load_intermediate_data,
                                       store_data_root=precompute_root,
                                       use_view_indexes_per_point=use_view_indexes_per_point, visualize=visualize,
                                       phase="train", is_hsv=is_hsv)
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=shuffle,
                                               num_workers=num_workers)
    # Load trained teacher model
    depth_estimation_model_teacher = models.UNet(in_channels=3, out_channels=1, depth=teacher_depth, wf=filter_base,
                                                 padding=True, up_mode='upsample')
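
# A minimal sketch of restoring the trained teacher weights and freezing them, which the
# "Load trained teacher model" comment above leads into (hedged: teacher_model_path and
# the 'model' checkpoint key are hypothetical; only depth_estimation_model_teacher comes
# from the snippet).
import torch

teacher_model_path = "path/to/teacher_checkpoint.pt"  # hypothetical checkpoint path
state = torch.load(teacher_model_path, map_location="cpu")
depth_estimation_model_teacher.load_state_dict(state["model"])  # assumed checkpoint layout
depth_estimation_model_teacher = depth_estimation_model_teacher.cuda()
depth_estimation_model_teacher.eval()
# Freeze the teacher so only the student network receives gradient updates
for param in depth_estimation_model_teacher.parameters():
    param.requires_grad = False
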
Example no. 4
        log_root.mkdir(parents=True)
    writer = SummaryWriter(logdir=str(log_root))
    print("Tensorboard visualization at {}".format(str(log_root)))

    if selected_frame_index_list is None and not load_all_frames:
        raise IOError

    # Read all frame indexes
    if load_all_frames:
        selected_frame_index_list = utils.read_visible_view_indexes(
            sequence_root)

    # Get color image filenames
    test_filenames = utils.get_filenames_from_frame_indexes(
        sequence_root, selected_frame_index_list)
    folder_list = utils.get_parent_folder_names(evaluation_data_root,
                                                id_range=id_range)

    if phase == "validation":
        test_dataset = dataset.SfMDataset(
            image_file_names=test_filenames,
            folder_list=folder_list,
            adjacent_range=adjacent_range,
            transform=None,
            downsampling=input_downsampling,
            network_downsampling=network_downsampling,
            inlier_percentage=inlier_percentage,
            use_store_data=load_intermediate_data,
            store_data_root=evaluation_data_root,
            phase="validation",
            is_hsv=is_hsv,
            num_pre_workers=num_pre_workers,
    # Create the tensorboard log folder, named after the current date and time
    if not log_root.exists():
        log_root.mkdir()
    log_root = log_root / "dense_descriptor_train_{}_{}_{}_{}".format(
        current_date.month, current_date.day, current_date.hour,
        current_date.minute)
    writer = SummaryWriter(logdir=str(log_root))
    print("Created tensorboard visualization at {}".format(str(log_root)))

    if not Path(args.precompute_root).exists():
        Path(args.precompute_root).mkdir(parents=True)

    train_filenames = \
        utils.get_color_file_names_by_bag(root=Path(args.data_root), id_list=args.training_patient_id)

    sequence_path_list = utils.get_parent_folder_names(
        Path(args.data_root), id_list=args.training_patient_id)

    # Build training and validation dataset
    train_dataset = dataset.DescriptorDataset(
        image_file_names=train_filenames,
        folder_list=sequence_path_list,
        adjacent_range=args.adjacent_range,
        image_downsampling=args.image_downsampling,
        inlier_percentage=args.inlier_percentage,
        network_downsampling=args.network_downsampling,
        load_intermediate_data=args.load_intermediate_data,
        intermediate_data_root=Path(args.precompute_root),
        sampling_size=args.sampling_size,
        heatmap_sigma=args.heatmap_sigma,
        num_pre_workers=args.num_workers,
        visible_interval=args.visibility_overlap,
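
# A minimal sketch of the training-loop skeleton that would typically follow, logging to
# the SummaryWriter created above (hedged: train_loader, model, optimizer, and
# compute_loss are hypothetical stand-ins, and args.num_epoch is an assumed argument;
# only writer, args, and train_dataset come from the snippet).
step = 0
for epoch in range(args.num_epoch):
    for batch in train_loader:  # train_loader would wrap train_dataset as in the earlier examples
        optimizer.zero_grad()
        loss = compute_loss(model, batch)  # hypothetical loss computation
        loss.backward()
        optimizer.step()
        writer.add_scalar('Train/loss', loss.item(), step)
        step += 1
writer.close()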