Code example #1
File: pc_h5_dataset.py  Project: imatge-upc/munegc
    def __init__(self,
                 root_path,
                 h5_folder,
                 split,
                 transform3d=None,
                 range01=False,
                 pos_int16=False,
                 random_crop=False,
                 factor_rand=False,
                 factor=1):

        self.root_path = root_path

        self.h5_path = os.path.join(self.root_path, h5_folder)
        self.split = utils.read_string_list(os.path.join(
            self.root_path, split))

        self.h5_folder = h5_folder

        self.transform3d = transform3d

        self.range01 = range01
        self.pos_int16 = pos_int16
        self.random_crop = random_crop
        self.factor_rand = factor_rand
        self.factor = factor
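
Every example on this page relies on utils.read_string_list to turn a split file into a list of sample names. The project's own implementation is not shown here; below is a minimal sketch of what such a helper typically looks like, assuming the split file holds one entry per line.

def read_string_list(path):
    # Hypothetical sketch, not the project's code: read a text file with
    # one entry per line and return the stripped, non-empty lines.
    with open(path, 'r') as f:
        return [line.strip() for line in f if line.strip()]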
Code example #2
def extract_features(model, loader, split_path, h5_path, nLayer, cuda=True):
    model.eval()

    model.obtain_intermediate(nLayer)

    features = SaveFeatures(list(model.children())[nLayer])

    print("This layer will be used to extract the features: ",
          list(model.children())[nLayer])
    loader = tqdm(loader, ncols=100)

    files = utils.read_string_list(split_path)
    for batch in loader:

        if cuda:
            batch = batch.to('cuda:0')

        graph = model(batch)

        feat = features.features
        if isinstance(feat, torch_geometric.data.batch.Batch):
            feat = feat.x
        if isinstance(graph, torch_geometric.data.batch.Batch):
            if feat.size(0) == graph.x.size(0) and graph.x.size(
                    0) == graph.pos.size(0) and graph.pos.size(0) == len(
                        graph.batch):
                graph.x = feat
                x, _ = torch_geometric.utils.to_dense_batch(graph.x,
                                                            batch=graph.batch)
                pos, _ = torch_geometric.utils.to_dense_batch(
                    graph.pos, batch=graph.batch)
                labels = graph.y
                file_indexes = graph.c
                for i in range(0, x.size(0)):
                    x_i = x[i, :, :]
                    pos_i = pos[i, :, :]
                    y_i = labels[i]
                    h5_file_name = os.path.join(h5_path,
                                                files[file_indexes[i]] + '.h5')
                    save_h5_features(h5_file_name,
                                     x_i.detach().cpu().numpy(),
                                     pos_i.detach().cpu().numpy(),
                                     y_i.detach().cpu().numpy())
            else:
                print('Dimensions do not match')
                exit()
        else:
            raise RuntimeError('wrong input data')
    features.close()
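
The per-sample outputs are written with save_h5_features, whose definition is not part of this snippet. A minimal sketch of such a writer, assuming one HDF5 file per sample and dataset names that are illustrative rather than taken from the project:

import h5py

def save_h5_features(h5_file_name, feat, pos, label):
    # Hypothetical sketch, not the project's code: store the features,
    # point positions and label of a single sample in its own HDF5 file.
    with h5py.File(h5_file_name, 'w') as f:
        f.create_dataset('feat', data=feat)
        f.create_dataset('pos', data=pos)
        f.create_dataset('label', data=label)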
Code example #3
File: img_h5_dataset.py  Project: imatge-upc/munegc
    def __init__(self,
                 root_path,
                 h5_folder,
                 split,
                 transform=None,
                 range01=False):

        self.root_path = root_path

        self.h5_path = os.path.join(self.root_path, h5_folder)
        self.split = utils.read_string_list(os.path.join(
            self.root_path, split))

        self.h5_folder = h5_folder

        self.transform = transform

        self.range01 = range01
Code example #4
    def __init__(self,
                 root_path,
                 h5_folder_b1,
                 h5_folder_b2,
                 split,
                 transform3d=None,
                 pos_int16=False):

        self.root_path = root_path

        self.h5_path_b1 = os.path.join(self.root_path, h5_folder_b1)
        self.h5_path_b2 = os.path.join(self.root_path, h5_folder_b2)

        self.split = utils.read_string_list(os.path.join(
            self.root_path, split))

        self.h5_folder_b1 = h5_folder_b1
        self.h5_folder_b2 = h5_folder_b2

        self.transform3d = transform3d

        self.pos_int16 = pos_int16
Code example #5
        return img[start[0]:end[0], start[1]:end[1]]


if __name__ == "__main__":

    # dataset folder path
    dataset_path = '../../nyuv2'

    # path to the data
    dataset_mat_path = dataset_path + '/nyu_depth_v2_labeled.mat'

    # folder to store the h5 files
    path_h5 = dataset_path + '/h5/h5_feat2d/'
    utils.create_folder(path_h5)

    train_split = utils.read_string_list(dataset_path + "/list/train_list.txt")
    val_split = utils.read_string_list(dataset_path + "/list/test_list.txt")
    scenes_types = utils.read_string_list(dataset_path +
                                          "/list/scenes_labels27.txt")
    dataset = train_split + val_split

    print("Loading .mat")
    f = h5py.File(dataset_mat_path, 'r')

    depths = np.round(np.transpose(np.asarray(f['depths'])), 4)
    scenes = np.transpose(np.asarray(f['sceneTypes'])).squeeze()

    mapping_10 = np.asarray([
        10, 4, 1, 9, 10, 8, 10, 10, 10, 5, 10, 10, 10, 7, 10, 10, 2, 10, 3, 6,
        10, 10, 10, 10, 10, 10, 10
    ]) - 1
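
mapping_10 merges the 27 NYUv2 scene categories into 10 classes, and the trailing - 1 shifts the 1-based entries to 0-based class indices. A hedged usage sketch follows; the scene name is an assumed example, and the exact lookup used later in the script is not shown here.

    # Hypothetical usage: look up a scene name in the 27-class list and
    # remap it to one of the 10 merged classes (0-based).
    scene_name = 'bedroom'
    label10 = mapping_10[scenes_types.index(scene_name)]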
Code example #6
            args.pretrain_path)
        model.load_state_dict(model_state)

    else:
        print('Wrong pretrain path')
        exit()

    if args.cuda and not args.multigpu:
        model = model.to('cuda:0')

    print(model)

    label_path = os.path.join(args.dataset_path, args.classname)
    if not os.path.isfile(label_path):
        raise RuntimeError("label file does not exist")
    label_names = utils.read_string_list(label_path)

    assert args.batch_size % args.batch_parts == 0

    dataset = PCH5Dataset(args.dataset_path,
                          args.dataset_folder,
                          args.dataset,
                          range01=args.range01,
                          pos_int16=args.pos_int16)

    loader = torch_geometric.data.DataLoader(dataset,
                                             batch_size=int(args.batch_size /
                                                            args.batch_parts),
                                             num_workers=args.nworkers,
                                             shuffle=False,
                                             pin_memory=False)
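
This snippet builds the model, dataset and loader that presumably feed the feature-extraction loop shown in code example #2. A hedged sketch of how the two could be wired together; the split path, output folder and layer index below are assumptions, not taken from the project:

    # Hypothetical wiring with extract_features from code example #2.
    split_path = os.path.join(args.dataset_path, args.dataset)   # assumed
    h5_out = os.path.join(args.dataset_path, 'h5_features')      # assumed
    extract_features(model, loader, split_path, h5_out,
                     nLayer=-1, cuda=args.cuda)                   # assumed layer index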
Code example #7
File: train.py  Project: imatge-upc/munegc
    parameters = model.parameters()
    if args.optim == "adam":
        optimizer = torch.optim.Adam(parameters, lr=args.lr, betas=args.betas, weight_decay=args.wd)
    elif args.optim == "sgd":
        optimizer = torch.optim.SGD(parameters, lr=args.lr, momentum=args.momentum, weight_decay=args.wd)
    elif args.optim == 'radam':
        print('radam')
        optimizer = RAdam(parameters, lr=args.lr, betas=args.betas, weight_decay=args.wd)

    weights = None
    if args.weights != '' and args.weights != '-':
        weights_path = os.path.join(args.dataset_path, args.weights)
        if not os.path.isfile(weights_path):
            raise RuntimeError("weights file does not exist")
        weights = torch.FloatTensor([float(i) for i in utils.read_string_list(weights_path)]).cuda(0)

    loss_criterion = torch.nn.CrossEntropyLoss(weight=weights)

    label_path = os.path.join(args.dataset_path, args.classname)
    if not os.path.isfile(label_path):
        raise RuntimeError("label file does not exist")
    label_names = utils.read_string_list(label_path)

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
Code example #8
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.wd)
    elif args.optim == 'radam':
        optimizer = RAdam(parameters,
                          lr=args.lr,
                          betas=args.betas,
                          weight_decay=args.wd)

    weights = None
    if args.weights != '' and args.weights != '-':
        weights_path = os.path.join(args.dataset_path, args.weights)
        if not os.path.isfile(weights_path):
            raise RuntimeError("weights file does not exist")
        weights = torch.FloatTensor([
            float(i) for i in utils.read_string_list(weights_path)
        ]).cuda(args.lastgpu)

    loss_criterion = torch.nn.CrossEntropyLoss(weight=weights)

    label_path = os.path.join(args.dataset_path, args.classname)
    if not os.path.isfile(label_path):
        raise RuntimeError("label file does not exist")
    label_names = utils.read_string_list(label_path)
    assert args.batch_size % args.batch_parts == 0

    transform3d = {
        "dropout": args.pc_augm_input_dropout,
        "rot": args.pc_augm_rot,
        "mirror": args.pc_augm_mirror_prob
    }
Code example #9

if __name__ == "__main__":

    # dataset folder path
    dataset_path = '../../sunrgbd'

    # path to the data
    images_list = dataset_path + '/list/sun_list.txt'
    label_list = dataset_path + '/list/scenes_labels.txt'

    # folder to store the h5 files
    path_h5 = dataset_path + '/h5/h5_feat2d/'
    utils.create_folder(path_h5)

    images = utils.read_string_list(images_list)

    dataset_labels = utils.read_string_list(label_list)
    newSize = (420, 560)

    for i in tqdm(range(0, len(images)), ncols=100):
        img_folder = dataset_path + "/" + images[i]

        depth_path = glob.glob(img_folder + "/depth_bfx/*.png")[0]

        depth_img = read_depth_sunrgbd(depth_path)
        intrinsic = np.loadtxt(img_folder + '/intrinsics.txt')

        # read the scene label string; the with-block closes the file handle
        with open(img_folder + "/scene.txt", "r") as readlabel:
            label = readlabel.read()
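
The scene label read from scene.txt is a plain string; before training it has to be mapped to an integer class index against dataset_labels. A hedged sketch of that conversion (the project's own handling is not shown in this truncated snippet):

        # Hypothetical follow-up: convert the scene name into a 0-based
        # class index using the label list read above.
        label_idx = dataset_labels.index(label.strip())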