Example #1
    def __init__(self, file_path):
        super(DatasetFromHDF5, self).__init__()
        self.file_path = file_path
        # Open the ground-truth HDF5 file only to collect and shuffle its keys;
        # the individual samples are read later, in __getitem__.
        data = h5py.File(os.path.join(self.file_path, 'gt.h5'), 'r')
        self.keys = list(data.keys())
        random.shuffle(self.keys)
        data.close()
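The example shows only the constructor; a minimal sketch of a matching __len__/__getitem__, assuming each key in gt.h5 maps to a single sample array and that numpy and torch are imported as in the other examples, might look like this:

    def __len__(self):
        return len(self.keys)

    def __getitem__(self, index):
        # Sketch (assumption): re-open the file per item and fetch the array
        # stored under the shuffled key; the original class may do this differently.
        with h5py.File(os.path.join(self.file_path, 'gt.h5'), 'r') as data:
            sample = np.array(data[self.keys[index]])
        return torch.from_numpy(sample.astype(np.float32))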
Example #2
    def __getitem__(self, index):
        obj_id = self.data_info[index][0]
        jj = self.data_info[index][1]

        # Read the jj-th point cloud stored for this object id.
        data = h5py.File(self.root + '{}.h5'.format(obj_id), 'r')
        point_set2048 = np.array(data['data'][:][jj])
        data.close()

        # Keep every second point (e.g. 2048 -> 1024).
        point_set = point_set2048[::2, :]

        if len(point_set) > self.npoints:
            # Subsample npoints points uniformly at random, without replacement.
            c_mask = np.zeros(len(point_set), dtype=int)
            c_mask[:self.npoints] = 1
            np.random.shuffle(c_mask)
            choose = np.array(range(len(point_set)))
            choose = choose[c_mask.nonzero()]
            point_set = point_set[choose, :]
        elif len(point_set) < self.npoints:
            # Pad up to npoints by wrapping the indices around.
            choose = np.array(range(len(point_set)))
            choose = np.pad(choose, (0, self.npoints - len(choose)), 'wrap')
            point_set = point_set[choose, :]

        # Class label is the position of this object id in the object list.
        ind = self.obj_list.index(obj_id)
        point_set = torch.from_numpy(point_set.astype(np.float32))
        cls = torch.from_numpy(np.array([ind]).astype(np.int64))
        return point_set, cls
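The mask-shuffle above draws self.npoints indices without replacement; a shorter equivalent sketch (not part of the original example, and it pads with replacement instead of wrapping when there are too few points) is:

        # Sketch: subsample or pad with np.random.choice instead of the mask trick.
        if len(point_set) >= self.npoints:
            choose = np.random.choice(len(point_set), self.npoints, replace=False)
        else:
            choose = np.random.choice(len(point_set), self.npoints, replace=True)
        point_set = point_set[choose, :]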
Example #3
def readImg(listPath, index):
    # Read a list file in which each line holds "input_path target_path",
    # optionally restricted to the slice [index[0]:index[1]].
    imgPair = []
    with open(listPath, 'r') as data:
        datas = data.readlines()
    if len(index) == 2:
        datas = datas[index[0]:index[1]]
    print('loading ' + str(len(datas)) + ' imgs from ' + listPath)
    for d in datas:
        # Strip trailing newline, NUL and carriage-return characters, then split.
        d = d.strip("\n").strip("\000").strip("\r")
        d = d.split(" ")
        imgPair.append([d[0], d[1]])
    return imgPair
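A hypothetical call, with a made-up list file name and slice bounds, would look like:

# Sketch: load the first 100 pairs from a hypothetical 'train_list.txt',
# where every line is "input_path target_path".
pairs = readImg('train_list.txt', (0, 100))
print(pairs[0])  # illustrative output: ['images/0001.png', 'labels/0001.png']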
Example #4
    # Load the variational autoencoder and move it to GPU if one is available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    autoencoder_model = VAE()
    autoencoder_model = loadModel(autoencoder_model,
                                  args.vae_path,
                                  dataParallelModel=True)
    autoencoder_model = autoencoder_model.to(device)

    # Load the train/test splits from the .npz dataset file.
    data = np.load(dataset)
    train_dataset = data['train']
    test_dataset = data['test']
    data.close()
    print("train set: {}".format(train_dataset.shape))
    print("test set: {}".format(test_dataset.shape))

    # Cut the raw arrays into fixed-length sequences.
    train_dataset = createDataset(train_dataset, seq_length=seq_length)
    test_dataset = createDataset(test_dataset, seq_length=seq_length)

    print('train_dataset {}'.format(train_dataset.shape))
    print('test_dataset {}'.format(test_dataset.shape))

    # Wrap the training tensor in a DataLoader for batching.
    train_dataset = torch.from_numpy(train_dataset)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size,