Example #1
0
    # Un-rotate the 16-bit depth value ((x << 13) | (x >> 3)) and scale to
    # float32.  NOTE(review): depth is divided by 1000 here AND again in the
    # dp.TSDF call below — double scaling; the sibling example (#2) only
    # divides at the dp.TSDF call.  Confirm which is intended.
    depth = (((depth << 13) | (depth >> 3)) / 1000.0).astype(np.float32)

    # Flat TSDF volume and per-voxel mapping buffers (-1 marks "unmapped");
    # both are filled in place by dp.TSDF below.
    tsdf = np.zeros(vox_size[2] * vox_size[1] * vox_size[0], dtype=np.float32)
    depth_mapping_3d = np.ones(vox_size[2] * vox_size[1] * vox_size[0],
                               dtype=np.float32) * (-1)
    dp.TSDF((depth / 1000.0).reshape(-1), cam_K, cam_pose, vox_origin,
            vox_unit, vox_size.astype(np.float32), vox_margin, depth.shape[0],
            depth.shape[1], tsdf, depth_mapping_3d)
    depth_mapping_3d = depth_mapping_3d.astype(np.int64)
    # Down-sampled label / TSDF buffers, one cell per sampleRatio^3 voxels.
    # NOTE(review): "/" relies on Python 2 integer division; under Python 3
    # it yields a float and np.zeros would raise — use "//" when porting.
    label = np.zeros(vox_size[2] * vox_size[1] * vox_size[0] /
                     (sampleRatio * sampleRatio * sampleRatio),
                     dtype=np.float32)
    tsdf_downsample = np.zeros(vox_size[2] * vox_size[1] * vox_size[0] /
                               (sampleRatio * sampleRatio * sampleRatio),
                               dtype=np.float32)
    # Pool the full-resolution labels down by sampleRatio per dimension;
    # label and tsdf_downsample are output parameters.
    dp.DownSampleLabel(readin_bin, vox_size.astype(np.float32), sampleRatio,
                       tsdf, label, tsdf_downsample)
    label = label.astype(np.int32)
    label_weight = getLabelWeight(label, tsdf_downsample)
    label[np.where(label == 255)] = 0  # 255 marks unknown voxels; map to 0
    dp.TSDFTransform(tsdf, vox_size.astype(np.float32))

    # Wrap as torch tensors; voxel layout is (z, y, x) per vox_size order.
    tsdf = torch.FloatTensor(tsdf).view(
        (1, vox_size[2], vox_size[1], vox_size[0]))
    label = torch.LongTensor(label).view(vox_size[2] / sampleRatio,
                                         vox_size[1] / sampleRatio,
                                         vox_size[0] / sampleRatio)
    label_weight = torch.FloatTensor(label_weight).view(
        vox_size[2] / sampleRatio, vox_size[1] / sampleRatio,
        vox_size[0] / sampleRatio)
    depth_mapping_3d = torch.LongTensor(depth_mapping_3d)
Example #2
0
def multi_process(index, lenlist, startindex):
    """Convert one (bin, depth) sample into a compressed ``.npz`` volume.

    Reads the run-length-encoded voxel label file ``binlist[index -
    startindex]`` and the depth map ``depthlist[index - startindex]``
    (module-level lists), computes the TSDF volume, down-sampled labels,
    label weights and the depth-pixel -> voxel mapping, and writes them to
    ``outputpath/%06d.npz``.

    Fixes over the original: ``with`` guarantees the bin file is closed on
    error, ``//`` keeps sizes integral under Python 3 (``/`` would produce
    floats and make ``np.zeros``/``view`` raise), and the Python-2-only
    ``print`` statement / ``xrange`` are replaced with forms valid on both.

    Parameters
    ----------
    index : int       -- global sample index (progress print, output name).
    lenlist : int     -- total sample count (progress print only).
    startindex : int  -- offset of this worker's slice into the file lists.

    Returns None; returns early (writing nothing) on a corrupt bin file.
    """
    print('%d/%d' % (index, lenlist))

    # Scene header: voxel-grid origin (3 floats) and 4x4 camera pose
    # (16 floats), followed by the run-length-encoded voxel labels.
    with open(binlist[index - startindex], 'rb') as binfile:
        vox_origin = np.array(struct.unpack('3f', binfile.read(12)),
                              dtype=np.float32)
        cam_pose = np.array(struct.unpack('16f', binfile.read(64)),
                            dtype=np.float32)
        all_the_byte = binfile.read()

    num_voxels = vox_size[0] * vox_size[1] * vox_size[2]
    readin_bin = np.zeros(num_voxels, dtype=np.float32)
    # Decode the RLE payload: (label value, run length) pairs of uint32.
    all_the_val = struct.unpack("%dI" % (len(all_the_byte) // 4),
                                all_the_byte)
    last_index = 0
    count_bad_data = 0
    for i in range(len(all_the_val) // 2):
        run_label = all_the_val[i * 2]
        run_length = all_the_val[i * 2 + 1]
        if last_index + run_length > num_voxels:
            # Runs overflow the grid: flag the sample as corrupt and stop.
            count_bad_data = 0
            break
        if run_label == 255:
            readin_bin[last_index:last_index + run_length] = 255
        else:
            readin_bin[last_index:last_index +
                       run_length] = segmentation_class_map[run_label % 37]
            if run_label != 0:
                # Non-empty, non-ignore voxels count as real content.
                count_bad_data += run_length
        last_index += run_length
    # Reject samples that are (nearly) empty or whose runs do not exactly
    # fill the voxel grid.
    if count_bad_data < 10 or last_index != num_voxels:
        print('%d is awful!' % index)
        return

    depth = cv2.imread(depthlist[index - startindex], -1)
    # NOTE(review): assumes the 16-bit depth is stored bit-rotated
    # ((x << 13) | (x >> 3) restores it) — confirm dataset format.
    depth = ((depth << 13) | (depth >> 3)).astype(np.float32)

    tsdf = np.ones(num_voxels, dtype=np.float32)  # modified: zeros to ones
    # One slot per depth pixel; -1 marks pixels without a voxel mapping.
    depth_mapping_3d = np.ones(640 * 480, dtype=np.float32) * (
        -1
    )  # vox_size[2]*vox_size[1]*vox_size[0] seems to be wrong, it should be 640x480
    # dp.TSDF fills tsdf and depth_mapping_3d in place (depth in metres).
    dp.TSDF((depth / 1000.0).reshape(-1), cam_K, cam_pose, vox_origin,
            vox_unit, vox_size.astype(np.float32), vox_margin, depth.shape[0],
            depth.shape[1], tsdf, depth_mapping_3d)
    depth_mapping_3d = depth_mapping_3d.astype(np.int64)

    # Down-sampled buffers: one cell per sampleRatio^3 voxels.
    num_down = num_voxels // (sampleRatio * sampleRatio * sampleRatio)
    label = np.zeros(num_down, dtype=np.float32)
    tsdf_downsample = np.zeros(num_down, dtype=np.float32)
    dp.DownSampleLabel(readin_bin, vox_size.astype(np.float32), sampleRatio,
                       tsdf, label, tsdf_downsample)
    label = label.astype(np.int32)
    label_weight = getLabelWeight(label, tsdf_downsample)
    label[np.where(label == 255)] = 0  # 255 marks unknown voxels; map to 0
    dp.TSDFTransform(tsdf, vox_size.astype(np.float32))

    # Wrap as torch tensors; voxel layout is (z, y, x) per vox_size order.
    down_shape = (vox_size[2] // sampleRatio, vox_size[1] // sampleRatio,
                  vox_size[0] // sampleRatio)
    tsdf = torch.FloatTensor(tsdf).view(
        (1, vox_size[2], vox_size[1], vox_size[0]))
    label = torch.LongTensor(label).view(down_shape)
    label_weight = torch.FloatTensor(label_weight).view(down_shape)
    depth_mapping_3d = torch.LongTensor(depth_mapping_3d)

    np.savez_compressed(os.path.join(outputpath, '%06d.npz' % index),
                        tsdf.numpy(), label.numpy(), label_weight.numpy(),
                        depth_mapping_3d.numpy())
Example #3
0
    def __getitem__(self, index):
        """Load one training sample; fall back to a hard example on corrupt data.

        Loops until a valid sample is found: reads the RLE voxel-label bin
        file, the color and depth images, computes the TSDF volume,
        down-sampled labels, weights and the depth -> voxel mapping.

        Fixes over the original: ``with`` guarantees the bin file is closed
        on error, ``//`` keeps sizes integral under Python 3 (``/`` would
        yield floats and make ``np.zeros``/``view`` raise), ``xrange`` ->
        ``range``, and ``random.choice`` replaces the manual randint index.

        Returns (img, tsdf, label, label_weight, depth_mapping_3d):
        img is the 4-channel (RGB + depth) input tensor, the rest are the
        TSDF / supervision volumes.
        """
        while True:
            # Scene header: voxel origin (3 floats), 4x4 camera pose
            # (16 floats), then the run-length-encoded voxel labels.
            with open(self.binlist[index], 'rb') as binfile:
                vox_origin = np.array(struct.unpack('3f', binfile.read(12)),
                                      dtype=np.float32)
                cam_pose = np.array(struct.unpack('16f', binfile.read(64)),
                                    dtype=np.float32)
                all_the_byte = binfile.read()

            num_voxels = (self.vox_size[0] * self.vox_size[1] *
                          self.vox_size[2])
            readin_bin = np.zeros(num_voxels, dtype=np.float32)
            # Decode the RLE payload: (label value, run length) uint32 pairs.
            all_the_val = struct.unpack("%dI" % (len(all_the_byte) // 4),
                                        all_the_byte)
            last_index = 0
            count_bad_data = 0
            for i in range(len(all_the_val) // 2):
                run_label = all_the_val[i * 2]
                run_length = all_the_val[i * 2 + 1]
                if last_index + run_length > num_voxels:
                    # Runs overflow the grid: flag as corrupt and stop.
                    count_bad_data = 0
                    break
                if run_label == 255:
                    readin_bin[last_index:last_index + run_length] = 255
                else:
                    readin_bin[last_index:last_index +
                               run_length] = self.segmentation_class_map[
                                   run_label % 37]
                    if run_label != 0:
                        # Non-empty, non-ignore voxels count as content.
                        count_bad_data += run_length
                last_index += run_length
            if count_bad_data < 10 or last_index != num_voxels:
                # Corrupt or near-empty sample: retry with a hard example.
                index = random.choice(self.hardexamples)
                continue

            color = cv2.imread(self.colorlist[index], -1)
            depth = cv2.imread(self.depthlist[index], -1).astype(np.float32)

            # Flat TSDF volume and depth->voxel mapping (-1 = unmapped);
            # both are filled in place by dp.TSDF (depth in metres).
            tsdf = np.zeros(num_voxels, dtype=np.float32)
            depth_mapping_3d = np.ones(num_voxels, dtype=np.float32) * (-1)
            dp.TSDF((depth / 1000.0).reshape(-1), self.cam_K,
                    cam_pose, vox_origin, self.vox_unit,
                    self.vox_size.astype(np.float32), self.vox_margin,
                    depth.shape[0], depth.shape[1], tsdf, depth_mapping_3d)
            depth_mapping_3d = depth_mapping_3d.astype(np.int64)

            # Down-sampled buffers: one cell per sampleRatio^3 voxels.
            ratio3 = (self.sampleRatio * self.sampleRatio * self.sampleRatio)
            num_down = num_voxels // ratio3
            label = np.zeros(num_down, dtype=np.float32)
            tsdf_downsample = np.zeros(num_down, dtype=np.float32)
            dp.DownSampleLabel(readin_bin, self.vox_size.astype(np.float32),
                               self.sampleRatio, tsdf, label, tsdf_downsample)
            label = label.astype(np.int32)
            label_weight = getLabelWeight(label, tsdf_downsample)
            label[np.where(label == 255)] = 0  # 255 = unknown; map to 0
            dp.TSDFTransform(tsdf, self.vox_size.astype(np.float32))

            # Build the network input: normalized RGB (CHW) + scaled depth.
            color = np.transpose(color, (2, 0, 1)).astype(np.float32) / 255.0
            depth1 = torch.FloatTensor(depth / 10000.0).view(
                1, depth.shape[0], depth.shape[1])
            color = torch.FloatTensor(color)
            color = self.img_transform(color)
            img = torch.cat((color, depth1), dim=0).contiguous()

            # Wrap supervision volumes; voxel layout is (z, y, x).
            down_shape = (self.vox_size[2] // self.sampleRatio,
                          self.vox_size[1] // self.sampleRatio,
                          self.vox_size[0] // self.sampleRatio)
            tsdf = torch.FloatTensor(tsdf).view(
                (1, self.vox_size[2], self.vox_size[1], self.vox_size[0]))
            label = torch.LongTensor(label).view(down_shape)
            label_weight = torch.FloatTensor(label_weight).view(down_shape)
            depth_mapping_3d = torch.LongTensor(depth_mapping_3d)

            return img, tsdf, label, label_weight, depth_mapping_3d