Example #1
    def updateBefore(self, img):
        # the image format depends on the size/shape of the input image array
        img = img.astype(np.uint8)
        img = util.downsample(img, target_height=480)
        if len(img.shape) == 3:
            height, width, bytesPerPixel = img.shape
            if bytesPerPixel == 3:
                imgBefore = QImage(img, width, height, bytesPerPixel*width, QImage.Format_RGB888)
            elif bytesPerPixel == 4:
                imgBefore = QImage(img, width, height, bytesPerPixel*width, QImage.Format_RGBA8888_Premultiplied)
            elif bytesPerPixel == 1:
                imgBefore = QImage(img, width, height, width, QImage.Format_Indexed8)

        elif len(img.shape) == 2:
            height, width = img.shape
            if img.dtype == np.bool_:  # np.bool was removed in NumPy 1.24
                img = img.astype(np.uint8)*128
            if img.dtype == np.uint8:
                imgBefore = QImage(img, width, height, width, QImage.Format_Indexed8)

        myPixmap = QPixmap.fromImage(imgBefore)
        myPixmap = myPixmap.scaled(self.mainBefore.size(), Qt.KeepAspectRatio)
        self.mainBefore.setPixmap(myPixmap)
        self.imgNpBefore = img
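
A minimal sketch of the util.downsample helper this snippet assumes. The name and the target_height keyword come from the call site above; the stride-based decimation is a guess, not the project's actual implementation.

import numpy as np

def downsample(img, target_height=480):
    # Shrink an image to at most target_height rows by integer striding;
    # using the same stride on both axes preserves the aspect ratio.
    if img.shape[0] <= target_height:
        return img
    step = -(-img.shape[0] // target_height)  # ceiling division
    return img[::step, ::step]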
Example #2
def read_vox(vox_name):
    # read_bv and voxel_resolution are assumed to be defined at module level
    vox_model = read_bv(vox_name)
    vox_factor = voxel_resolution * 1.0 / 128  # source grids appear to be 128^3
    #vox_model_zoom = ndimg.zoom(vox_model, vox_factor, order=0) # nearest neighbor interpolation
    vox_model_zoom = downsample(vox_model, int(1/vox_factor))

    # swap the last two axes to match the expected orientation
    vox_model_zoom = np.transpose(vox_model_zoom, (0,2,1))

    return vox_model_zoom
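
The commented-out ndimg.zoom call with order=0 suggests downsample here is a nearest-neighbour reduction by an integer factor. A minimal sketch under that assumption (the real helper may pool occupancies instead of striding):

import numpy as np

def downsample(vox, factor):
    # Keep every factor-th cell along each axis of a 3-D voxel grid.
    return vox[::factor, ::factor, ::factor]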
Example #3
def unsupervised_loss(flows_fw,
                      im1,
                      im2,
                      border_mask,
                      params,
                      full_resolution=True):

    #########  loss calculation ##########
    LOSSES = ['smooth_2nd', 'ncc', 'gradient']
    loss_weights = dict()
    loss_weights['smooth_2nd'] = params['smooth_2nd_weight']
    loss_weights['ncc'] = params['ncc_weight']
    loss_weights['gradient'] = params['gradient_weight']

    # Build a per-level loss weighting; full resolution adds one extra, more heavily weighted level
    if full_resolution:
        layer_weights = [650.0, 500.0, 250.0, 130.0, 70.0]
        mask_s = border_mask
        im1_s, im2_s = im1, im2
    else:
        layer_weights = [500.0, 250.0, 130.0, 70.0]
        down_factor = 2
        _, height, width, thick, _ = im1.shape.as_list()
        im1_s = downsample(im1, down_factor)
        im2_s = downsample(im2, down_factor)
        mask_s = downsample(border_mask, down_factor)
        flows_fw = flows_fw[1:]

    combined_loss = 0.0
    flow_enum = enumerate(flows_fw)

    for i, flow_fw_s in flow_enum:
        layer_name = "loss" + str(i + 1)
        flow_scale = 1.0 / (2**i)
        with tf.variable_scope(layer_name):
            layer_weight = layer_weights[i]

            losses = compute_losses(im1_s,
                                    im2_s,
                                    flow_fw_s * flow_scale,
                                    border_mask=mask_s)

            layer_loss = 0.0
            for loss in LOSSES:
                layer_loss += loss_weights[loss] * losses[loss]

            combined_loss += layer_weight * layer_loss
            im1_s = downsample(im1_s, 2)
            im2_s = downsample(im2_s, 2)
            mask_s = downsample(mask_s, 2)

    regularization_loss = tf.losses.get_regularization_loss()
    final_loss = combined_loss + 0.0001 * regularization_loss

    return final_loss
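
The inputs here are 5-D tensors (batch, height, width, thick, channels), so the downsample this loss relies on is presumably an average-pooling reduction over the three spatial axes. A minimal sketch under that assumption:

import tensorflow as tf

def downsample(tensor, factor):
    # Average-pool an NDHWC volume by an integer factor along all spatial axes.
    return tf.nn.avg_pool3d(tensor,
                            ksize=[1, factor, factor, factor, 1],
                            strides=[1, factor, factor, factor, 1],
                            padding='SAME')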
Example #4
def run_test(
    dataset,
    clf_type,
    epochs,
    true_rh1,
    downsample_ratio,
    ordered_models_keys,
    list_of_images=range(10),
    suppress_error=False,
    verbose=False,
    pi1=0.0,
    one_vs_rest=True,
    cv_n_folds=3,
    early_stopping=True,
    pulearning=None,
):

    # Cast to float so that 1 and 1.0 (or 0 and 0.0) compare consistently
    true_rh1 = float(true_rh1)
    downsample_ratio = float(downsample_ratio)
    pi1 = float(pi1)

    # Load MNIST or CIFAR data
    (X_train_original,
     y_train_original), (X_test_original,
                         y_test_original) = get_dataset(dataset=dataset)
    X_train_original, y_train_original = downsample(X_train_original,
                                                    y_train_original,
                                                    downsample_ratio)

    # Initialize models and result storage
    metrics = {key: [] for key in ordered_models_keys}
    data_all = {"metrics": metrics, "calculated": {}, "errors": {}}
    start_time = dt.now()

    # Run through the ten image classes 0, 1, ..., 9
    for image in list_of_images:
        if one_vs_rest:
            # X_train and X_test are not modified; all data is used, so only rebind the references.
            X_train = X_train_original
            X_test = X_test_original

            # Relabel the image data. Make label 1 only for given image.
            y_train = np.array(y_train_original == image, dtype=int)
            y_test = np.array(y_test_original == image, dtype=int)
        else:  # one_vs_other
            # Reduce the dataset to our image plus one other class (4, or 7 when image == 4)
            other_image = 4 if image != 4 else 7
            X_train = X_train_original[(y_train_original == image) |
                                       (y_train_original == other_image)]
            y_train = y_train_original[(y_train_original == image) |
                                       (y_train_original == other_image)]
            X_test = X_test_original[(y_test_original == image) |
                                     (y_test_original == other_image)]
            y_test = y_test_original[(y_test_original == image) |
                                     (y_test_original == other_image)]

            # Relabel the data. Make label 1 only for given image.
            y_train = np.array(y_train == image, dtype=int)
            y_test = np.array(y_test == image, dtype=int)

        print()
        print("Evaluating image:", image)
        print("Number of positives in y:", sum(y_train))
        print()
        sys.stdout.flush()

        # Label only the first (1 - true_rh1) fraction of positives; the rest stay unlabeled
        s = y_train * (np.cumsum(y_train) < (1 - true_rh1) * sum(y_train))
        # In the presence of mislabeled negative (negative incorrectly labeled positive):
        # pi1 is the fraction of mislabeled negative in the labeled set:
        num_mislabeled = int(sum(y_train) * (1 - true_rh1) * pi1 / (1 - pi1))
        if num_mislabeled > 0:
            negative_set = s[y_train == 0]
            mislabeled = np.random.choice(len(negative_set),
                                          num_mislabeled,
                                          replace=False)
            negative_set[mislabeled] = 1
            s[y_train == 0] = negative_set

        print("image = {0}".format(image))
        print(
            "Training set: total = {0}, positives = {1}, negatives = {2}, P_noisy = {3}, N_noisy = {4}"
            .format(len(X_train), sum(y_train),
                    len(y_train) - sum(y_train), sum(s),
                    len(s) - sum(s)))
        print("Testing set:  total = {0}, positives = {1}, negatives = {2}".
              format(len(X_test), sum(y_test),
                     len(y_test) - sum(y_test)))

        # Fit different models for PU learning
        for key in ordered_models_keys:
            fit_start_time = dt.now()
            print("\n\nFitting {0} classifier. Default classifier is {1}.".
                  format(key, clf_type))

            if clf_type == "logreg":
                clf = LogisticRegression()
            elif clf_type == "cnn":
                from classifier_cnn import CNN
                from keras import backend as K
                K.clear_session()
                clf = CNN(
                    dataset_name=dataset,
                    num_category=2,
                    epochs=epochs,
                    early_stopping=early_stopping,
                    verbose=1,
                )
            else:
                raise ValueError(
                    "clf_type must be either logreg or cnn for this testing file."
                )

            ps1 = sum(s) / float(len(s))
            py1 = sum(y_train) / float(len(y_train))
            true_rh0 = pi1 * ps1 / float(1 - py1)

            model = get_model(
                key=key,
                rh1=true_rh1,
                rh0=true_rh0,
                clf=clf,
            )

            try:
                if key == "True Classifier":
                    model.fit(X_train, y_train)
                elif key in [
                        "Rank Pruning", "Rank Pruning (noise rates given)",
                        "Liu16 (noise rates given)"
                ]:
                    model.fit(X_train,
                              s,
                              pulearning=pulearning,
                              cv_n_folds=cv_n_folds)
                elif key in ["Nat13 (noise rates given)"]:
                    model.fit(X_train, s, pulearning=pulearning)
                else:  # Elk08, Baseline
                    model.fit(X_train, s)

                pred = model.predict(X_test)
                # Produces only P(y=1|x) for pulearning models because they are binary
                pred_prob = model.predict_proba(X_test)
                pred_prob = pred_prob[:, 1] if key == "True Classifier" else pred_prob

                # Compute metrics
                metrics_dict = get_metrics(pred, pred_prob, y_test)
                elapsed = (dt.now() - fit_start_time).total_seconds()

                if verbose:
                    print(
                        "\n{0} Model Performance at image {1}:\n=================\n"
                        .format(key, image))
                    print("Time Required", elapsed)
                    print("AUC:", metrics_dict["AUC"])
                    print("Error:", metrics_dict["Error"])
                    print("Precision:", metrics_dict["Precision"])
                    print("Recall:", metrics_dict["Recall"])
                    print("F1 score:", metrics_dict["F1 score"])
                    print("rh1:", model.rh1 if hasattr(model, 'rh1') else None)
                    print("rh0:", model.rh0 if hasattr(model, 'rh0') else None)
                    print()

                metrics_dict["image"] = image
                metrics_dict["time_seconds"] = elapsed
                metrics_dict["rh1"] = model.rh1 if hasattr(model,
                                                           'rh1') else None
                metrics_dict["rh0"] = model.rh0 if hasattr(model,
                                                           'rh0') else None

                # Append dictionary of error and loss metrics
                if key not in data_all["metrics"]:
                    data_all["metrics"][key] = [metrics_dict]
                else:
                    data_all["metrics"][key].append(metrics_dict)
                data_all["calculated"][(key, image)] = True

            except Exception as e:
                msg = "Error in {0}, image {1}, rh1 {2}, m {3}: {4}\n".format(
                    key, image, true_rh1, pi1, e)
                print(msg)
                make_sure_path_exists("failed_models/")
                with open("failed_models/" + key + ".txt", "ab") as f:
                    f.write(msg)
                if suppress_error:
                    continue
                else:
                    raise
    return data_all
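
A minimal sketch of the downsample(X, y, ratio) helper this test assumes: random subsampling of the training pairs down to a given fraction. The keep-fraction semantics and the unstratified sampling are guesses from the call site, not the project's actual helper.

import numpy as np

def downsample(X, y, ratio):
    # Keep a random fraction `ratio` of the (X, y) pairs.
    idx = np.random.choice(len(X), int(len(X) * ratio), replace=False)
    return X[idx], y[idx]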
Example #5
    def initUI(self):

        """
        Main Window global parameters
        """
        self.imgOriginal = np.array([])
        self.mainWidth = 1280
        self.mainHeight = 640
        self.main = QLabel()
        self.imgNpBefore = np.array([])
        self.imgNpAfter = np.array([])
        self.skin = mpimg.imread('res/skin.jpg')

        grid = QGridLayout()
        self.main.setLayout(grid)

        self.mainBefore = QLabel('Before')
        self.mainBefore.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
        self.mainBefore.setWordWrap(True)
        self.mainBefore.setFont(QFont('Monospace', 10))

        self.mainAfter = QLabel('After')
        self.mainAfter.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
        self.mainAfter.setWordWrap(True)
        self.mainAfter.setFont(QFont('Monospace', 10))

        grid.addWidget(self.mainBefore, 0, 0)
        grid.addWidget(self.mainAfter, 0, 1)

        """
        Menu Bar
        """

        # FILE MENU
        openFile = QAction('Open', self)
        openFile.setShortcut('Ctrl+O')
        openFile.setStatusTip('Open new File')
        openFile.triggered.connect(self.showDialog)

        exitAction = QAction('Exit', self)
        exitAction.setShortcut('Ctrl+Q')
        exitAction.setStatusTip('Exit application')
        exitAction.triggered.connect(self.close)

        moveLeft = QAction('Move Left', self)
        moveLeft.setShortcut('Ctrl+L')
        moveLeft.triggered.connect(lambda: self.updateBefore(self.imgNpAfter))


        # PROCESS MENU
        equalizeMenu = QAction('Equalize', self)
        equalizeMenu.triggered.connect(lambda: self.updateImgAfter(util.equalize(self.imgNpBefore)))

        histogramMenu = QAction('Histogram', self)
        #histogramMenu.triggered.connect(lambda: self.updateImgAfter(

        grayscaleMenu = QAction('Grayscale', self)
        grayscaleMenu.triggered.connect(lambda: self.updateImgAfter(util.getgrayscale(self.imgNpBefore)))

        binarizeMenu = QAction('Binarize', self)
        binarizeMenu.triggered.connect(lambda: self.updateImgAfter(util.otsu(self.imgNpBefore)))

        gaussianMenu = QAction('Smooth', self)
        gaussianMenu.triggered.connect(lambda: self.updateImgAfter(util.convolvefft(util.gaussian_filt(), util.getgrayscale(self.imgNpBefore))))        

        resizeMenu = QAction('Resize', self)
        resizeMenu.triggered.connect(lambda: self.updateImgAfter(util.downsample(self.imgNpBefore)))

        segmentMenu = QAction('Segment', self)
        segmentMenu.triggered.connect(lambda: self.updateImgAfter(util.showobj(util.downsample(self.imgNpBefore, target_height=480), util.segment(util.thin(util.otsu(util.downsample(self.imgNpBefore, target_height=480), bg='light'))), box=False)))

        # EDGE DETECTION MENU
        averageMenu = QAction('Average', self)
        averageMenu.triggered.connect(lambda: self.updateImgAfter(util.degreezero(self.imgNpBefore, type="average")))

        differenceMenu = QAction('Difference', self)
        differenceMenu.triggered.connect(lambda: self.updateImgAfter(util.degreezero(self.imgNpBefore, type="difference")))

        homogenMenu = QAction('Homogen', self)
        homogenMenu.triggered.connect(lambda: self.updateImgAfter(util.degreezero(self.imgNpBefore, type="homogen")))

        sobelMenu = QAction('Sobel', self)
        sobelMenu.triggered.connect(lambda: self.updateImgAfter(util.degreeone(self.imgNpBefore, type="sobel")))

        prewittMenu = QAction('Prewitt', self)
        prewittMenu.triggered.connect(lambda: self.updateImgAfter(util.degreeone(self.imgNpBefore, type="prewitt")))

        freichenMenu = QAction('Frei-Chen', self)
        freichenMenu.triggered.connect(lambda: self.updateImgAfter(util.degreeone(self.imgNpBefore, type="freichen")))

        kirschMenu = QAction('Kirsch', self)
        kirschMenu.triggered.connect(lambda: self.updateImgAfter(util.degreetwo(self.imgNpBefore, type="kirsch")))


        # FEATURE MENU
        chaincodeMenu = QAction('Chain code', self)
        chaincodeMenu.triggered.connect(lambda: self.updateTxtAfter(str([util.getdirection(chain[n][0], chain[n][1]) for chain in util.segment(util.thin(self.imgNpBefore), cc=True) for n in range(len(chain))])))

        turncodeMenu = QAction('Turn code', self)
        turncodeMenu.triggered.connect(lambda: self.updateTxtAfter(str([util.getturncode(cc) for cc in util.segment(util.thin(self.imgNpBefore, bg='light'), cc=False)])))

        skeletonMenu = QAction('Zhang-Suen thinning', self)
        skeletonMenu.triggered.connect(lambda:self.updateImgAfter(util.zhangsuen(util.binarize(self.imgNpBefore, bg='light'))))

        skinMenu = QAction('Boundary detection', self)
        skinMenu.triggered.connect(lambda:self.updateImgAfter(util.thin(self.imgNpBefore, bg='light')))

        freemanMenu = QAction('Contour profile', self)


        # RECOGNITION MENU
        freemantrainfontMenu = QAction('Train Contour Font', self)
        freemantrainfontMenu.triggered.connect(lambda: util.train(self.imgNpBefore, feats='zs', order='font', setname='font')) 

        freemantrainplatMenu = QAction('Train ZS Plate (GNB)', self)
        freemantrainplatMenu.triggered.connect(lambda: util.train(self.imgNpBefore, feats='zs', order='plat', setname='plat'))

        cctctrainfontMenu = QAction('Train CC + TC Font', self)

        cctctrainplatMenu = QAction('Train CC + TC Plate', self)

        freemantestfontMenu = QAction('Predict Contour Font', self)
        freemantestfontMenu.triggered.connect(lambda: self.updateTxtAfter(util.test(self.imgNpBefore, feats='zs', order='font', setname='font')))
        
        freemantestplatMenu = QAction('Predict Contour Plate', self)
        freemantestplatMenu.triggered.connect(lambda:self.updateTxtAfter(util.test(self.imgNpBefore, feats='zs', order='plat', setname='plat')))

        cctctestfontMenu = QAction('Predict CC + TC Font', self)

        cctctestplatMenu = QAction('Predict CC + TC Plate', self)

        facesMenu = QAction('Show faces', self)
        facesMenu.triggered.connect(lambda: self.updateImgAfter(util.getFaces(self.imgNpBefore, self.skin, range=70)))

        faceMenu = QAction('Show facial features', self)
        faceMenu.triggered.connect(lambda: self.updateImgAfter(util.showobj(self.imgNpBefore, util.getFaceFeats(self.imgNpBefore, self.skin, range=100), color=False)))

        # MENU BAR
        menubar = self.menuBar()

        fileMenu = menubar.addMenu('&File')
        fileMenu.addAction(openFile)
        fileMenu.addAction(exitAction)
        fileMenu.addAction(moveLeft)

        processMenu = menubar.addMenu('&Preprocess')
        #processMenu.addAction(histogramMenu)
        processMenu.addAction(equalizeMenu)
        processMenu.addAction(grayscaleMenu)
        processMenu.addAction(binarizeMenu)
        processMenu.addAction(gaussianMenu)
        processMenu.addAction(resizeMenu)
        processMenu.addAction(segmentMenu)

        edgeMenu = menubar.addMenu('&Edge Detection')
        edgeMenu.addAction(averageMenu)
        edgeMenu.addAction(differenceMenu)
        edgeMenu.addAction(homogenMenu)
        edgeMenu.addAction(sobelMenu)
        edgeMenu.addAction(prewittMenu)
        edgeMenu.addAction(freichenMenu)
        edgeMenu.addAction(kirschMenu)

        featureMenu = menubar.addMenu('&Features')
        featureMenu.addAction(chaincodeMenu)
        featureMenu.addAction(turncodeMenu)
        featureMenu.addAction(skeletonMenu)
        featureMenu.addAction(skinMenu)
        featureMenu.addAction(freemanMenu)

        recogMenu = menubar.addMenu('&Recognition')
        recogMenu.addAction(freemantrainfontMenu)
        recogMenu.addAction(freemantrainplatMenu)
        recogMenu.addAction(cctctrainfontMenu)
        recogMenu.addAction(cctctrainplatMenu)
        recogMenu.addAction(freemantestfontMenu)
        recogMenu.addAction(freemantestplatMenu)
        recogMenu.addAction(cctctestfontMenu)
        recogMenu.addAction(cctctestplatMenu)
        recogMenu.addAction(facesMenu)
        recogMenu.addAction(faceMenu)
        #recogMenu.addAction(

        """
        Toolbar, Status Bar, Tooltip
        """
        self.statusBar().showMessage('Ready')

        QToolTip.setFont(QFont('SansSerif', 10))
        #self.setToolTip('This is a <b>QWidget</b> widget')

        """
        Displaying
        """

        self.setGeometry(12, 30, self.mainWidth, self.mainHeight+80)
        self.setWindowTitle('Pyxel')
        self.setWindowIcon(QIcon('res/web.png'))

        self.setCentralWidget(self.main)

        self.main.setGeometry(QRect(0, 80, self.mainWidth, self.mainHeight))
        #self.mainAfter.setGeometry(QRect(self.mainWidth/2, 80, self.mainWidth/2, self.mainHeight))

        self.show()
Example #6
def process_cloud(msg):
    # subsample, start_time, end_time, listener, etc. are module-level configuration
    global init_time, count_msg, previous_time
    if count_msg % subsample != 0:
        count_msg += 1
        return
    count_msg += 1
    if init_time is None:
        init_time = msg.header.stamp.to_sec()
        print('init_time', init_time)
    elif msg.header.stamp.to_sec() - init_time < start_time:
        return
    elif msg.header.stamp.to_sec() - init_time > end_time:
        return
    try:
        listener.waitForTransform("map", "base_link", msg.header.stamp,
                                  rospy.Duration(1.0))
        translation, rotation = listener.lookupTransform(
            "map", "base_link", msg.header.stamp)
    except Exception as e:
        print(e)
        return

    if translation is not None:
        data = numpy.frombuffer(msg.data, dtype=numpy.float32)
        pcd = data.reshape(-1, 8)[:, :3]
        nan_mask = numpy.any(numpy.isnan(pcd), axis=1)
        pcd = pcd[numpy.logical_not(nan_mask), :]
        x_mask = numpy.abs(pcd[:, 0]) < 20
        y_mask = numpy.abs(pcd[:, 1]) < 20
        z_mask = numpy.abs(pcd[:, 2]) < 20
        pcd = pcd[x_mask & y_mask & z_mask, :]

        #apply transformation
        pcd = downsample(pcd, downsample_resolution)
        T = numpy.array(translation)
        R = quaternion_matrix(rotation)[:3, :3]
        pcd = R.dot(pcd.transpose()).transpose() + T

        if velodyne_to_faro is None:
            global_cloud.extend(pcd)
            dt = (msg.header.stamp.to_sec() - init_time)
            print("t:%.2f" % dt, pcd.shape, count_msg)
        else:
            if previous_time is None:
                secs = msg.header.stamp.to_sec()
            else:
                secs = previous_time + time_interval
            previous_time = secs
            t = rospy.Time.from_sec(secs)
            msg.header.frame_id = '/map'
            msg.header.stamp = t

            #apply transformation from velodyne frame to FARO frame
            pcd = velodyne_to_faro[:3, :3].dot(pcd.T).T + velodyne_to_faro[:3, 3]

            #get instance labels
            instance_labels = numpy.zeros(len(pcd))
            class_labels = numpy.zeros(len(pcd))
            pcd_voxels = [
                tuple(p)
                for p in numpy.round(pcd[:, :3] / label_resolution).astype(int)
            ]
            for i in range(len(pcd_voxels)):
                k = pcd_voxels[i]
                if k in gt_obj_map:
                    instance_labels[i] = gt_obj_map[k]
                    class_labels[i] = gt_cls_map[k]
                    instance_set.add(gt_obj_map[k])
                    continue
                for offset in itertools.product(range(-1, 2), range(-1, 2),
                                                range(-1, 2)):
                    kk = (k[0] + offset[0], k[1] + offset[1], k[2] + offset[2])
                    if kk in gt_obj_map:
                        instance_labels[i] = gt_obj_map[kk]
                        class_labels[i] = gt_cls_map[kk]
                        instance_set.add(gt_obj_map[kk])
                        break

            #apply offset for visualization
            pcd -= faro_offset

            T = TransformStamped()
            T.header = msg.header
            T.transform.translation.x = velodyne_to_faro[0, 3] - faro_offset[0]
            T.transform.translation.y = velodyne_to_faro[1, 3] - faro_offset[1]
            T.transform.translation.z = velodyne_to_faro[2, 3] - faro_offset[2]
            q = quaternion_from_matrix(velodyne_to_faro)
            T.transform.rotation.x = q[0]
            T.transform.rotation.y = q[1]
            T.transform.rotation.z = q[2]
            T.transform.rotation.w = q[3]

            pose = PoseStamped()
            pose.header = msg.header
            pose.pose.position.x = translation[0]
            pose.pose.position.y = translation[1]
            pose.pose.position.z = translation[2]
            pose.pose.orientation.x = rotation[0]
            pose.pose.orientation.y = rotation[1]
            pose.pose.orientation.z = rotation[2]
            pose.pose.orientation.w = rotation[3]
            pose = do_transform_pose(pose, T)
            pose_arr.append(pose)
            bag.write('slam_out_pose', pose, t=t)

            path = Path()
            path.header = msg.header
            path.poses = pose_arr
            bag.write('trajectory', path, t=t)

            fields = [
                PointField('x', 0, PointField.FLOAT32, 1),
                PointField('y', 4, PointField.FLOAT32, 1),
                PointField('z', 8, PointField.FLOAT32, 1),
                PointField('r', 12, PointField.UINT8, 1),
                PointField('g', 13, PointField.UINT8, 1),
                PointField('b', 14, PointField.UINT8, 1),
                PointField('o', 15, PointField.INT32, 1),
                PointField('c', 19, PointField.UINT8, 1),
            ]
            pcd_with_labels = numpy.zeros((len(pcd), 8))
            pcd_with_labels[:, :3] = pcd
            pcd_with_labels[:, 3:6] = 255
            pcd_with_labels[:, 6] = instance_labels
            pcd_with_labels[:, 7] = class_labels
            pcd_with_labels = point_cloud2.create_cloud(
                msg.header, fields, pcd_with_labels)
            bag.write('laser_cloud_surround', pcd_with_labels, t=t)

            dt = (msg.header.stamp.to_sec() - init_time)
            print("t:%.2f" % dt, pcd.shape, count_msg, len(instance_set),
                  'instances', numpy.sum(instance_labels > 0), 'labels')
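
A minimal sketch of the point-cloud downsample(pcd, resolution) this callback and the script in the next example assume: voxel-grid subsampling that keeps one point per occupied cell. The real helper may average the points in each cell instead.

import numpy

def downsample(pcd, resolution):
    # Keep one representative point per voxel of the given edge length;
    # extra columns (colour, labels) ride along with the kept rows.
    voxels = numpy.round(pcd[:, :3] / resolution).astype(int)
    _, keep = numpy.unique(voxels, axis=0, return_index=True)
    return pcd[keep]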
Example #7


rospy.init_node('bag_converter')
listener = tf.TransformListener()
subscribed_topic = '/full_cloud_projected'
global_cloud_sub = rospy.Subscriber(subscribed_topic, PointCloud2,
                                    process_cloud)
print('Listening to %s ...' % subscribed_topic)
rospy.spin()

if velodyne_to_faro is None:
    global_cloud = numpy.array(global_cloud)
    print(global_cloud.shape)
    global_cloud = numpy.hstack(
        (global_cloud, numpy.zeros((len(global_cloud), 3))))
    print(global_cloud.shape)
    global_cloud = downsample(global_cloud, 0.05)
    print(global_cloud.shape)
    savePLY('viz/global_cloud.ply', global_cloud)
else:
    bag.close()