Example #1

    def save_all(self, img_orig, pred_persp, pred_sphe):


        # colorize prediction
        pred_persp_color = colorEncode(pred_persp, self.colors).astype(numpy.uint8)
        pred_sphe_color = colorEncode(pred_sphe, self.colors).astype(numpy.uint8)

        # aggregate images and save
        im_vis = numpy.concatenate((pred_persp_color, pred_sphe_color), axis=1)
        img_final = PIL.Image.fromarray(im_vis)

        new_im = PIL.Image.new('RGB', (img_final.size[0], 2*img_final.size[1]))

        new_im.paste(PIL.Image.open(img_orig))
        # extract the frame index from the filename (e.g. "12_0.png" -> "12")
        it = str(int((img_orig.split('/')[-1]).split('_')[0]))
        # the ground-truth map sits next to the input, named "<index>_2.png"
        gt_image = img_orig[0:-len(img_orig.split('/')[-1])] + it + '_2.png'
        new_im.paste(PIL.Image.open(gt_image), (int(img_final.size[0]/2), 0))
        new_im.paste(img_final,(0,img_final.size[1]))

        os.makedirs(self.savedir, exist_ok=True)
        print("Saving", it, "from", img_orig)
        new_im.save(os.path.join(self.savedir, it+'.png'))
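Every example on this page leans on the same colorEncode helper from the MIT CSAIL semantic-segmentation utilities (mit_semseg). A minimal sketch of what it does, under the usual ADE20K palette conventions (the name color_encode marks it as a sketch, not the library source):

import numpy as np

def color_encode(labelmap, colors):
    # Map each class id in an (H, W) label map to its RGB palette color.
    # Negative ids (the -1 used as "filtered out" in later examples) stay black.
    labelmap = labelmap.astype('int')
    out = np.zeros((labelmap.shape[0], labelmap.shape[1], 3), dtype=np.uint8)
    for label in np.unique(labelmap):
        if label < 0:
            continue
        out[labelmap == label] = colors[label]
    return out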
Example #2
def visualize_result(data, pred, dir_result):
    (img, seg, info) = data

    # segmentation
    seg_color = colorEncode(seg, colors)

    # prediction
    pred_color = colorEncode(pred, colors)

    # aggregate images and save
    im_vis = np.concatenate((img, seg_color, pred_color),
                            axis=1).astype(np.uint8)

    img_name = info.split('/')[-1]
    Image.fromarray(im_vis).save(os.path.join(dir_result, img_name.replace('.jpg', '.png')))
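The colors palette and the names lookup used here are module-level globals in the original files. Examples #5, #8 and #11 below show colors being loaded from color150.mat; a typical way to build both, assuming the repo's default data files:

import csv
from scipy.io import loadmat

colors = loadmat('data/color150.mat')['colors']  # (150, 3) uint8 palette
names = {}
with open('data/object150_info.csv') as f:
    reader = csv.reader(f)
    next(reader)  # skip the header row
    for row in reader:
        names[int(row[0])] = row[5].split(';')[0]  # first synonym as class name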
Example #3
def visualize_result(data, pred, cfg):
    (img, info) = data

    # print predictions in descending order
    pred = np.int32(pred)
    print("The predictions are: ", pred, pred.shape)
    pixs = pred.size
    uniques, counts = np.unique(pred, return_counts=True)
    print("Predictions in [{}]:".format(info))
    for idx in np.argsort(counts)[::-1]:
        name = names[uniques[idx] + 1]
        ratio = counts[idx] / pixs * 100
        if ratio > 0.1:
            print("  {}: {:.2f}%".format(name, ratio))

    # colorize prediction
    pred_color = colorEncode(pred, colors).astype(np.uint8)
    print("Colorized: ", pred_color, pred_color.shape)

    # aggregate images and save
    im_vis = np.concatenate((img, pred_color), axis=1)

    img_name = info.split('/')[-1]
    Image.fromarray(im_vis).save(
        os.path.join(cfg.TEST.result, img_name.replace('.jpg', '.png')))
Example #4
    def visualize_result(self,
                         data,
                         pred,
                         overlay=True,
                         concat=False,
                         verbose=False):
        (img, info) = data

        # print predictions in descending order
        pred = np.int32(pred)
        pixs = pred.size
        uniques, counts = np.unique(pred, return_counts=True)
        # print("Predictions in [{}]:".format(info))
        if verbose:
            for idx in np.argsort(counts)[::-1]:
                name = self.names[uniques[idx]]
                ratio = counts[idx] / pixs * 100
                if ratio > 0.1:
                    print("  {:20}: {:.2f}%".format(name, ratio))

        # colorize prediction
        pred_color = colorEncode(pred, self.colors).astype(np.uint8)

        # aggregate images and save
        # im_vis = np.concatenate((img, pred_color), axis=1)
        if not overlay and not concat:
            return np.repeat(np.expand_dims(pred.astype(np.uint8), 2), 3, 2)

        if overlay:
            img = (img.astype('float') + pred_color.astype('float')).clip(
                0, 255).astype('uint8')
        if concat:
            img = np.concatenate((img, pred_color), axis=1)
        return img
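A short usage sketch for the method above, assuming seg is an instance of the (unshown) owning class and img, info, pred are as in the other examples:

blended = seg.visualize_result((img, info), pred, overlay=True)             # img + colors, clipped
side_by_side = seg.visualize_result((img, info), pred, overlay=False, concat=True)
raw_ids = seg.visualize_result((img, info), pred, overlay=False)            # (H, W, 3) uint8 id map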
Example #5

def visualize_result(data, pred, cfg):
    (img, info) = data
    colors = loadmat('data/color150.mat')['colors']
    names = {1: 'road', 2: 'background'}
    # print predictions in descending order
    pred = np.int32(pred)
    pixs = pred.size
    uniques, counts = np.unique(pred, return_counts=True)
    print(uniques, counts)
    print("Predictions in [{}]:".format(info))
    for idx in np.argsort(counts)[::-1]:
        name = names[uniques[idx] + 1]
        ratio = counts[idx] / pixs * 100
        if ratio > 0.1:
            print("  {}: {:.2f}%".format(name, ratio))

    # colorize prediction
    pred_color = colorEncode(pred, colors).astype(np.uint8)

    # aggregate images and save
    im_vis = np.concatenate((img, pred_color), axis=1)

    img_name = image_demo_dir.split('/')[-1]
    Image.fromarray(im_vis).save('demo/huawei_20_' +
                                 img_name.replace('.jpg', '.png'))
Example #6
def visualize(img, pred, index=None, concat_original=True):
    if index is not None:
        pred = pred.copy()
        pred[pred != index] = -1
    im_vis = colorEncode(pred, colors).astype(np.uint8)
    if concat_original:
        im_vis = np.concatenate((img, im_vis), axis=1)
    image.display(Image.fromarray(im_vis))
Example #7

    def save_simple(self, img_orig, pred_persp, pred_sphe):
        # colorize prediction
        pred_persp_color = colorEncode(pred_persp, self.colors).astype(numpy.uint8)
        pred_sphe_color = colorEncode(pred_sphe, self.colors).astype(numpy.uint8)

        # aggregate images and save
        im_vis = numpy.concatenate((pred_persp_color, pred_sphe_color), axis=1)
        img_final = PIL.Image.fromarray(im_vis)

        new_im = PIL.Image.new('RGB', (img_final.size[0], 2*img_final.size[1]))

        new_im.paste(PIL.Image.open(img_orig))
        # extract the frame index from the filename (e.g. "12_0.png" -> "12")
        it = str((img_orig.split('/')[-1]).split('_0')[0])
        # the ground-truth map sits in the sibling ".../2/" folder as "<index>_2.png"
        gt_image = img_orig[0:-len(img_orig.split('/')[-1])][0:-3] + '/2/' + it + '_2.png'
        new_im.paste(PIL.Image.open(gt_image), (int(img_final.size[0]/2), 0))
        new_im.paste(PIL.Image.fromarray(pred_persp_color), (0, img_final.size[1]))

        from PIL import ImageDraw, ImageFont

        img_edit = ImageDraw.Draw(new_im)
        text_color = (255, 255, 255)
        fnt = ImageFont.truetype("/usr/share/fonts/liberation/LiberationSans-Regular.ttf", 40)

        # draw a legend: one color swatch and class name per label in the prediction
        ipred_unique = numpy.unique(pred_persp)
        ipred_ratio = 10  # legend entries per column
        ipred_dist = int(img_final.size[1]/ipred_ratio)
        idx_loc = 0
        for ipred in ipred_unique:
            posx = int(img_final.size[0]*5/10) + 150 * numpy.floor(idx_loc/ipred_ratio)
            posy = img_final.size[1] + ipred_dist * (idx_loc % ipred_ratio) + ipred_dist/2
            img_edit.text((posx, posy), self.names[ipred+1], text_color, font=fnt, anchor="ls")
            img_edit.rectangle((posx-30, posy-20, posx-10, posy),
                               fill=(self.colors[ipred][0], self.colors[ipred][1], self.colors[ipred][2]),
                               outline=(255, 255, 255))
            idx_loc += 1


        os.makedirs(self.savedir, exist_ok=True)
        new_im.save(os.path.join(self.savedir, it+'.png'))
Example #8
def visualize_result(pred):
    # print predictions in descending order
    pred = np.int32(pred)

    # colorize prediction
    colors = loadmat('data/color150.mat')['colors']
    pred_color = colorEncode(pred, colors).astype(np.uint8)

    img_name = "res_seg.jpg"
    Image.fromarray(pred_color).save(
        os.path.join("./", img_name.replace('.jpg', '.png')))
Example #9

def visualize_result(img, pred, index=None):

    # filter prediction class if requested
    if index is not None:
        pred = pred.copy()
        pred[pred != index] = -1
        print(f'{names[index+1]}:')
        x1 = []
        x2 = []
        y1 = []
        y2 = []
        flag1 = True
        flag2 = True

        # scan rows top-down for the first occupied row (y1) and bottom-up for the last (y2)
        for i in range(len(pred)):
            if flag1 and (max(pred[i]) > -1):
                y1.append(i)
                flag1 = False
            if flag2 and max(pred[len(pred) - 1 - i]) > -1:
                y2.append(len(pred) - 1 - i)
                flag2 = False

        # repeat on the transpose to find the first/last occupied columns (x1, x2)
        xpred = numpy.transpose(pred)
        flag1 = True
        flag2 = True
        for i in range(len(xpred)):
            if flag1 and (max(xpred[i]) > -1):
                x1.append(i)
                flag1 = False
            if flag2 and max(xpred[len(xpred) - 1 - i]) > -1:
                x2.append(len(xpred) - 1 - i)
                flag2 = False

        pred_color = colorEncode(pred, colors).astype(numpy.uint8)

        x1 = min(x1)
        x2 = max(x2)
        y1 = min(y1)
        y2 = max(y2)  # was min(); the bottom edge is the maximum row index

        # aggregate images and save
        im_vis = numpy.concatenate((img, pred_color), axis=1)

        #display(PIL.Image.fromarray(im_vis))

        return [x1, y1, x2, y2]
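The two scanning loops above find the bounding box of the kept class. An equivalent vectorized formulation (a sketch of the same logic, not from the original):

import numpy

def bbox_of_class(pred, index):
    mask = (pred == index)
    rows = numpy.any(mask, axis=1)  # rows containing the class
    cols = numpy.any(mask, axis=0)  # columns containing the class
    if not rows.any():
        return None  # class absent from the prediction
    y1, y2 = numpy.where(rows)[0][[0, -1]]
    x1, x2 = numpy.where(cols)[0][[0, -1]]
    return [x1, y1, x2, y2]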
Example #10
def visualize_result(img, pred, index=None):
    # filter prediction class if requested
    if index is not None:
        pred = pred.copy()
        pred[pred != index] = -1
        print(f'{names[index+1]}:')

    # colorize prediction
    pred_color = colorEncode(pred, colors).astype(numpy.uint8)

    # aggregate images and save
    im_vis = numpy.concatenate((img, pred_color), axis=1)
    PIL.Image.fromarray(im_vis).save('result.png')
Example #11
    def visualize_result(self, img, pred, index=None):
        # filter prediction class if requested
        colors = scipy.io.loadmat('semseg/color150.mat')['colors']
        if index is not None:
            pred = pred.copy()
            pred[pred != index] = -1
            print(f'{self.names[index + 1]}:')

        # colorize prediction
        pred_color = colorEncode(pred, colors).astype(np.float32) / 255

        # aggregate images and save
        im_vis = np.concatenate((img, pred_color), axis=1)
        plt.imshow(im_vis)
        plt.show()
Example #12
def visualize_result(img, pred, colors, index=None):
    # filter prediction class if requested
    if index is not None:
        pred = pred.copy()
        pred[pred != index] = -1
        # print(f'{names[index+1]}:')

    # colorize prediction
    pred_color = colorEncode(pred, colors).astype(numpy.uint8)

    # aggregate images and save
    im_vis = numpy.concatenate((img, pred_color), axis=1)
    return pred_color, im_vis
Example #13
File: cam_test.py  Project: starkgines/PDI
def visualize_result(img, pred, index=None):
    # filter prediction class if requested
    if index is not None:
        pred = pred.copy()
        pred[pred != index] = -1
        print(f'{names[index+1]}:')
        
    # colorize prediction
    pred_color = colorEncode(pred, colors).astype(numpy.uint8)

    # aggregate images and save
    im_vis = numpy.concatenate((img, pred_color), axis=1)
    im_vis = im_vis.astype(numpy.uint8)
    img = im_vis[:, :, ::-1]  # RGB -> BGR (OpenCV channel order)

    return img
Example #14
def visualize_result(data, pred, cfg):
    (img, info) = data

    # print predictions in descending order
    pred = np.int32(pred)
    pixs = pred.size
    uniques, counts = np.unique(pred, return_counts=True)
    print("Predictions in [{}]:".format(info))
    for idx in np.argsort(counts)[::-1]:
        name = names[uniques[idx] + 1]
        ratio = counts[idx] / pixs * 100
        if ratio > 0.1:
            print("  {}: {:.2f}%".format(name, ratio))

    # colorize prediction
    pred_color = colorEncode(pred, colors).astype(np.uint8)

    # modified: convert to greyscale and save image
    # (cast to float32: PIL.Image.fromarray cannot handle float64 arrays)
    gray = (np.dot(pred_color[..., :3], [0.299, 0.587, 0.114]) / 255.0).astype(np.float32)
    img_name = info.split('/')[-1]
    Image.fromarray(gray).save(
        os.path.join(cfg.TEST.result, img_name.replace('.jpg', '_sky.tif')))
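Float TIFFs are awkward for many viewers; if an 8-bit file is enough, the same mask can be rescaled before saving (a variant, not in the original):

gray8 = (gray * 255).astype(np.uint8)  # [0, 1] float -> [0, 255] uint8
Image.fromarray(gray8).save(
    os.path.join(cfg.TEST.result, img_name.replace('.jpg', '_sky.png')))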
Example #15

def main():
    """Run main function"""

    OSS = OmniSemSeg(DATADIR, SAVEDIR)

    if VERBOSE:
        print('Semantic Segmentation')
        print("Saving results to %s" % SAVEDIR)

    print("Number of images:", len(OSS.list_img))

    if IMODE == "test":
        for elt in OSS.list_img:
            torch.cuda.synchronize()
            tic = time.perf_counter()

            pred_sphe, pred_persp = OSS.semseg_pred(elt)

            time_end = time.perf_counter() - tic
            print("Done for", str(elt), "in", time_end)
            OSS.save_simple(elt, pred_persp, pred_sphe)

    elif IMODE == "eval":

        from mit_semseg.lib.utils import as_numpy

        semseg_metric_persp = semseg_metric()
        semseg_metric_sphe = semseg_metric()

        for elt in OSS.list_img[0:100]:

            semseg_gt_file = elt.replace("_0.png","_2.png")
            semseg_gt = as_numpy(PIL.Image.open(semseg_gt_file).convert('RGB'))
            # print("Image seg GT")
            # # print(semseg_gt)
            # print(numpy.unique(semseg_gt[:,:,0], return_counts=True)) #red
            # print(numpy.unique(semseg_gt[:,:,1], return_counts=True)) #green
            # print(numpy.unique(semseg_gt[:,:,2], return_counts=True)) #blue
            # semseg_gt_id = numpy.zeros((semseg_gt.shape[0],semseg_gt.shape[1])) -1
            # for idx in range(semseg_gt.shape[0]):
            #     for idy in range(semseg_gt.shape[1]):
            #         for idc, col in enumerate(OSS.colors):
            #             if not((semseg_gt[idx,idy] - col).all()):
            #                 semseg_gt_id[idx,idy] = idc
            #                 break
            # print("Semseg Gt ID")
            # print(semseg_gt_id)

            torch.cuda.synchronize()
            tic = time.perf_counter()

            print("Doing for", str(elt))
            pred_sphe, pred_persp = OSS.semseg_pred(elt)
            OSS.save_all_2(elt, pred_persp, pred_sphe)

            pred_sphe_color = colorEncode(pred_sphe, OSS.colors).astype(numpy.uint8)
            pred_persp_color = colorEncode(pred_persp, OSS.colors).astype(numpy.uint8)

            semseg_metric_persp.update_metrics(pred_persp_color, semseg_gt, time.perf_counter() - tic)
            semseg_metric_sphe.update_metrics(pred_sphe_color, semseg_gt, time.perf_counter() - tic)

        semseg_metric_persp.show_metrics("PERSP")
        semseg_metric_sphe.show_metrics("SPHE")


    print("DONE")
Example #16
def run_loop(bag_path, seg_model, seg_opts, save_images=False, output_mode=0):
    if save_images:
        create_folders()
    # Create pipeline
    pipeline = rs.pipeline()
    # Create a config object
    config = rs.config()
    # Tell config that we will use a recorded device from file, played back through the pipeline.
    rs.config.enable_device_from_file(config, args.input)
    # Start streaming from file
    Pipe = pipeline.start(config)

    # Getting the depth sensor's depth scale (see rs-align example for explanation)
    depth_sensor = Pipe.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()
    print("Depth Scale is: ", depth_scale)  # can be commented out

    if output_mode == 0 or output_mode == 1:
        # Create opencv window to render image in
        cv2.namedWindow("Full Stream", cv2.WINDOW_NORMAL)

    flip_transform = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]

    # Create colorizer object
    colorizer = rs.colorizer()
    idx = 0
    # initial frame delay
    idx_limit = 30

    # pre_seg_mask_sum = None  # previous frame path segmentation area - isn't being used right now

    # Streaming loop
    try:
        while True:
            idx += 1
            # Get frameset of depth
            frames = pipeline.wait_for_frames()
            # ignore the first idx_limit frames (initial warm-up delay)
            if idx < idx_limit:
                continue

            align = rs.align(rs.stream.color)
            frames = align.process(frames)

            # Get color frame
            color_frame = frames.get_color_frame()
            # Get intrinsic in Open3d format for mode 2 and 3 for point cloud output
            if output_mode == 1 or output_mode == 2:
                intrinsic = o3d.camera.PinholeCameraIntrinsic(
                    get_intrinsic_matrix(color_frame))
            # Get depth frame
            depth_frame = frames.get_depth_frame()
            # Print intrinsics and extrinsics - not necessary : can be commented out
            if idx == idx_limit:
                camera_intrinsics(color_frame, depth_frame, Pipe)

            color_image = np.asanyarray(color_frame.get_data())

            ### Add SEGMENTATION part here ###
            pred = test(color_image, seg_model, seg_opts)

            # pavement, floor, road, earth/ground, field, path, dirt/track - chosen classes for the model selected (we'd like an oversegmentation of the path)
            seg_mask = (pred == 11) | (pred == 3) | (pred == 6) | (
                pred == 13) | (pred == 29) | (pred == 52) | (
                    pred == 91)  #.astype(np.uint8)

            if idx == idx_limit:  # 1st frame detection needs to be robust
                pre_seg_mask_sum = np.sum(seg_mask)
            # checking for bad detection
            new_seg_sum = np.sum(seg_mask)
            diff = abs(new_seg_sum - pre_seg_mask_sum)
            # if diff > pre_seg_mask_sum/15:  # smoothing between segmentation outputs - seems like a bad idea since the model inputs are not connected between timesteps
            #     seg_mask = np.ones_like(pred).astype(np.uint8) # need to add a depth (5 m) criterion for robustness
            #     del new_seg_sum
            # else:
            pre_seg_mask_sum = new_seg_sum
            ### mask Hole filling
            seg_mask = nd.binary_fill_holes(seg_mask).astype(np.uint8)
            #####
            seg_mask_3d = np.dstack((seg_mask, seg_mask, seg_mask))

            pred_color = colorEncode(
                pred,
                loadmat(os.path.join(model_folder, 'color150.mat'))['colors'])
            ##################################

            depth_frame = depth_filter(depth_frame)
            depth_array = np.asarray(depth_frame.get_data())
            # Colorize depth frame to jet colormap
            depth_color_frame = colorizer.colorize(depth_frame)
            # Convert depth_frame to numpy array to render image in opencv
            depth_color_image = np.asanyarray(depth_color_frame.get_data())

            ############ Plane Detection
            ## need to add smoothening between frames - by plane weights' variance?
            try:
                ### need to add multithreading here (and maybe other methods?)
                planes_mask_binary = plane_detection(color_image*seg_mask_3d, depth_array*seg_mask,\
                    loop=5)
            except TypeError:
                try:
                    print("plane mask 1st error")
                    planes_mask, planes_normal, list_plane_params = test_PlaneDetector_send(
                        img_color=color_image * seg_mask_3d,
                        img_depth=depth_array * seg_mask)
                except TypeError:
                    print("plane mask not detected - skipping frame")
                    continue
            ##############################################
            ## Hole filling for plane_mask (plane mask isn't binary - fixed that!)
            planes_mask_binary = nd.binary_fill_holes(planes_mask_binary)
            planes_mask_binary = planes_mask_binary.astype(np.uint8)
            # Clean plane mask object detection by seg_mask
            planes_mask_binary *= seg_mask
            planes_mask_binary_3d = np.dstack(
                (planes_mask_binary, planes_mask_binary, planes_mask_binary))

            edges = planes_mask_binary - nd.morphology.binary_dilation(
                planes_mask_binary
            )  # edges calculation - between traversable and non-traversable path
            #############################################

            if output_mode == 1 or output_mode == 2:
                odepth_image = o3d.geometry.Image(depth_array * edges)
                ocolor_image = o3d.geometry.Image(color_image * np.dstack(
                    (edges, edges, edges)))

                rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(
                    ocolor_image,
                    odepth_image,
                    depth_scale=1.0 / depth_scale,
                    depth_trunc=10,  # set to 10 metres
                    convert_rgb_to_intensity=False)
                temp = o3d.geometry.PointCloud.create_from_rgbd_image(
                    rgbd_image, intrinsic)
                temp.transform(flip_transform)
                # temp = temp.voxel_down_sample(0.03)

            # Point cloud output of frame is appended to the list
            if output_mode == 1 or output_mode == 2:
                output_list.append(temp)

            # image format conversion for cv2 visualization/output
            if output_mode == 0 or output_mode == 1 or save_images:
                pred_color = cv2.cvtColor(pred_color, cv2.COLOR_RGB2BGR)
                color_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
                edges = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)
                ## for displaying seg_mask
                seg_mask = (np.array(seg_mask) * 255).astype(np.uint8)
                seg_mask = cv2.cvtColor(
                    seg_mask, cv2.COLOR_GRAY2BGR)  # segmentation binary mask
                final_output_mask = cv2.cvtColor(
                    planes_mask_binary,
                    cv2.COLOR_GRAY2BGR)  # final traversable path mask

            if output_mode == 0 or output_mode == 1:
                # # Blending rgb and depth images for display - can check alignment with this as well
                alpha = 0.2
                beta = (1.0 - alpha)
                dst = cv2.addWeighted(
                    color_image, alpha, pred_color, beta, 0.0
                )  # color and segmentation output from ADE20K model blended
                dst2 = cv2.addWeighted(depth_color_image, alpha, color_image,
                                       beta,
                                       0.0)  # color and depth blended together
                ##################################

                ### delete later if needed - color image masked by final traversable path
                final_output = color_image * planes_mask_binary_3d
                mask = (final_output[:, :, 0] == 0) & (
                    final_output[:, :, 1] == 0) & (final_output[:, :, 2] == 0)
                final_output[:, :, :3][mask] = [255, 255, 255]
                ######

                ### Select outputs for visualization - we've chosen some as default
                image_set1 = np.vstack((dst, dst2))
                # image_set2 = np.vstack((planes_mask_binary_3d*255, seg_mask))
                # image_set2 = np.vstack((dst, final_output))
                image_set2 = np.vstack((edges, final_output))
                ### Choose which images you want to display from above
                combined_images = np.concatenate((image_set1, image_set2),
                                                 axis=1)

            if save_images:
                # Outputs saved - you can modify this
                cv2.imwrite("output/visualization/frame%d.png" % idx,
                            combined_images)
                cv2.imwrite("data/color/frame%d.png" % idx,
                            color_image)  # save frame as PNG file
                cv2.imwrite("data/depth/frame%d.png" % idx,
                            depth_array)  # save frame as PNG file
                cv2.imwrite("output/edges/frame%d.png" % idx,
                            edges)  # save frame as PNG file
                cv2.imwrite("output/segmentation_mask/frame%d.png" % idx,
                            seg_mask)  # save frame as PNG file
                cv2.imwrite("output/output_mask/frame%d.png" % idx,
                            final_output_mask)  # save frame as PNG file

            if output_mode == 0 or output_mode == 1:
                try:
                    cv2.imshow('Full Stream', combined_images)
                except TypeError as e:
                    print(idx, e)
                key = cv2.waitKey(1)
                # if pressed escape exit program
                if key == 27:
                    cv2.destroyAllWindows()
                    break
    finally:
        pipeline.stop()
        if output_mode == 0 or output_mode == 1:
            cv2.destroyAllWindows()
    return
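The chained comparisons that build seg_mask are easier to maintain as a class-id list; an equivalent with np.isin (same ADE20K indices as above):

# pavement, floor, road, earth/ground, field, path, dirt/track
TRAVERSABLE_IDS = [11, 3, 6, 13, 29, 52, 91]
seg_mask = np.isin(pred, TRAVERSABLE_IDS)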
Example #17
def get_color_labels(pred):
    im_vis = colorEncode(pred, colors).astype(np.uint8)
    return im_vis
Example #18
def run_loop(bag_path, seg_model, seg_opts, save_images=False):
    # Create pipeline
    pipeline = rs.pipeline()
    # Create a config object
    config = rs.config()
    # Tell config that we will use a recorded device from file, played back through the pipeline.
    rs.config.enable_device_from_file(config, args.input)
    # Start streaming from file
    Pipe = pipeline.start(config)

    # Getting the depth sensor's depth scale (see rs-align example for explanation)
    depth_sensor = Pipe.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()
    print("Depth Scale is: ", depth_scale)

    # Create opencv window to render image in
    cv2.namedWindow("Full Stream", cv2.WINDOW_NORMAL)

    # Create colorizer object
    colorizer = rs.colorizer()
    idx = 0
    # initial frame delay
    idx_limit = 30

    pre_seg_mask_sum = None  # previous frame path segmentation area

    # Streaming loop
    try:
        while True:
            idx += 1
            # Get frameset of depth
            frames = pipeline.wait_for_frames()
            # ignore the first idx_limit frames (initial warm-up delay)
            if idx < idx_limit:
                continue

            align = rs.align(rs.stream.color)
            frames = align.process(frames)

            # Get color frame
            color_frame = frames.get_color_frame()
            # Get depth frame
            depth_frame = frames.get_depth_frame()
            # Get intrinsics and extrinsics
            if idx == idx_limit:
                camera_intrinsics(color_frame, depth_frame, Pipe)

            color_image = np.asanyarray(color_frame.get_data())

            ### Add Segmentation part here ###
            pred = test(color_image, seg_model, seg_opts)

            # pavement, floor, road, earth/ground, field, path, dirt/track
            seg_mask = (pred == 11) | (pred == 3) | (pred == 6) | (
                pred == 13) | (pred == 29) | (pred == 52) | (
                    pred == 91)  #.astype(np.uint8)

            if idx == idx_limit:  # 1st frame detection needs to be robust
                pre_seg_mask_sum = np.sum(seg_mask)
            # checking for bad detection
            new_seg_sum = np.sum(seg_mask)
            diff = abs(new_seg_sum - pre_seg_mask_sum)
            # if diff > pre_seg_mask_sum/15:  # smoothing between segmentation outputs - seems like a bad idea since the model inputs are not connected between timesteps
            #     seg_mask = np.ones_like(pred).astype(np.uint8) # need to add a depth (5 m) criterion for robustness
            #     del new_seg_sum
            # else:
            pre_seg_mask_sum = new_seg_sum
            ### mask Hole filling
            seg_mask = nd.binary_fill_holes(seg_mask).astype(np.uint8)
            #####
            seg_mask_3d = np.dstack((seg_mask, seg_mask, seg_mask))

            pred_color = colorEncode(
                pred,
                loadmat(os.path.join(model_folder, 'color150.mat'))['colors'])
            ##################################

            depth_frame = depth_filter(depth_frame)
            depth_array = np.asarray(depth_frame.get_data())
            # Colorize depth frame to jet colormap
            depth_color_frame = colorizer.colorize(depth_frame)
            # Convert depth_frame to numpy array to render image in opencv
            depth_color_image = np.asanyarray(depth_color_frame.get_data())

            ############ Plane Detection
            ## need to add smoothening between frames - by plane weights' variance?
            try:
                ### need to add multithreading here (and maybe other methods?)
                planes_mask_binary = plane_detection(color_image*seg_mask_3d, depth_array*seg_mask,\
                    loop=5)
            except TypeError:
                try:
                    print("plane mask 1st error")
                    planes_mask, planes_normal, list_plane_params = test_PlaneDetector_send(
                        img_color=color_image * seg_mask_3d,
                        img_depth=depth_array * seg_mask)
                except TypeError:
                    print("plane mask not detected - skipping frame")
                    continue
            ##############################################
            ## Hole filling for plane_mask (plane mask isn't binary - fixed that!)
            planes_mask_binary = nd.binary_fill_holes(planes_mask_binary)
            planes_mask_binary = planes_mask_binary.astype(np.uint8)
            # Clean plane mask object detection by seg_mask
            planes_mask_binary *= seg_mask
            planes_mask_binary_3d = np.dstack(
                (planes_mask_binary, planes_mask_binary, planes_mask_binary))
            edges = planes_mask_binary - nd.morphology.binary_dilation(
                planes_mask_binary)  # edges calculation
            edges = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)
            #############################################

            # for cv2 output
            pred_color = cv2.cvtColor(pred_color, cv2.COLOR_RGB2BGR)
            color_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)

            # if save_images == True:
            #     cv2.imwrite("data_/color/frame%d.png" % idx, color_image)     # save frame as JPEG file
            #     cv2.imwrite("data_/depth/frame%d.png" % idx, depth_array)     # save frame as JPEG file
            #     cv2.imwrite("data_/color_depth/frame%d.png" % idx, depth_color_image)     # save frame as JPEG file
            #     cv2.imwrite("data_/thresholded_color/frame%d.png" % idx, thresholded_color_image)     # save frame as JPEG file
            #     # cv2.imwrite("data_/thresholded_depth/frame%d.png" % idx, thresholded_depth_image)     # save frame as JPEG file

            # # Blending images
            alpha = 0.2
            beta = (1.0 - alpha)
            dst = cv2.addWeighted(color_image, alpha, pred_color, beta, 0.0)
            # kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(11,11))
            # res = cv2.morphologyEx(planes_mask,cv2.MORPH_OPEN,kernel)
            dst2 = cv2.addWeighted(depth_color_image, alpha, color_image, beta,
                                   0.0)

            ## for displaying seg_mask
            seg_mask = (np.array(seg_mask) * 255).astype(np.uint8)
            seg_mask = cv2.cvtColor(seg_mask, cv2.COLOR_GRAY2BGR)
            ##################################

            ### delete later
            final_output = color_image * planes_mask_binary_3d
            mask = (final_output[:, :, 0] == 0) & (
                final_output[:, :, 1] == 0) & (final_output[:, :, 2] == 0)
            final_output[:, :, :3][mask] = [255, 255, 255]
            ######

            # if np.sum(planes_mask) == depth_array.shape[0]*depth_array.shape[1]:
            #     image_set1 = np.vstack((dst, color_image))
            # else:
            image_set1 = np.vstack((color_image, depth_color_image))
            # image_set2 = np.vstack((planes_mask_binary_3d*255, seg_mask))
            image_set2 = np.vstack((dst, final_output))
            # image_set2 = np.vstack((edges, final_output))
            combined_images = np.concatenate((image_set1, image_set2), axis=1)
            if save_images:
                cv2.imwrite("./meeting_example/frame%d.png" % idx,
                            combined_images)
            try:
                cv2.imshow('Full Stream', combined_images)
            except TypeError as e:
                print(idx, e)
            key = cv2.waitKey(1)
            # if pressed escape exit program
            if key == 27:
                cv2.destroyAllWindows()
                break
    finally:
        pipeline.stop()
        cv2.destroyAllWindows()
    # if save_images == True:
    #     pkl.dump( threshold_mask, open( "data_/depth_threshold.pkl", "wb" ) )
    #     print("Mask pickle saved")
    return