Code example #1
def main():

    vrep.simxFinish(-1)  # just in case, close all opened connections
    clientID = vrep.simxStart('127.0.0.1', 20000, True, True, 1300,
                              5)  # Connect to V-REP
    if clientID == -1:
        sys.exit()
    print('Connected to remote API server')

    res, camhandle = vrep.simxGetObjectHandle(clientID, 'camara_1',
                                              vrep.simx_opmode_oneshot_wait)
    print(res)
    res, resolution, image = vrep.simxGetVisionSensorImage(
        clientID, camhandle, 0, vrep.simx_opmode_streaming)

    ##############

    args = cli()

    # load model
    model, _ = nets.factory_from_args(args)
    model = model.to(args.device)
    processor = decoder.factory_from_args(args, model)

    visualizer = None
    while True:
        res, resolution, image = vrep.simxGetVisionSensorImage(
            clientID, camhandle, 0, vrep.simx_opmode_buffer)
        if len(image) == 0:
            continue
        img = np.array(image, dtype=np.uint8)
        img.resize([resolution[1], resolution[0], 3])
        img = np.rot90(img, 2)
        img = np.fliplr(img)
        cv2.imshow('t', img)
        cv2.waitKey(1)
        image = cv2.resize(img, None, fx=args.scale, fy=args.scale)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        if visualizer is None:
            visualizer = Visualizer(processor, args)(image)
            visualizer.send(None)

        start = time.time()
        image_pil = PIL.Image.fromarray(image)
        processed_image_cpu, _, __ = transforms.EVAL_TRANSFORM(
            image_pil, [], None)
        processed_image = processed_image_cpu.contiguous().to(
            args.device, non_blocking=True)
        #print('preprocessing time', time.time() - start)

        fields = processor.fields(torch.unsqueeze(processed_image, 0))[0]
        visualizer.send((image, fields))

        #print('loop time = {:.3}s, FPS = {:.3}'.format(
        #    time.time() - last_loop, 1.0 / (time.time() - last_loop)))
        last_loop = time.time()

    vrep.simxFinish(clientID)
Code example #2
def setup(opts):
    args = cli()

    # load model
    model, _ = nets.factory_from_args(args)
    model = model.to(args.device)
    processor = decoder.factory_from_args(args, model)

    return model, processor
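The setup() helper above only loads the network and the decoding processor. Below is a minimal single-image inference sketch built from the pattern the other snippets on this page use (image_transform, processor.fields, processor.keypoint_sets); the image path, the device selection and the openpifpaf imports are assumptions, not part of the original example.

import cv2
import torch
from openpifpaf import transforms

# assumed device selection; in the original snippets this lives on args.device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model, processor = setup(opts=None)

img = cv2.imread('example.jpg')  # placeholder image path
image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
processed_image_cpu = transforms.image_transform(image.copy())  # normalize to a tensor
processed_image = processed_image_cpu.contiguous().to(device, non_blocking=True)

fields = processor.fields(torch.unsqueeze(processed_image, 0))[0]
keypoint_sets, scores = processor.keypoint_sets(fields)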
Code example #3
    def initialize(self):
        # openpifpaf configuration
        class Args:
            source = 0
            checkpoint = None
            basenet = None
            dilation = None
            dilation_end = None
            headnets = ['pif', 'paf']
            dropout = 0.0
            quad = 1
            pretrained = False
            keypoint_threshold = None
            seed_threshold = 0.2
            force_complete_pose = False
            debug_pif_indices = []
            debug_paf_indices = []
            connection_method = 'max'
            fixed_b = None
            pif_fixed_scale = None
            profile_decoder = None
            instance_threshold = 0.05
            device = torch.device(type="cuda")
            disable_cuda = False
            scale = 1
            key_point_threshold = 0.05
            head_dropout = 0.0
            head_quad = 0
            default_kernel_size = 1
            default_padding = 0
            default_dilation = 1
            head_kernel_size = 1
            head_padding = 0
            head_dilation = 0
            cross_talk = 0.0
            two_scale = False
            multi_scale = False
            multi_scale_hflip = False
            paf_th = 0.1
            pif_th = 0.1
            decoder_workers = None
            experimental_decoder = False
            extra_coupling = 0.0

        self.args = Args()
        model, _ = nets.factory_from_args(self.args)
        model = model.to(self.args.device)
        self.processor = decoder.factory_from_args(self.args, model)

        self.client = b0RemoteApi.RemoteApiClient('b0RemoteApi_pythonClient',
                                                  'b0RemoteApiAddOn')
        self.bill = self.client.simxGetObjectHandle(
            'Bill_base#1', self.client.simxServiceCall())

        self.start = time.time()
Code example #4
    def __init__(self, args):
        """Instanciate the mdodel"""
        factory_from_args(args)
        model_pifpaf, _ = nets.factory_from_args(args)
        model_pifpaf = model_pifpaf.to(args.device)
        self.processor = decoder.factory_from_args(args, model_pifpaf)
        self.keypoints_whole = []

        # Scale the keypoints to the original image size for printing (if not webcam)
        self.scale_np = np.array([args.scale, args.scale, 1] * 17).reshape(
            17, 3)
Code example #5
	def initialize(self):
		
		# add args.device
		print("gola")
		class Args:
			source = 0
			checkpoint = None
			basenet = None
			dilation = None
			dilation_end = None
			headnets = ['pif', 'paf']
			dropout = 0.0
			quad = 1
			pretrained = False
			keypoint_threshold = None
			seed_threshold = 0.2
			force_complete_pose = False
			debug_pif_indices = []
			debug_paf_indices = []
			connection_method = 'max'
			fixed_b = None
			pif_fixed_scale = None
			profile_decoder = None
			instance_threshold = 0.05
			device = torch.device(type="cuda")
			disable_cuda = False
			scale = 1
			key_point_threshold = 0.05
			head_dropout = 0.0
			head_quad = 0
			default_kernel_size = 1
			default_padding = 0
			default_dilation = 1
			head_kernel_size = 1
			head_padding = 0
			head_dilation = 0
			cross_talk = 0.0
			two_scale = False
			multi_scale = False
			multi_scale_hflip = False
			paf_th = 0.1
			pif_th = 0.1
			decoder_workers = None
			experimental_decoder = False
			extra_coupling = 0.0


		self.args = Args()
		print(self.args)
		model, _ = nets.factory_from_args(self.args)
		model = model.to(self.args.device)
		model.cuda()
		self.processor = decoder.factory_from_args(self.args, model)
		self.src = np.zeros((480, 640, 3), np.uint8)
Code example #6
def main():
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    nets.cli(parser)
    decoder.cli(parser, force_complete_pose=False, instance_threshold=0.1, seed_threshold=0.5)
    parser.add_argument('--no-colored-connections',
                        dest='colored_connections', default=True, action='store_false',
                        help='do not use colored connections to draw poses')
    parser.add_argument('--disable_cuda', action='store_true', default=None,
                        help='disable CUDA')
    args = parser.parse_args()

    # add args.device
    args.device = torch.device('cpu')
    if not args.disable_cuda and torch.cuda.is_available():
        print('************************ using gpu *****************************')
        args.device = torch.device('cuda')

    # load model
    model, _ = nets.factory_from_args(args)
    model = model.to(args.device)
    processor = decoder.factory_from_args(args, model)

    # own coco val json
    f = open('/home/chenjia/pedestrian_det/chenjiaPifPaf/splitjson/test_5000.json')
    js = ujson.load(f)
    img_paths = js['images']   # len==5000, relative paths of the images
    img_path_root = '/data/nfs_share/public/zxyang/human_data/detection_data_cpu'
    out_root = '/home/chenjia/pedestrian_det/openpifpaf/show_eval'

    # random check pred result
    for i in range(50):
        print('*************** run the ', i + 1, 'image ******************')
        ind = np.random.randint(0, 4999)
        img_path = os.path.join(img_path_root, img_paths[ind]['file_name'])
        img = cv2.imread(img_path)
        img = cv2.resize(img, (683, 384))

        image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        processed_image_cpu = transforms.image_transform(image.copy())   # normalize

        processed_image = processed_image_cpu.contiguous().to(args.device, non_blocking=True)  # transpose 2,0,1
        fields = processor.fields(torch.unsqueeze(processed_image, 0))[0]
        keypoint_sets, _ = processor.keypoint_sets(fields)

        # plot pred result
        plot_points(image, keypoint_sets, COCO_PERSON_SKELETON)
        cv2.imwrite(os.path.join(out_root, str(ind) + '.jpg'), image)
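plot_points() is called above but not defined in the snippet. A minimal sketch of such a helper is shown below, assuming keypoint_sets is an array of (17, 3) rows in (x, y, confidence) form and the skeleton is a list of 1-based COCO joint-index pairs; the project's actual implementation may differ.

import cv2
import numpy as np

def plot_points(image, keypoint_sets, skeleton, threshold=0.1):
    """Draw keypoints and skeleton connections in place on the image."""
    for kps in keypoint_sets:
        kps = np.asarray(kps)
        # joints above the confidence threshold
        for x, y, v in kps:
            if v > threshold:
                cv2.circle(image, (int(x), int(y)), 2, (0, 255, 0), -1)
        # connections between two visible joints (skeleton uses 1-based indices)
        for j1, j2 in skeleton:
            p1, p2 = kps[j1 - 1], kps[j2 - 1]
            if p1[2] > threshold and p2[2] > threshold:
                cv2.line(image, (int(p1[0]), int(p1[1])),
                         (int(p2[0]), int(p2[1])), (0, 0, 255), 1)
    return image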
Code example #7
    def __init__(self, arguments, input_size = 320):
        super(Detector, self).__init__()

        self.args = cli(arguments)
        # load model
        self.model, _ = nets.factory_from_args(self.args)
        self.model = self.model.to(self.args.device)
        self.processor = decoder.factory_from_args(self.args, self.model)

        self.preprocess = transforms.SquareRescale(
            input_size, black_bars=False, random_hflip=False, horizontal_swap=None)

        self.image_transform = transforms.image_transform
        INV_COCO_LABELS = {v: k for k, v in COCO_LABELS.items()}
        self.chosen_label = INV_COCO_LABELS["person"]-1
        self.input_size = input_size
Code example #8
	def initialize(self):
		args = Args()
		model, _ = nets.factory_from_args(args)
		model = model.to(args.device)
		self.processor = decoder.factory_from_args(args, model)

		if self.simulation:
			self.client = b0RemoteApi.RemoteApiClient('b0RemoteApi_pythonClient', 'b0RemoteApiAddOn')
			ret, children = self.client.simxGetObjectsInTree('sim.handle_scene', None, 1+2, self.client.simxServiceCall())
			for child in children:
				ret, name = self.client.simxGetObjectName(child, '', self.client.simxServiceCall())
				if "Bill_base" in str(name):
					self.bill.append(child)

		self.start = time.time()
Code example #9
File: eval_coco.py, Project: MatanAvitan/openpifpaf
def main():
    args = cli()

    # skip existing?
    if args.skip_existing:
        if os.path.exists(args.output + '.stats.json'):
            print('Output file {} exists already. Exiting.'
                  ''.format(args.output + '.stats.json'))
            return
        print('Processing: {}'.format(args.checkpoint))

    preprocess, collate_fn = preprocess_factory_from_args(args)
    data = datasets.CocoKeypoints(
        root=args.image_dir,
        annFile=args.annotation_file,
        preprocess=preprocess,
        all_persons=True,
        all_images=args.all_images,
    )
    data_loader = torch.utils.data.DataLoader(data,
                                              batch_size=args.batch_size,
                                              pin_memory=args.pin_memory,
                                              num_workers=args.loader_workers,
                                              collate_fn=collate_fn)

    model_cpu, _ = nets.factory_from_args(args)
    model = model_cpu.to(args.device)
    if not args.disable_cuda and torch.cuda.device_count() > 1:
        LOG.info('Using multiple GPUs: %d', torch.cuda.device_count())
        model = torch.nn.DataParallel(model)
        model.head_names = model_cpu.head_names
        model.head_strides = model_cpu.head_strides

    processor = decoder.factory_from_args(args, model, args.device)
    # processor.instance_scorer = instance_scorer.InstanceScoreRecorder()
    # processor.instance_scorer = torch.load('instance_scorer.pkl')

    coco = pycocotools.coco.COCO(args.annotation_file)
    eval_coco = EvalCoco(coco, processor, preprocess.annotations_inverse)
    total_start = time.time()
    loop_start = time.time()
    for batch_i, (image_tensors_cpu, anns_batch,
                  meta_batch) in enumerate(data_loader):
        LOG.info('batch %d, last loop: %.3fs, batches per second=%.1f',
                 batch_i,
                 time.time() - loop_start,
                 batch_i / max(1, (time.time() - total_start)))
        if batch_i < args.skip_n:
            continue
        if args.n and batch_i >= args.n:
            break

        loop_start = time.time()

        if len([
                a for anns in anns_batch
                for a in anns if np.any(a['keypoints'][:, 2] > 0)
        ]) < args.min_ann:
            continue

        fields_batch = processor.fields(image_tensors_cpu)

        decoder_start = time.perf_counter()
        pred_batch = processor.annotations_batch(
            fields_batch,
            meta_batch=meta_batch,
            debug_images=image_tensors_cpu)
        eval_coco.decoder_time += time.perf_counter() - decoder_start

        # loop over batch
        assert len(image_tensors_cpu) == len(fields_batch)
        assert len(image_tensors_cpu) == len(anns_batch)
        assert len(image_tensors_cpu) == len(meta_batch)
        for image_tensor_cpu, pred, anns, meta in zip(image_tensors_cpu,
                                                      pred_batch, anns_batch,
                                                      meta_batch):
            eval_coco.from_predictions(pred,
                                       meta,
                                       debug=args.debug,
                                       gt=anns,
                                       image_cpu=image_tensor_cpu)
    total_time = time.time() - total_start

    # processor.instance_scorer.write_data('instance_score_data.json')
    write_evaluations(eval_coco, args.output, args, total_time)
Code example #10
    def __init__(self,
                 checkpoint=None,
                 basenet=None,
                 headnets=None,
                 pretrained=True,
                 two_scale=False,
                 multi_scale=False,
                 multi_scale_hflip=True,
                 cross_talk=0.0,
                 download_progress=True,
                 head_dropout=0.0,
                 head_quad=1,
                 seed_threshold=0.5,
                 instance_threshold=0.1,
                 keypoint_threshold=None,
                 decoder_workers=None,
                 dense_connections=False,
                 dense_coupling=0.01,
                 caf_seeds=False,
                 force_complete_pose=True,
                 profile_decoder=None,
                 cif_th=0.1,
                 caf_th=0.1,
                 connection_method="blend",
                 greedy=False,
                 video_output="",
                 xls_output="",
                 quiet=False,
                 debug=False,
                 skip_frames=1,
                 max_frames=False,
                 start_frame=0,
                 confidence_threshold=0.1):

        self.checkpoint = checkpoint
        self.basenet = basenet
        self.headnets = headnets
        self.pretrained = pretrained
        self.two_scale = two_scale
        self.multi_scale = multi_scale
        self.multi_scale_hflip = multi_scale_hflip
        self.cross_talk = cross_talk
        self.download_progress = download_progress
        self.head_dropout = head_dropout
        self.head_quad = head_quad
        self.seed_threshold = seed_threshold
        self.instance_threshold = instance_threshold
        self.keypoint_threshold = keypoint_threshold
        self.decoder_workers = decoder_workers
        self.dense_connections = dense_connections
        self.dense_coupling = dense_coupling
        self.caf_seeds = caf_seeds
        self.force_complete_pose = force_complete_pose
        self.profile_decoder = profile_decoder
        self.cif_th = cif_th
        self.caf_th = caf_th
        self.connection_method = connection_method
        self.greedy = greedy
        self.quiet = quiet
        self.debug = debug
        self.skip_frames = skip_frames
        self.max_frames = max_frames
        self.start_frame = start_frame
        self.confidence_threshold = confidence_threshold

        class InternalConfig:
            def __init__(self):
                self.debug_cifhr = None
                self.debug_cif_c = None
                self.debug_cif_v = None
                self.debug_cifdet_c = None
                self.debug_cifdet_v = None
                self.debug_caf_c = None
                self.debug_caf_v = None
                self.debug_indices = []
                self.debug_images = False
                # plain booleans: trailing commas would turn these into
                # one-element tuples, which are always truthy
                self.show_box = False
                self.show_joint_scales = False
                self.show_decoding_order = False
                self.show_frontier_order = False
                self.show_only_decoded_connections = False
                self.show_joint_confidences = False

        # configure logging
        self.log_level = logging.INFO
        if quiet:
            self.log_level = logging.WARNING
        if debug:
            self.log_level = logging.DEBUG
        logging.basicConfig()
        logging.getLogger('openpifpaf').setLevel(self.log_level)
        LOG.setLevel(self.log_level)

        internal_config = InternalConfig()
        network.configure(self)
        pifpaf.show.configure(internal_config)
        visualizer.configure(internal_config)

        # add args.device
        self.device = torch.device('cpu')
        if torch.cuda.is_available():
            self.device = torch.device('cuda')
        LOG.debug('neural network device: %s', self.device)

        self.model, _ = network.factory_from_args(self)
        self.model = self.model.to(self.device)
        self.processor = decoder.factory_from_args(self, self.model)
Code example #11
File: predict.py, Project: MatanAvitan/openpifpaf
def main():
    args = cli()
    if args.our_new_model:
        args.checkpoint = TRAINED_MODEL_PATH
    # load model
    model_cpu, _ = nets.factory_from_args(args)
    model = model_cpu.to(args.device)
    if not args.disable_cuda and torch.cuda.device_count() > 1:
        LOG.info('Using multiple GPUs: %d', torch.cuda.device_count())
        model = torch.nn.DataParallel(model)
        model.head_names = model_cpu.head_names
        model.head_strides = model_cpu.head_strides
    processor = decoder.factory_from_args(args, model, args.device)

    # data
    preprocess = None
    if args.long_edge:
        preprocess = transforms.Compose([
            transforms.NormalizeAnnotations(),
            transforms.RescaleAbsolute(args.long_edge),
            transforms.CenterPad(args.long_edge),
            transforms.EVAL_TRANSFORM,
        ])
    data = datasets.ImageList(args.images, preprocess=preprocess)
    data_loader = torch.utils.data.DataLoader(
        data,
        batch_size=args.batch_size,
        shuffle=False,
        pin_memory=args.pin_memory,
        num_workers=args.loader_workers,
        collate_fn=datasets.collate_images_anns_meta)

    # visualizers
    keypoint_painter = show.KeypointPainter(
        show_box=args.debug,
        show_joint_scale=args.debug,
    )
    skeleton_painter = show.KeypointPainter(
        color_connections=True,
        markersize=args.line_width - 5,
        linewidth=args.line_width,
        show_box=args.debug,
        show_joint_scale=args.debug,
    )

    for batch_i, (image_tensors_batch, _,
                  meta_batch) in enumerate(data_loader):
        fields_batch = processor.fields(image_tensors_batch)
        pred_batch = processor.annotations_batch(
            fields_batch, debug_images=image_tensors_batch)

        # unbatch
        for pred, meta in zip(pred_batch, meta_batch):
            if args.output_directory is None:
                output_path = meta['file_name']
            else:
                file_name = os.path.basename(meta['file_name'])
                output_path = os.path.join(args.output_directory, file_name)
            LOG.info('batch %d: %s to %s', batch_i, meta['file_name'],
                     output_path)

            # load the original image if necessary
            cpu_image = None
            if args.debug or \
                    'keypoints' in args.output_types or \
                    'skeleton' in args.output_types:
                with open(meta['file_name'], 'rb') as f:
                    cpu_image = PIL.Image.open(f).convert('RGB')

            processor.set_cpu_image(cpu_image, None)
            if preprocess is not None:
                pred = preprocess.annotations_inverse(pred, meta)

            if 'json' in args.output_types:
                with open(output_path + '.pifpaf.json', 'w') as f:
                    json.dump([{
                        'keypoints':
                        np.around(ann.data, 1).reshape(-1).tolist(),
                        'bbox':
                        np.around(bbox_from_keypoints(ann.data), 1).tolist(),
                        'score':
                        round(ann.score(), 3),
                    } for ann in pred], f)

            if 'keypoints' in args.output_types:
                with show.image_canvas(cpu_image,
                                       output_path + '.keypoints.png',
                                       show=args.show,
                                       fig_width=args.figure_width,
                                       dpi_factor=args.dpi_factor) as ax:
                    keypoint_painter.annotations(ax, pred)

            if 'skeleton' in args.output_types:
                with show.image_canvas(cpu_image,
                                       output_path + '.skeleton.png',
                                       show=args.show,
                                       fig_width=args.figure_width,
                                       dpi_factor=args.dpi_factor) as ax:
                    skeleton_painter.annotations(ax, pred)
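bbox_from_keypoints() is used for the json output above but not defined in this snippet. Below is a minimal sketch under the assumption that ann.data is an (N, 3) array of (x, y, confidence) rows; openpifpaf's own helper may pad the box or handle empty detections differently.

import numpy as np

def bbox_from_keypoints(kps):
    # keep only annotated joints
    visible = kps[kps[:, 2] > 0.0]
    if len(visible) == 0:
        return [0.0, 0.0, 0.0, 0.0]
    x_min, y_min = float(visible[:, 0].min()), float(visible[:, 1].min())
    x_max, y_max = float(visible[:, 0].max()), float(visible[:, 1].max())
    # COCO-style box: [x, y, width, height]
    return [x_min, y_min, x_max - x_min, y_max - y_min]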
Code example #12
    def initialize(self):
        print("Initialize")

        # openpifpaf configuration
        class Args:
            source = 0
            checkpoint = None
            basenet = None
            dilation = None
            dilation_end = None
            headnets = ['pif', 'paf']
            dropout = 0.0
            quad = 1
            pretrained = False
            keypoint_threshold = None
            seed_threshold = 0.2
            force_complete_pose = False
            debug_pif_indices = []
            debug_paf_indices = []
            connection_method = 'max'
            fixed_b = None
            pif_fixed_scale = None
            profile_decoder = None
            instance_threshold = 0.05
            device = torch.device(type="cpu")
            disable_cuda = True
            scale = 1
            key_point_threshold = 0.05
            head_dropout = 0.0
            head_quad = 0
            default_kernel_size = 1
            default_padding = 0
            default_dilation = 1
            head_kernel_size = 1
            head_padding = 0
            head_dilation = 0
            cross_talk = 0.0
            two_scale = False
            multi_scale = False
            multi_scale_hflip = False
            paf_th = 0.1
            pif_th = 0.1
            decoder_workers = None
            experimental_decoder = False
            extra_coupling = 0.0

        self.args = Args()
        model, _ = nets.factory_from_args(self.args)
        model = model.to(self.args.device)
        # model.cuda()
        self.processor = decoder.factory_from_args(self.args, model)

        # realsense configuration
        try:
            config = rs.config()
            config.enable_device(self.params["device_serial"])
            config.enable_stream(rs.stream.depth, self.width, self.height,
                                 rs.format.z16, 30)
            config.enable_stream(rs.stream.color, self.width, self.height,
                                 rs.format.bgr8, 30)

            self.pointcloud = rs.pointcloud()
            self.pipeline = rs.pipeline()
            cfg = self.pipeline.start(config)
#            profile = cfg.get_stream(rs.stream.color) # Fetch stream profile for depth stream
#            intr = profile.as_video_stream_profile().get_intrinsics() # Downcast to video_stream_profile and fetch intrinsics
#            print (intr.fx, intr.fy)
#            depth_scale = cfg.get_device().first_depth_sensor().get_depth_scale()
#            print("Depth Scale is: " , depth_scale)
#            sys.exit(-1)
        except Exception as e:
            print("Error initializing camera")
            print(e)
            sys.exit(-1)
Code example #13
def main():
    args = cli()

    # load model
    model, _ = nets.factory_from_args(args)
    model = model.to(args.device)
    processor = decoder.factory_from_args(args, model)

    # data
    data = datasets.ImageList(args.images)
    data_loader = torch.utils.data.DataLoader(data,
                                              batch_size=1,
                                              shuffle=False,
                                              pin_memory=args.pin_memory,
                                              num_workers=args.loader_workers)

    # visualizers
    skeleton_painter = show.InstancePainter(show_box=True,
                                            color_connections=True,
                                            markersize=1,
                                            linewidth=6)

    for image_i, (image_paths, image_tensors,
                  processed_images_cpu) in enumerate(data_loader):
        images = image_tensors.permute(0, 2, 3, 1)
        img_show = np.squeeze(images)
        print(image_paths)
        #print(img_show)
        #plt.imshow(img_show)
        processed_images = processed_images_cpu.to(args.device,
                                                   non_blocking=True)
        fields_batch = processor.fields(processed_images)
        # unbatch
        for image_path, image, processed_image_cpu, fields in zip(
                image_paths, images, processed_images_cpu, fields_batch):

            if args.output_directory is None:
                output_path = image_path
            else:
                file_name = os.path.basename(image_path)
                output_path = os.path.join(args.output_directory, file_name)
            print('image', image_i, image_path, output_path)

            processor.set_cpu_image(image, processed_image_cpu)
            keypoint_sets, scores = processor.keypoint_sets(fields)

            img = plotCenter(img_show, keypoint_sets)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            cv2.imwrite(output_path + '.center.png', img)

            if 'json' in args.output_types:
                with open(output_path + '.pifpaf.json', 'w') as f:
                    json.dump([{
                        'keypoints':
                        np.around(kps, 1).reshape(-1).tolist(),
                        'bbox': [
                            np.min(kps[:, 0]),
                            np.min(kps[:, 1]),
                            np.max(kps[:, 0]),
                            np.max(kps[:, 1])
                        ]
                    } for kps in keypoint_sets], f)

            texts = [
                COCO_LABELS[np.argmax(kps[:, 2]) + 1] for kps in keypoint_sets
            ]

            #print(texts)

            if 'skeleton' in args.output_types:
                with show.image_canvas(image,
                                       output_path + '.skeleton.png',
                                       show=args.show,
                                       fig_width=args.figure_width,
                                       dpi_factor=args.dpi_factor) as ax:
                    skeleton_painter.keypoints(ax,
                                               keypoint_sets,
                                               scores=scores,
                                               texts=texts)
Code example #14
def main():
    args = cli()

    # load model
    model, _ = nets.factory_from_args(args)
    model = model.to(args.device)
    processor = decoder.factory_from_args(args, model)

    # zed
    init = sl.InitParameters()
    init.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_ULTRA
    init.coordinate_units = sl.UNIT.UNIT_METER
    init.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP

    cam = sl.Camera()
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime_parameters = sl.RuntimeParameters()
    runtime_parameters.sensing_mode = sl.SENSING_MODE.SENSING_MODE_STANDARD  # Use STANDARD sensing mode

    img = sl.Mat()
    depth = sl.Mat()
    point_cloud = sl.Mat()

    last_loop = time.time()
    #capture = cv2.VideoCapture(args.source)

    visualizer = None
    while True:
        err = cam.grab(runtime_parameters)
        if err == sl.ERROR_CODE.SUCCESS:
            # Retrieve left image
            cam.retrieve_image(img, sl.VIEW.VIEW_LEFT)
            # Retrieve depth map. Depth is aligned on the left image
            cam.retrieve_measure(depth, sl.MEASURE.MEASURE_DEPTH)
            # Retrieve colored point cloud. Point cloud is aligned on the left image.
            cam.retrieve_measure(point_cloud, sl.MEASURE.MEASURE_XYZRGBA)

            # Get and print distance value in mm at the center of the image
            # We measure the distance camera - object using Euclidean distance
            x = round(img.get_width() / 2)
            y = round(img.get_height() / 2)
            err, point_cloud_value = point_cloud.get_value(x, y)
            err, depth_value = depth.get_value(x, y)
            print("depth ", depth_value)

            distance = math.sqrt(point_cloud_value[0] * point_cloud_value[0] +
                                 point_cloud_value[1] * point_cloud_value[1] +
                                 point_cloud_value[2] * point_cloud_value[2])

            if not np.isnan(distance) and not np.isinf(distance):
                distance = round(distance)
                #print("Distance to Camera at ({0}, {1}): {2} mm\n".format(x, y, distance))
            else:
                print(
                    "Can't estimate distance at this position, move the camera\n"
                )
            cv2.imshow("Depth", depth.get_data())
        else:
            print("Err", err)
            continue

        image = cv2.resize(img.get_data(), None, fx=args.scale, fy=args.scale)
        #print('resized image size: {}'.format(image.shape))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        if visualizer is None:
            visualizer = Visualizer(processor, args)(image)
            visualizer.send(None)

        start = time.time()
        image_pil = PIL.Image.fromarray(image)
        processed_image_cpu, _, __ = transforms.EVAL_TRANSFORM(
            image_pil, [], None)
        processed_image = processed_image_cpu.contiguous().to(
            args.device, non_blocking=True)
        #print('preprocessing time', time.time() - start)

        fields = processor.fields(torch.unsqueeze(processed_image, 0))[0]
        visualizer.send((image, fields))

        #print('loop time = {:.3}s, FPS = {:.3}'.format(
        #    time.time() - last_loop, 1.0 / (time.time() - last_loop)))
        last_loop = time.time()

    cam.close()