def main():
    parser = argparse.ArgumentParser()
    add_camera_opener_options(parser)
    parser.add_argument("-d",
                        "--device",
                        type=str,
                        default='MYRIAD',
                        help="device to run inference on")
    parser.add_argument("-m", "--mode", type=int, default=1, help="mode number")
    parser.add_argument("-n",
                        "--name",
                        type=str,
                        default='MODE_5_45FPS_500',
                        help="mode name")
    parser.add_argument("-q",
                        "--queues",
                        type=int,
                        default=33 * 5,
                        help="depth of the frame queue")
    parser.add_argument("-ud",
                        "--usedepth",
                        action='store_true',
                        help="full highlight for objects")
    args = parser.parse_args()

    flexx(args)
Example No. 2
def main():
    platformhelper = PlatformHelper()
    parser = argparse.ArgumentParser(usage=__doc__)
    add_camera_opener_options(parser)
    parser.add_argument("--frames", type=int, required=True, help="duration to capture data (number of frames)")
    parser.add_argument("--output", type=str, required=True, help="filename to record to")
    parser.add_argument("--skipFrames", type=int, default=0, help="frameSkip argument for the API method")
    parser.add_argument("--skipMilliseconds", type=int, default=0, help="msSkip argument for the API method")
    options = parser.parse_args()

    opener = CameraOpener(options)
    cam = opener.open_camera()

    print_camera_info(cam)

    listener = MyListener()
    cam.registerRecordListener(listener)
    cam.startCapture()
    cam.startRecording(options.output, options.frames, options.skipFrames, options.skipMilliseconds)

    seconds = options.frames * (options.skipFrames + 1) / cam.getFrameRate()
    if options.skipMilliseconds:
        timeForSkipping = options.frames * options.skipMilliseconds / 1000
        seconds = int(max(seconds, timeForSkipping))

    print("Capturing with the camera running at {rate} frames per second".format(rate=cam.getFrameRate()))
    print("This is expected to take around {seconds} seconds".format(seconds=seconds))

    listener.waitForStop()

    cam.stopCapture()
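
MyListener is not shown in this example; the snippet below is a minimal sketch of a compatible record-stop listener, assuming the roypy IRecordStopListener interface used by the SDK's recording sample (not necessarily the original author's implementation):

import threading
import roypy

class MyListener(roypy.IRecordStopListener):
    """waitForStop() blocks until onRecordingStopped() has been called."""
    def __init__(self):
        super(MyListener, self).__init__()
        self.done = threading.Event()

    def onRecordingStopped(self, frameCount):
        print("Recording stopped after {} frames".format(frameCount))
        self.done.set()

    def waitForStop(self):
        self.done.wait()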
Example No. 3
def main():
    platformhelper = PlatformHelper()
    parser = argparse.ArgumentParser(usage=__doc__)
    add_camera_opener_options(parser)
    parser.add_argument("-s",
                        "--seconds",
                        type=int,
                        default=15,
                        help="duration to capture data")
    parser.add_argument("-n",
                        "--name",
                        type=str,
                        default='MODE_5_45FPS_500',
                        help="mode name")
    options = parser.parse_args()
    opener = CameraOpener(options)
    cam = opener.open_camera()
    cam.setUseCase(options.name)

    print_camera_info(cam)
    print("isConnected", cam.isConnected())
    print("getFrameRate", cam.getFrameRate())

    # we will use this queue to synchronize the callback with the main
    # thread, as drawing should happen in the main thread
    q = queue.Queue()
    xq = queue.Queue()
    yq = queue.Queue()
    listener = MyListener(q, xq, yq)
    cam.registerDataListener(listener)
    cam.startCapture()
    # create a loop that will run for a time (default 15 seconds)
    process_event_queue(q, xq, yq, listener, options.seconds)
    cam.stopCapture()
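
MyListener and process_event_queue are defined elsewhere; the following is a minimal sketch of the listener side only, assuming the roypy IDepthDataListener interface and the DepthData point accessors (getX/getY/getZ) from the SDK samples, with one 2D array per frame pushed into each queue:

import numpy as np
import roypy

class MyListener(roypy.IDepthDataListener):
    def __init__(self, z_queue, x_queue, y_queue):
        super(MyListener, self).__init__()
        self.z_queue = z_queue
        self.x_queue = x_queue
        self.y_queue = y_queue

    def onNewData(self, data):
        # collect per-point coordinates, then reshape to image-sized arrays
        zs, xs, ys = [], [], []
        for i in range(data.getNumPoints()):
            zs.append(data.getZ(i))
            xs.append(data.getX(i))
            ys.append(data.getY(i))
        self.z_queue.put(np.asarray(zs).reshape(-1, data.width))
        self.x_queue.put(np.asarray(xs).reshape(-1, data.width))
        self.y_queue.put(np.asarray(ys).reshape(-1, data.width))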
Example No. 4
def main():
    try:
        rospy.init_node('pico_flexx')
        r = rospy.Rate(10)
        parser = argparse.ArgumentParser(usage=__doc__)
        add_camera_opener_options(parser)
        parser.add_argument("--seconds", type=int, default=15, help="duration to capture data")
        options = parser.parse_args()
        opener = CameraOpener(options)
        cam = opener.open_camera()

        print_camera_info(cam)
        print("isConnected", cam.isConnected())
        print("getFrameRate", cam.getFrameRate())

        # we will use this queue to synchronize the callback with the main
        # thread, as drawing should happen in the main thread
        q = queue.Queue(maxsize=0)
        listener = MyListener(q)
        cam.registerDataListener(listener)
        cam.startCapture()

        while not rospy.is_shutdown():
            r.sleep()

        cam.stopCapture()
    except rospy.ROSInterruptException:
        pass
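
Note that the listener fills q but this snippet never drains it; a hedged sketch of consuming the queued frames inside the ROS loop (assuming MyListener puts one NumPy depth image per frame) could be:

while not rospy.is_shutdown():
    try:
        depth = q.get(timeout=0.1)  # next depth frame from MyListener, if any
        rospy.logdebug("received depth frame with shape %s", depth.shape)
    except queue.Empty:
        pass
    r.sleep()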
Example No. 5
def get_parser_options(proj_desc: str):
    """Returns an argument parser than can be used to parse command-line arguments"""

    parser = argparse.ArgumentParser(description=proj_desc, usage=__doc__)
    add_camera_opener_options(parser)
    parser.add_argument("--external_trig", '-e', action="store_true")

    return parser
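
A hedged usage sketch for this helper; the description string and the downstream CameraOpener calls mirror the other examples on this page rather than any specific caller:

parser = get_parser_options("external trigger demo")
options = parser.parse_args()
opener = CameraOpener(options)
cam = opener.open_camera()
if options.external_trig:
    print("external trigger requested")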
Example No. 6
    def __init__(self, disp_parent=None, file: str = ""):
        super().__init__(disp_parent, file)

        self.platform = roypy_platform_utils.PlatformHelper()

        parser = argparse.ArgumentParser()
        roypy_sample_utils.add_camera_opener_options(parser)
        self.options = parser.parse_args([])  # no CLI arguments; use the defaults

        self.manager = roypy_sample_utils.CameraOpener(self.options)
        self.frame_q = deque()
        self.depth_q = deque()
        self.listener = DepthListener(self.frame_q, self.depth_q)
        self.exposure = 80
        self.mode = 'MODE_5_45FPS_500'
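
DepthListener is not included in this snippet; below is a minimal sketch of a listener that appends one gray image and one depth image per frame to the two deques, assuming the roypy IDepthDataListener interface and the getGrayValue/getZ accessors:

import numpy as np
import roypy

class DepthListener(roypy.IDepthDataListener):
    def __init__(self, frame_q, depth_q):
        super(DepthListener, self).__init__()
        self.frame_q = frame_q
        self.depth_q = depth_q

    def onNewData(self, data):
        gray, depth = [], []
        for i in range(data.getNumPoints()):
            gray.append(data.getGrayValue(i))
            depth.append(data.getZ(i))
        self.frame_q.append(np.asarray(gray).reshape(-1, data.width))
        self.depth_q.append(np.asarray(depth).reshape(-1, data.width))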
Example No. 7
def main():
    # PICOFLEXX

    parser1 = argparse.ArgumentParser(usage=__doc__)
    add_camera_opener_options(parser1)

    # parser1.add_argument("--seconds", type=int, default=15, help="duration to capture data")
    options = parser1.parse_args()
    opener = CameraOpener(options)
    cam = opener.open_camera()

    cam.setUseCase("MODE_5_45FPS_500")
    # print_camera_info(cam)
    # print("isConnected", cam.isConnected())
    # print("getFrameRate", cam.getFrameRate())

    # LEAP MOTION
    controller = Leap.Controller()
    controller.set_policy_flags(Leap.Controller.POLICY_IMAGES)

    run(controller, cam)
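
Before hard-coding "MODE_5_45FPS_500", the supported use cases can be checked first; a small hedged sketch, assuming the getUseCases() accessor shown in the SDK's camera-info sample:

supported = cam.getUseCases()
names = [supported[i] for i in range(supported.size())]
print("supported use cases:", names)
if "MODE_5_45FPS_500" in names:
    cam.setUseCase("MODE_5_45FPS_500")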
Example No. 8
def main():
    # PICOFLEXX

    parser1 = argparse.ArgumentParser(usage=__doc__)
    add_camera_opener_options(parser1)

    # parser1.add_argument("--seconds", type=int, default=15, help="duration to capture data")
    options = parser1.parse_args()
    opener = CameraOpener(options)
    cam = opener.open_camera()

    cam.setUseCase("MODE_5_45FPS_500")
    # print_camera_info(cam)
    # print("isConnected", cam.isConnected())
    # print("getFrameRate", cam.getFrameRate())

    # # LEAP MOTION
    # controller = Leap.Controller()
    # controller.set_policy_flags(Leap.Controller.POLICY_IMAGES)


    # LOAD THE NETWORKS

    use_cuda = torch.cuda.is_available()
    device = torch.device('cuda' if use_cuda else 'cpu')
    if use_cuda:
        torch.cuda.manual_seed(1994)
        torch.backends.cudnn.deterministic = True

    # DEPTH
    model_depth = models.densenet161(pretrained=True)
    # for params in model_depth.parameters():
    #     params.requires_grad = False
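    # NOTE: `args` below is assumed to be a module-level namespace providing
    # n_frames, depth_model_path and ir_model_path; it is distinct from the
    # local `options` parsed above.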
    model_depth.features._modules['conv0'] = nn.Conv2d(in_channels=args.n_frames, out_channels=96, kernel_size=(7, 7),
                                                       stride=(2, 2), padding=(3, 3))
    model_depth.classifier = nn.Linear(in_features=2208, out_features=12, bias=True)

    # load the pretrained weights
    model_depth.load_state_dict(torch.load(args.depth_model_path, map_location=device)['state_dict'])

    model_depth = model_depth.to(device)
    model_depth.eval()

    # IR
    model_ir = models.densenet161(pretrained=True)
    # for params in model_ir.parameters():
    #     params.requires_grad = False
    model_ir.features._modules['conv0'] = nn.Conv2d(in_channels=args.n_frames, out_channels=96, kernel_size=(7, 7),
                                                    stride=(2, 2), padding=(3, 3))
    model_ir.classifier = nn.Linear(in_features=2208, out_features=12, bias=True)

    model_ir.load_state_dict(torch.load(args.ir_model_path, map_location=device)['state_dict'])

    model_ir = model_ir.to(device)
    model_ir.eval()

    # create CROSS CONVNET

    # model = CrossConvNet(n_classes=12, depth_model=model_depth, ir_model=None, rgb_model=None,
    #                             mode='depth_ir', cross_mode='avg').to(device)
    # model.eval()

    # load the mean and std

    npzfile = np.load("weights/mean_std_depth_z.npz")
    mean_depth = npzfile['arr_0']
    std_depth = npzfile['arr_1']
    print('mean, std depth_z loaded.')

    npzfile = np.load("weights/mean_std_depth_ir.npz")
    mean_ir = npzfile['arr_0']
    std_ir = npzfile['arr_1']
    print('mean, std depth_ir loaded.')

    run(controller=None, cam=cam, model=model_depth, device=device, mean_depth=mean_depth, std_depth=std_depth,
        mean_ir=mean_ir, std_ir=std_ir)
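
run() is defined elsewhere; as a hedged illustration only, this is how the loaded statistics would typically be applied to a depth clip before inference (the clip shape and the placeholder input are assumptions, not part of the original):

# clip: (n_frames, H, W) stack of raw depth frames; placeholder data here
clip = np.random.rand(args.n_frames, 224, 224).astype(np.float32)
clip = (clip - mean_depth) / std_depth                      # normalize with the loaded stats
x = torch.from_numpy(clip.astype(np.float32)).unsqueeze(0)  # (1, n_frames, H, W) batch
with torch.no_grad():
    logits = model_depth(x.to(device))                      # 12-way class scores
    prediction = logits.argmax(dim=1).item()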