Exemplo n.º 1
0
def listen_print_loop(responses):
    """Iterate over streaming speech-recognition responses and act on commands.

    Interim results are re-printed in place on the same console line; final
    results are matched against Korean voice commands that pulse GPIO pins or
    trigger the camera module.  The loop exits when a final transcript
    contains "exit" or "quit".

    :param responses: iterable of streaming recognition responses; each item
        is expected to expose ``results[0].alternatives[0].transcript`` and
        ``results[0].is_final`` (Google Cloud Speech streaming shape — confirm).
    """
    num_chars_printed = 0
    for response in responses:
        if not response.results:
            continue

        result = response.results[0]
        if not result.alternatives:
            continue

        transcript = result.alternatives[0].transcript

        # Pad with spaces so a shorter interim transcript fully overwrites the
        # previously printed (longer) one.  BUG FIX: the original used '' * n,
        # which always produced an empty string and never erased anything.
        overwrite_chars = ' ' * (num_chars_printed - len(transcript))

        if not result.is_final:
            # interim result: rewrite the current console line in place
            sys.stdout.write(transcript + overwrite_chars + '\r')
            sys.stdout.flush()

            num_chars_printed = len(transcript)

        else:
            print(transcript + overwrite_chars)
            # BUG FIX: str.replace returns a new string; the original
            # discarded the result, so spaces were never actually stripped.
            transcript = transcript.replace(" ", "")

            com = "음성인식"
            if transcript == com:
                print("앞으로 갑니다")
                transcript = ""

            if transcript == "왼쪽" or transcript == " 왼쪽":
                # pulse the "left" pin for half a second
                GPIO.output(12, True)
                print(transcript)
                time.sleep(0.5)
                GPIO.output(12, False)
                transcript = ""

            elif transcript == "오른쪽" or transcript == " 오른쪽":
                # pulse the "right" pin for half a second
                GPIO.output(18, True)
                print(transcript)
                time.sleep(0.5)
                GPIO.output(18, False)
                transcript = ""

            elif transcript == "카메라" or transcript == " 카메라":
                camera.camera()
                print("카메라 촬영 완료")

            elif transcript == "동영상" or transcript == " 동영상":
                camera.video()
                print("동영상 저장")

            if re.search(r'\b(exit|quit)\b', transcript, re.I):
                print('Exiting..')
                break

            num_chars_printed = 0
Exemplo n.º 2
0
def detct():
    time.sleep(1)
    if GPIO.input(HCSRPORT) == True:
        print "Someone is closing!"
        ishumen = 1
        mysqlDbHcsr(ishumen, "客厅")
        camera.camera()
    else:
        print "No anybody!"
        ishumen = 0
        mysqlDbHcsr(ishumen, "客厅")
Exemplo n.º 3
0
    def __init__(self):
        """Initialise the flight-controller state with safe defaults."""
        self.mode = "m"  # presumably "m" = manual mode — confirm against mode handling

        # autopilot hold flags, all disabled at startup
        self.pitch_hold = False
        # self.hdg_hold=False
        self.alt_hold = False
        self.auto_hold = False
        self.speed_hold = False
        self.is_connected = 0  # connection state kept as an int flag

        self.status = c.OK
        self.servos_init = False
        self.throttle_updated = False

        # current attitude / motion state
        self.pitch = 0.0
        self.roll = 0.0
        self.throttle = 0.0
        self.heading = 0.0
        # NOTE(review): "altittude" is a typo for "altitude", but other code
        # reads this attribute name — renaming would break callers.
        self.altittude = 0.0
        self.speed = 0

        # PID controllers start with zeroed gains; presumably tuned elsewhere
        self.altPID = pid(0, 0)
        self.hdgPID = pid(0, 0)
        self.throttlePID = pid(0, 0)

        # hardware abstraction objects
        self.elevons = elevons()
        self.sensors = sensors()
        self.gps = gpsdata()
        self.motor = motor_handler(0)
        self.camera = camera()
Exemplo n.º 4
0
    def __init__(self):
        """Initialise the flight-controller state with safe defaults."""
        self.mode = "m"  # presumably "m" = manual mode — confirm against mode handling

        # autopilot hold flags, all disabled at startup
        self.pitch_hold = False
        # self.hdg_hold=False
        self.alt_hold = False
        self.auto_hold = False
        self.speed_hold = False
        self.is_connected = 0  # connection state kept as an int flag

        self.status = c.OK
        self.servos_init = False
        self.throttle_updated = False

        # current attitude / motion state
        self.pitch = 0.0
        self.roll = 0.0
        self.throttle = 0.0
        self.heading = 0.0
        # NOTE(review): "altittude" is a typo for "altitude", but other code
        # reads this attribute name — renaming would break callers.
        self.altittude = 0.0
        self.speed = 0

        # PID controllers start with zeroed gains; presumably tuned elsewhere
        self.altPID=pid(0,0)
        self.hdgPID=pid(0,0)
        self.throttlePID=pid(0,0)

        # hardware abstraction objects
        self.elevons = elevons()
        self.sensors = sensors()
        self.gps = gpsdata()
        self.motor = motor_handler(0)
        self.camera = camera()
Exemplo n.º 5
0
def main():
    """Smoke-test the camera wrapper: build a 'mobile' camera and dump it."""
    mobile_cam = camera("mobile")
    calibration_matrix = mobile_cam.mtx
    print(mobile_cam)
    print(calibration_matrix)
def main():
    """Track a red ball in two camera streams and animate its 3-D trajectory."""
    # HSV bounds for the red ball
    lower_hsv = (137, 88, 55)
    upper_hsv = (183, 255, 255)

    cam_one = cam.camera(1, "/dev/video0")
    cam_two = cam.camera(2, "/dev/video3")

    # bail out if either camera fails to build its undistortion matrices
    for device in (cam_one, cam_two):
        if device.createCameraMatrixUndistort() == False:
            return False

    cam_one.showvideo()
    cam_two.showvideo()

    tracker = HSVTr.HSVObjTracking(lower_hsv, upper_hsv, [cam_one, cam_two])
    x, y, z = tracker.threedMovementsRecontstruction()
    threeDplot.displayanimation(x, y, z)
Exemplo n.º 7
0
    def __init__(self, name, M0b=18, M0f=23, M1b=24, M1f=25, T1=4, E1=17, simulation=False):
        """Assemble the rover: configuration constants plus all hardware helpers.

        :param name: rover name used for identification.
        :param M0b: backward GPIO pin of the left motor.
        :param M0f: forward GPIO pin of the left motor.
        :param M1b: backward GPIO pin of the right motor.
        :param M1f: forward GPIO pin of the right motor.
        :param T1: trigger GPIO pin of the ultrasonic sensor.
        :param E1: echo GPIO pin of the ultrasonic sensor.
        :param simulation: presumably runs without real hardware when True — confirm.
        """
        # Pin reference (BCM GPIO number -> physical header pin):
        #   GPIO: 4 17 21 22  -> pin: 7 11 13 15
        #   GPIO: 18 23 24 25 -> pin: 12 16 18 22

        self.logger = logging.getLogger('myR.rover')
        self.name = name
        self.version = 1
        self.simulation = simulation
        self.ip = '192.168.0.10'
        self.speed = 100  # [mm/s]
        self.speedMax = 1000  # [mm/s]

        self.voltage = 6  # [V]
        self.mass = 2  # [Kg]
        # NOTE(review): "Lenght" is a typo for "Length", but external readers
        # may depend on the attribute name.
        self.Lenght = 180  # [mm]
        self.wheelR = 32  # [mm]

        self.time360 = 2  # [sec] presumably time for a full in-place turn — confirm

        # shared data hub passed to every subsystem
        self.data = datacollector()
        self.camera = camera(self.data)
        self.camserver = camserver(self.data)
        self.sensor = us_sensor('S1', T1, E1, self.data)
        self.motorLeft = DCmotor('MLeft', M0b, M0f, 11, 12)
        self.motorRight = DCmotor('MRight', M1b, M1f, 13, 14)

        self.rc = rc(self.data)
        self.server = server(self.data)
Exemplo n.º 8
0
    def __init__(self, auto, debug):
        """Set up the car, map and camera, then start the selected drive mode.

        :param auto: when truthy, enter autoDrive() immediately, otherwise
            manualDrive(); either way this constructor blocks in that call.
        :param debug: debug flag stored on the instance.
        """
        # initialize car object
        self.myCar = car()
        # initialize current state (presumably "f" = forward — confirm)
        self.state = "f"
        # initialize map (NOTE(review): map() here must be a project class
        # shadowing the builtin, since builtin map() requires arguments)
        self.myMap = map()
        # initialize action counter
        self.actioncounter = 0
        # initialize camera
        self.myCamera = camera()
        # initialize global variables
        vars.init()
        # debug or not
        self.debug = debug

        # give the hardware time to come up
        time.sleep(1)

        print("myCar and myCamera startup completed")

        if auto:
            # auto drive
            print("autoDrive mode")
            self.autoDrive()

        else:
            # manual drive
            print("manualDrive mode")
            self.manualDrive()
def detect_video(yolo, video_path, output_path=""):
    """Run YOLO detection on every frame of a video, display the annotated
    frames, and optionally write them to *output_path*.

    :param yolo: detector object; ``yolo.close_session()`` is called on exit.
    :param video_path: path or device string passed to ``cv2.VideoCapture``.
    :param output_path: when non-empty, annotated frames are also written here.
    :raises IOError: when the capture cannot be opened.
    """
    vid = cv2.VideoCapture(video_path)
    # Fail fast before handing the capture to anything else (the original
    # called timer_first() on a possibly unopened capture).
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")

    video_FourCC, video_fps, video_size, accum_time, curr_fps, fps, prev_time = timer_first(
        vid)

    is_output = output_path != ""
    out = None
    if is_output:
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)

    HEIGHT = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
    WIDTH = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))

    while True:
        return_value, frame = vid.read()
        # BUG FIX: the original ignored the read status, so after the video
        # ended the loop kept feeding None frames downstream.
        if not return_value:
            break

        result = camera(frame, HEIGHT, WIDTH, yolo)
        result = timer_back(accum_time, curr_fps, fps, prev_time, result)

        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if is_output:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # release resources (the original leaked both the capture and the writer)
    vid.release()
    if out is not None:
        out.release()
    yolo.close_session()
Exemplo n.º 10
0
 def __init__(self, url, debug = False, angle = 0, init = False):
     """Optionally build the camera + detector pipeline.

     When *init* is falsy this constructor intentionally does nothing and
     leaves the instance without cam/det/player attributes.

     :param url: stream URL handed to the camera.
     :param debug: forwarded to both camera and detect.
     :param angle: camera rotation angle forwarded to the camera.
     :param init: gate that enables the actual construction.
     """
     if init:
         self.player = None
         self.cam = camera(url = url, angle = angle, debug = debug)
         self.cam.start()  # start streaming before the detector attaches
         self.det = detect(self.cam, debug = debug)
         self.first_action = True
Exemplo n.º 11
0
    def __init__(self,
                 name_pattern,
                 directory,
                 scan_range=360,
                 scan_exposure_time=3.6,
                 scan_start_angle=0,
                 zoom=None,
                 frontlight=False):
        """Configure a film (continuous omega scan) experiment.

        :param name_pattern: file-name pattern forwarded to experiment.
        :param directory: output directory forwarded to experiment.
        :param scan_range: total oscillation range (360 default suggests degrees — confirm).
        :param scan_exposure_time: total exposure time for the scan [s].
        :param scan_start_angle: starting angle of the scan.
        :param zoom: camera zoom; None keeps the current zoom.
        :param frontlight: whether to use the front light.
        """
        experiment.__init__(self,
                            name_pattern=name_pattern,
                            directory=directory)

        self.scan_range = scan_range
        self.scan_exposure_time = scan_exposure_time
        self.scan_start_angle = scan_start_angle
        self.zoom = zoom
        self.frontlight = frontlight

        # beamline hardware helpers
        self.camera = camera()
        self.goniometer = goniometer()
        self.fastshutter = fast_shutter()

        # task info returned by the MD2 diffractometer, filled during the run
        self.md2_task_info = None

        self.images = []

        # extend the inherited parameter set with film-specific fields
        self.parameter_fields = self.parameter_fields.union(
            film.specific_parameter_fields)
Exemplo n.º 12
0
 def __init__(self):
     """Instantiate every peripheral used by this monitoring node."""
     self.channel = 17  # GPIO channel — confirm BCM vs board numbering
     self.camera = camera()
     self.network = networkPreference()
     self.device = deviceActivation()
     self.smokesensor = MQ()  # MQ-series gas/smoke sensor wrapper
     self.buzzer = buzzer()
Exemplo n.º 13
0
 def __init__(self):
     """Create the motion sensor, camera and LED as module-level globals.

     NOTE(review): storing these in globals instead of on self suggests other
     module-level functions share them — confirm before refactoring.
     """
     global ms
     global c
     global l
     ms = motionSensor()
     c = camera()
     l = led()
Exemplo n.º 14
0
def main():
    """Render a random sphere scene with a toy path tracer and save res.png."""
    img_w, img_h = 600, 400
    n = 4    # channel count; also passed to random_scene
    ns = 10  # samples per pixel

    anti_aliasing = False

    scene = random_scene(n)
    world = hitable_list(scene, len(scene))

    # camera looks from (13, 2, 3) toward the origin with a small aperture
    eye = vec3(13, 2, 3)
    target = vec3(0, 0, 0)
    focus_dist = 10.0
    aperture = 0.1
    cam = camera(eye, target, vec3(0, 1, 0), 20, img_w / img_h, aperture,
                 focus_dist)

    frame = render(np.zeros([img_h, img_w, n], dtype=np.float32), cam, world,
                   img_h, img_w, ns, anti_aliasing)
    frame = frame.astype(np.uint8)
    bgra = cv2.cvtColor(frame, cv2.COLOR_RGBA2BGRA)
    cv2.imwrite('res.png', bgra)
Exemplo n.º 15
0
 def __init__(self):
     """Load config, initialise state, wire up ROS pub/sub, then spin forever."""
     conf_file = open("/home/max/projects/catflap/ros_catkin_ws/src/catflap_image_collector/scripts/conf.yaml", 'r')
     # NOTE(review): yaml.load without an explicit Loader is unsafe on
     # untrusted input and deprecated since PyYAML 5.1.
     self.cfg = yaml.load(conf_file)
     conf_file.close()
     # define our state variables
     self.door_closed = True
     self.camera_blocked = False
     self.timeout_door =  5
     self.timeout_entry =  10
     self.ir_state = 2
     self.trust = 1.
     self.trust_threshold = 3.
     self.picture_sequence = 0
     self.telegram_publisher = rospy.Publisher('telegram_message', String, queue_size = 150)
     sleep(1)  # give the publisher time to register before publishing
     self.telegram_publisher.publish("catflap startup ...")
     sleep(3)
     # define the listeners and their callbacks
     rospy.Subscriber("door_closed_state",            Bool ,self.callback_door_sensor, queue_size = 1)
     # NOTE(review): both IR topics share one callback — presumably it
     # distinguishes the source itself; confirm.
     rospy.Subscriber("inner_ir_sensor_state", Bool ,self.callback_ir_sensor, queue_size = 1)
     rospy.Subscriber("outer_ir_sensor_state", Bool ,self.callback_ir_sensor, queue_size = 1)
     # start the publishers
     sleep(3)
     self.light_publisher = rospy.Publisher("lightswitch_command", Bool, queue_size=10)
     self.light_publisher.publish(False)
     self.take_a_picture_publisher = rospy.Publisher('take_a_picture', Bool, queue_size = 1)
     self.door_lock_publisher = rospy.Publisher('door_lock_command', Bool, queue_size = 1)
     self.door_lock_publisher.publish(True)  # lock the flap by default at startup
     self.camera = camera.camera()
     self.camera.camera_init()
     self.camera.classifier_init()
     self.telegram_publisher.publish("catflap startup finished")
     rospy.spin()  # block here; all further work happens in callbacks
Exemplo n.º 16
0
def proc_th(oder):
    """Dispatch a recognised voice order (Python 2 code: str.decode is used).

    state 0: command mode — an order found in od_list switches to music mode
    (state 1); '사진' takes a photo; '영상' records a video.
    state 1: treat *oder* as a song title and (re)start streaming playback.

    :param oder: recognised utterance (byte string, hence .decode('utf-8')).
    """
    global state,streaming

    if state == 0:
        if any( oder in s for s in od_list):
            state =1
            print("노래 제목이 무엇입니까?")
        elif oder == '사진'.decode('utf-8') or oder == ' 사진'.decode('utf-8'):
            camera.camera()
        elif oder == '영상'.decode('utf-8') or oder == ' 영상'.decode('utf-8'):
            # NOTE(review): "vedio" looks like a typo for "video", but the
            # project module apparently defines this name — confirm before fixing.
            camera.vedio()
    elif state == 1:
        if streaming:
            # stop the current stream before starting the new song
            MP.stop_now()

        streaming = True
        MP.play(oder)
def test_projected_point():
    """All eight corners of the unit cube must project inside the viewport."""
    cam = camera.camera((4.0, 3.0, 2.0), (0.0, 0.0, 0.0), fov=45.0)
    viewport = (0, 0, 200, 200)

    for corner in itertools.product([-1.0, 1.0], repeat=3):
        screen = cam.project(corner, viewport)
        inside_x = 0 < screen[0] < 200
        inside_y = 0 < screen[1] < 200
        assert inside_x and inside_y
Exemplo n.º 18
0
def main():
    """Wire the camera and two light sensors into a device and run it."""
    channel_a, channel_b = 0, 1
    cam_obj = camera.camera()
    sensor_a = lightsensor.lightsensor(channel_a)
    sensor_b = lightsensor.lightsensor(channel_b)
    device.device(cam_obj, sensor_a, sensor_b).run()
Exemplo n.º 19
0
def test_move():
    """camera.move must translate .position (exact on z, approximate strafe)."""
    cam = camera.camera((4.0, 3.0, 2.0), (0.0, 0.0, 0.0), fov=45.0)
    np.testing.assert_almost_equal(cam.position, (4.0, 3.0, 2.0))

    # each entry: (move delta, expected position, decimal precision)
    for delta, expected, places in (
            ((0.0, 0.0, 3.0), (4.0, 3.0, 5.0), 7),
            ((1.0, 0.0, 0.0), (3.25, 2.44, 4.62), 1)):
        cam.move(*delta)
        np.testing.assert_almost_equal(cam.position, expected, decimal=places)
Exemplo n.º 20
0
def colorize_plane(x_pixels, y_pixels, samples_per_pixel):
    """Ray-trace a random scene and return the image as ASCII-PPM (P3) lines.

    :param x_pixels: image width in pixels.
    :param y_pixels: image height in pixels.
    :param samples_per_pixel: rays averaged per pixel (anti-aliasing).
    :return: list of strings forming a complete P3 PPM file.
    """
    camera_pos      = vector_3d([13.0, 2.0, 3.0])
    camera_looks_at = vector_3d([0.0, 0.0, 0.0])

    cam = camera(deg_fov      = 20,
                 aspect       = float(x_pixels)/float(y_pixels),
                 aperture     = 0.1,
                 focal_length = 10.0,
                 look_from    = camera_pos,
                 look_at      = camera_looks_at,
                 view_up      = vector_3d([0, 1, 0]))                           # y-axis-aligned
    ppm_file_lines = [ ]

    #
    # the ppm preamble
    #      P3
    #      640 480
    #      255
    #
    ppm_file_lines.append("P3")
    ppm_file_lines.append('{:d} {:d}'.format(x_pixels, y_pixels))
    ppm_file_lines.append("255")

    world_object_list = get_random_scene()

    # draw the plane, top row first.
    # BUG FIX: the original used range(y_pixels - 1, 0, -1), which stops
    # before y == 0 and emits one row fewer than the header declares,
    # producing a malformed PPM file.
    for y in range(y_pixels - 1, -1, -1):
        for x in range(0, x_pixels):

            #
            # each pixel has multiple samples. for each sample, we send the
            # rays, and then average the final color
            #
            point_color = vector_3d()
            for s in range(0, samples_per_pixel):
                u = float(x + random.random())/float(x_pixels)
                v = float(y + random.random())/float(y_pixels)

                r = cam.get_ray(u, v)
                point_color += colorize_world(r, world_object_list)
            # <end: for s in range(0, samples_per_pixel)

            # average the final color and apply gamma correction (gamma 2)
            point_color /= float(samples_per_pixel)
            point_color  = vector_3d([math.sqrt(point_color.r()),
                                      math.sqrt(point_color.g()),
                                      math.sqrt(point_color.b())])
            # byte conversion
            point_color *= 255.99

            # dump it
            r, g, b = point_color.rgb_values()
            ppm_file_lines.append('{:d} {:d} {:d}'.format(r, g, b))

    return ppm_file_lines
Exemplo n.º 21
0
 def __init__(self, name):
     """Initialise the video device: generic base, camera mixin, fixed size.

     :param name: hardware-object name, also stored as self.device.
     """
     GenericVideoDevice.__init__(self, name)
     camera.__init__(self)
     self.log = logging.getLogger("user_level_log")
     self.device = name
     # NOTE(review): self.camera holds a new camera instance even though this
     # class also initialises camera as a base — confirm the double use.
     self.camera = camera()
     self.width = 1360   # image width in pixels
     self.height = 1024  # image height in pixels
Exemplo n.º 22
0
 def __init__(self, name):
     """Initialise the video device: generic base, camera mixin, fixed size.

     :param name: hardware-object name, also stored as self.device.
     """
     GenericVideoDevice.__init__(self, name)
     camera.__init__(self)
     self.log = logging.getLogger("user_level_log")
     self.device = name
     # NOTE(review): self.camera holds a new camera instance even though this
     # class also initialises camera as a base — confirm the double use.
     self.camera = camera()
     self.width = 1360   # image width in pixels
     self.height = 1024  # image height in pixels
Exemplo n.º 23
0
    def videoStream(self):
        """Embed the camera-1 video feed widget into this widget's grid layout."""
        # Video Feed
        self.layout = QGridLayout(self)
        view = camera.camera(ip.camera1)

        # cap the stream widget at 500x500 pixels
        view.setMaximumHeight(500)
        view.setMaximumWidth(500)

        # Add widget to display
        self.layout.addWidget(view)
Exemplo n.º 24
0
def main(options):
    """
    Capture and display camera images in an endless loop.

    :param options: object exposing wt/ht (capture size) and time (delay
        between frames) — confirm exact units against the camera module.
    :return: never returns; loops until the process is killed.
    """
    cam = camera.camera(options.wt, options.ht)
    while True:
        cam.capture_image()
        cam.show_image()
        cam.delay_camera(options.time)
Exemplo n.º 25
0
 def __init__(self,
              vertical_range,
              horizontal_range,
              number_of_rows,
              number_of_columns,
              scan_exposure_time,
              scan_start_angle=None,
              scan_range=0.01,
              image_nr_start=1,
              scan_axis='horizontal', # 'horizontal' or 'vertical'
              direction_inversion=True,
              method='md2', # possible methods: "md2", "helical"
              zoom=None, # by default use the current zoom
              name_pattern='grid_$id',
              directory='/nfs/ruchebis/spool/2016_Run3/orphaned_collects'):
     """Configure a raster (grid) scan experiment.

     :param vertical_range: vertical extent of the grid — units per project convention.
     :param horizontal_range: horizontal extent of the grid.
     :param number_of_rows: number of grid rows.
     :param number_of_columns: number of grid columns.
     :param scan_exposure_time: exposure time per line frame [s] — confirm units.
     :param scan_start_angle: start angle; None presumably keeps the current one.
     :param scan_range: oscillation per scanned line.
     :param image_nr_start: number of the first image.
     :param scan_axis: 'horizontal' or 'vertical'; selects the fast axis.
     :param direction_inversion: invert direction on alternate lines.
     :param method: scan strategy, 'md2' or 'helical'.
     :param zoom: camera zoom; None keeps the current zoom.
     :param name_pattern: output file-name pattern.
     :param directory: output directory.
     """
     # beamline hardware helpers
     self.goniometer = goniometer()
     self.detector = detector()
     self.camera = camera()
     self.guillotine = protective_cover()
     self.beam_center = beam_center()

     self.scan_axis = scan_axis
     self.method = method
     self.vertical_range = vertical_range
     self.horizontal_range = horizontal_range
     self.shape = numpy.array((number_of_rows, number_of_columns))
     self.number_of_rows = number_of_rows
     self.number_of_columns = number_of_columns

     # per-frame timing: counting time excludes the detector readout gap
     self.frame_time = scan_exposure_time
     self.count_time = self.frame_time - self.detector.get_detector_readout_time()

     self.scan_start_angle = scan_start_angle
     self.scan_range = scan_range

     # line duration and angular step depend on which axis is scanned fast
     if self.scan_axis == 'horizontal':
         self.line_scan_time = self.frame_time * self.number_of_columns
         self.angle_per_frame = self.scan_range / self.number_of_columns
     else:
         self.line_scan_time = self.frame_time * self.number_of_rows
         self.angle_per_frame = self.scan_range / self.number_of_rows

     self.image_nr_start = image_nr_start

     self.direction_inversion = direction_inversion

     self.name_pattern = name_pattern
     self.directory = directory

     self.method = method
     self.zoom = zoom
     # BUG FIX: the original ended with super(self, experiment).__init__(),
     # which always raises TypeError because super()'s first argument must be
     # a type.  Initialise the experiment base class directly, mirroring the
     # other experiment subclasses in this codebase.
     experiment.__init__(self, name_pattern=name_pattern, directory=directory)
def test_projected_point_after_move():
    """After backing the camera far away, no cube corner lands in the viewport."""
    cam = camera.camera((4.0, 3.0, 2.0), (0.0, 0.0, 0.0), fov=45.0)
    cam.move(0.0, 0.0, 10.0)

    viewport = (0, 0, 200, 200)

    hits = 0
    for corner in itertools.product([-1.0, 1.0], repeat=3):
        screen = cam.project(corner, viewport)
        if 0 < screen[0] < 200 and 0 < screen[1] < 200:
            hits += 1

    assert hits == 0
Exemplo n.º 27
0
 def start(self, game_runner, player, pos):
     """Begin the map: place the player, build a clamped camera, open the map.

     :param game_runner: owning game-loop object.
     :param player: player entity, re-placed at *pos*.
     :param pos: spawn position handed to player.replace.
     """
     self.game_runner = game_runner
     self.player = player
     self.player.replace(pos)
     # camera follows the player, clamped to the map bounds (200 px margin)
     self.cam = camera.camera(self.player)
     self.cam.set_x_limit(
         -self.size_tiles[0] * entity.TILE_SIZE + size[0] - 200, 200)
     self.cam.set_y_limit(
         -self.size_tiles[1] * entity.TILE_SIZE + size[1] - 200, 200)
     # centre the camera on the player
     self.cam.replace(-player.pos[0] + size[0] / 2,
                      -player.pos[1] + size[1] / 2)
     self.player.set_map(self)
     self.transition_type = TRANSITION_OPEN
Exemplo n.º 28
0
    def __init__(self, *args):
        """
        Initialise the diffractometer hardware object: all channels, commands
        and helper objects start as None/defaults and are wired up later by
        the framework.
        """
        GenericDiffractometer.__init__(self, *args)

        # Hardware objects ----------------------------------------------------
        self.zoom_motor_hwobj = None
        self.omega_reference_motor = None
        self.centring_hwobj = None
        self.minikappa_correction_hwobj = None
        self.nclicks = None
        self.step = None
        self.centring_method = None
        self.collecting = False

        # Channels and commands -----------------------------------------------
        self.chan_calib_x = None
        self.chan_calib_y = None
        self.chan_current_phase = None
        self.chan_head_type = None
        self.chan_fast_shutter_is_open = None
        self.chan_state = None
        self.chan_status = None
        self.chan_sync_move_motors = None
        self.chan_scintillator_position = None
        self.chan_capillary_position = None
        self.cmd_start_set_phase = None
        self.cmd_start_auto_focus = None
        self.cmd_get_omega_scan_limits = None
        self.cmd_save_centring_positions = None
        self.centring_time = None
        # Internal values -----------------------------------------------------
        self.use_sc = False  # presumably "use sample changer" — confirm
        self.omega_reference_pos = [0, 0]
        self.reference_pos = [680, 512]  # presumably a pixel position — confirm

        # beamline helper objects
        self.goniometer = goniometer()
        self.camera = camera()
        self.detector = detector()

        # two-way mapping between MD2 and MXCuBE motor names
        self.md2_to_mxcube = dict(
            [(key, value) for key, value in self.motor_name_mapping]
        )
        self.mxcube_to_md2 = dict(
            [(value, key) for key, value in self.motor_name_mapping]
        )

        self.log = logging.getLogger("HWR")
Exemplo n.º 29
0
class motion:
    currentImage = Image.new('RGB',(150,150),"black")
    prevImage = Image.new('RGB',(150,150),"black")
    alarmEndPoint = '/entrance/?format=json'
    camera = camera()
    width = 0
    height = 0
    totalMovement = 0
    def __init__(self):
        self.setPrevImage()
        self.setWidthHeight()
    def movement(self):
        self.setCurrentImage()
        currentImagePixels = self.currentImage.load()
        prevImagePixels = self.prevImage.load()
        x=0
        y=0
        
        while y<self.height:
            while x<self.width:
                p1,p2,p3 = currentImagePixels[x,y]
                c1,c2,c3 = prevImagePixels[x,y]
                difference = (p1 - c1) + (p2 - c2) + (p3 - c3)
                if difference > 20:
                    self.totalMovement +=1
                x+=1
            x=0
            y+=1
        print self.totalMovement
        self.prevImage = self.currentImage 
        if self.totalMovement > 500:
            self.totalMovement = 0
            return True
        else:
            self.totalMovement = 0
            return False
           
    def setCurrentImage(self):
        self.camera.takePicture('currentImage.jpg','150','150')
        self.currentImage=Image.open("/home/pi/Desktop/currentImage.jpg")
    def setPrevImage(self):
        self.camera.takePicture('prevImage.jpg','150','150')
        self.prevImage = Image.open("/home/pi/Desktop/prevImage.jpg")
    def setWidthHeight(self):
        self.width,self.height = self.prevImage.size
    def registerMotionAlarm(self,webServer)
Exemplo n.º 30
0
 def __init__(self):
     '''
     Initialize required objects: camera, image container, alarm,
     processing strategy, and detect whether a GUI display is available.
     '''
     # Construct the camera capture object
     from camera import camera
     self.cam = camera()
     # Using container to store images
     from container import dataContainer
     self.imgContainer = dataContainer()
     # Construct an alarm object
     from alarm import alarm
     self.eventAlarm = alarm()
     # Construct the image processing strategy
     from strategy import strategyConstructor
     self.strategyConstruction = strategyConstructor(self.eventAlarm)
     # check if X11 DISPLAY exist
     self._isgui = self._checkGUI()
Exemplo n.º 31
0
def main():
    """Trace a tiny two-sphere scene and write the image via output_image."""
    start = time.time()

    width, height = 600, 300

    # scene: small sphere resting on a huge "ground" sphere
    world = [
        Sphere(vec3(0, 0, -1), 0.5, lambertian(0.5)),
        Sphere(vec3(0, -100.5, -1), 100, lambertian(0.5))
    ]

    # one position per pixel over the normalized plane (x right, y down-flipped)
    bounds = (0., 1., 1., 0.)
    px = np.tile(np.linspace(bounds[0], bounds[2], width), height)
    py = np.repeat(np.linspace(bounds[1], bounds[3], height), width)
    pz = np.repeat(0.0, (width * height))

    origin = vec3(0.0, 0.0, 0.0)
    color = vec3(0, 0, 0)
    cam = camera()

    plane_points = vec3(px, py, pz)
    ray_dirs = plane_points - origin

    samples = 1
    for _ in range(samples):
        # jitter every ray by a sub-pixel random offset
        u = ray_dirs.x + (random.random() / float(width))
        v = ray_dirs.y + (random.random() / float(height))
        ray = cam.get_ray(u, v)
        color += raytrace(ray, world, 0)

    color = color / vec3(samples, samples, samples)
    print("Took %s" % (time.time() - start))

    output_image(color, width, height)
Exemplo n.º 32
0
 def __init__(self, gpioDict):
     """Initialise the monitor: timing, camera, email transport and GPIO state.

     :param gpioDict: mapping of GPIO identifiers consumed by initializedGpio().
     """
     # check interval
     self.filePath = ''
     self.checkInt = 3
     # warning interval: 120 minutes, stored in seconds
     # (the original comment claimed 30 minutes, contradicting the value)
     minutes = 120
     self.warning = minutes * 60
     # set initial time
     self.time = 0
     self.cam = camera()
     # set position to closed
     self.shut = True
     # abort startup entirely if the email transport cannot be built
     try:
         self.email = email()
     except EmailError:
         sys.exit()
     self.messages = self.__message()
     self.gpioDict = gpioDict
     self.stateDict = dict()
     self.initializedGpio()
Exemplo n.º 33
0
def main(options):
    """Capture one frame per options.time and display a frame-difference image."""
    cam = camera.camera(options.wt, options.ht)
    cam.capture_image()
    previous_frame = cam.actual_image

    while True:
        cam.capture_image()
        # persist the current and previous frames, then diff them on disk
        cam.save_image('./prof1.png')
        fs.save_image('./prof2.png', previous_frame)
        previous_frame = cam.actual_image

        diff_image = fs.differences_images('./prof1.png', './prof2.png')
        fs.save_cv2_image('./result.png', diff_image)
        # load the diff back as a pygame surface and show it
        cam.image_show = pygame.image.load('./result.png')
        cam.show_image()

        cam.delay_camera(options.time)
Exemplo n.º 34
0
    def __init__(self,
                 name_pattern,
                 directory,
                 step=36,
                 orientations=[],
                 analysis=True,
                 conclusion=True):
        """Configure an orientation-survey experiment.

        :param name_pattern: file-name pattern forwarded to experiment.
        :param directory: output directory forwarded to experiment.
        :param step: angular step between observations (36 with 360-degree
            conventions elsewhere suggests degrees — confirm).
        :param orientations: explicit orientation list.  NOTE(review):
            mutable default argument, shared across calls — confirm callers
            never mutate it.
        :param analysis: forwarded to experiment.
        :param conclusion: forwarded to experiment.
        """
        experiment.__init__(self,
                            name_pattern,
                            directory,
                            analysis=analysis,
                            conclusion=conclusion)

        # beamline hardware helpers
        self.goniometer = goniometer()
        self.camera = camera()

        self.step = step
        self.orientations = orientations
        self.observations = []
        self.observe = False
Exemplo n.º 35
0
    def __init__(self, renderer, size):
        """Build the level: geometry, viewport-sized camera, player, ghost, font.

        :param renderer: renderer handle kept for drawing.
        :param size: (width, height) of the viewport, used to size the camera.
        """
        self.geometry = geometry.geometry()

        self.renderer = renderer

        self.camera = camera.camera(size[0], size[1])

        self.player = player.player(self.geometry)

        # mouse position, in screen space and in world space
        self.mouse_screen_x = 0
        self.mouse_screen_y = 0

        self.mouse_x = 0
        self.mouse_y = 0

        # the ghost replays a previously recorded run of the player
        self.ghost = player.player(self.geometry, True)
        self.ghost_data_store = []
        self.ghost_data_replay = []
        self.ghost_replay_index = 0

        self.font = sdl2hl.ttf.Font(resource_string(__name__, 'res/font/LiberationSans-Regular.ttf'), 24)
    def __init__(self, auto=True):
        """Start the car and camera, then enter auto or manual drive.

        :param auto: when True (default) run autoDrive(), otherwise
            manualDrive(); either way this constructor blocks in that call.
        """
        # initialize car object
        self.myCar = car()
        # initialize camera
        self.myCamera = camera()
        # initialize global variables
        vars.init()

        # give the hardware time to come up
        time.sleep(2)

        print("myCar and myCamera startup completed")

        if auto:
            # auto drive
            print("autoDrive mode")
            self.autoDrive()

        else:
            # manual drive
            print("manualDrive mode")
            self.manualDrive()
Exemplo n.º 37
0
def defineParameters(
        camResolution,
        camFocalLength,
        camSensorSize,
        beacons,
        tc,
        qe,
        lambdaBinSize,
        effectiveArea,
        darkCurrent,
        readSTD,
        binSize,
        maxBinDepth,
        psfSTD,
        simTimeStep
):
    """
    Generates formatted inputs for camera, image processing, and navigation modules
    :params: camResolution      : (horizontal x vertical) camera resolution
    :params: camFocalLength     : camera focal length [m]
    :params: camSensorSize      : (horizontal x vertical) camera sensor size [m]
    :params: beacons            : N length list of beacon objects
    :params: tc                 : transmission curve dictionary (see SERs 4.3/4.3b)
    :params: qe                 : quantum efficiency curve dictionary (see SERs 4.3/4.3b)
    :params: lambdaBinSize      : bin size for lambda functions [nm]
    :params: effectiveArea      : effective area of camera [m^2]
    :params: darkCurrent        : dark current [electrons/s/pixel]
    :params: readSTD            : standard deviation of read noise [electrons/pixel]
    :params: binSize            : bin size [DN]
    :params: maxBinDepth        : saturation depth [DN]
    :params: psfSTD             : point spread function standard deviation [pixels]
    :params: simTimeStep        : simulation time step [s]

    :return: camInputs          : list of inputs for camera module
    :return: ipInputs           : list of inputs for image processing
    :return: navInputs          : list of inputs for navigation module
    """

    # collect beacon identifiers and equatorial radii for image processing
    beaconIDs = []
    beaconRadius = []
    for each in beacons:
        beaconIDs.append(each.id)
        beaconRadius.append(each.r_eq)

    # init values for camera that will be set later.
    scState = -1
    scDCM = -1
    takeImage = 0

    cam = camera.camera(
        camSensorSize[0],  # detector width in m
        camSensorSize[1],  # detector height in m
        camFocalLength,  # focal length in m
        camResolution[0],  # detector resolution (width direction)
        camResolution[1],  # detector resolution (height direction)
        np.identity(3),  # body2cameraDCM
        1000,  # maximum magnitude (for debugging)
        -1000,  # minimum magnitude (for debugging)
        qe,  # quantum efficiency dictionary
        tc,  # transmission curve dictionary
        lambdaBinSize,  # lambda bin size
        effectiveArea,  # effective area in m^2
        darkCurrent,  # dark current in electrons per second
        readSTD,  # std for read noise in electrons
        binSize,  # bin size
        maxBinDepth,  # max bin depth
        psfSTD,  # std for psf
        simTimeStep,  # simulation timestep
        scState,  # position state of s/c
        scDCM,  # inertial-to-body DCM for s/c
        beacons,  # bodies to track in images
        takeImage,  # takeImage message
        debug={
            'addStars': 0, 'rmOcc': 1, 'addBod': 1, 'psf': 1,
            'raster': 1, 'photon': 1, 'dark': 1, 'read': 1,
            'verbose': 1, 'hotDark': 1},
        db='../dinoModels/SimCode/opnavCamera/db/tycho.db'  # stellar database
    )

    # Camera Module Parameter Creation

    camInputs = cam

    # Image Processing Module Parameter Creation
    ipCamParam = {}
    ipCamParam['resolution'] = (cam.resolutionWidth, cam.resolutionHeight)
    ipCamParam['focal length'] = cam.focalLength
    ipCamParam['sensor size'] = (cam.detectorWidth, cam.detectorHeight)
    ipCamParam['pixel size'] = (
        cam.detectorWidth / cam.resolutionWidth,
        cam.detectorHeight / cam.resolutionHeight)
    ipCamParam['field of view'] = (
        cam.angularWidth,
        cam.angularHeight)
    ipInputs = [ipCamParam, beaconIDs, beaconRadius]

    # Nav Module Parameter Creation

    navInputs = {}

    # SPICE Parameters

    # basic .bsp filename (generic, such as de430, etc)
    navInputs['basic_bsp'] = 'de430.bsp'
    # .bsp filename for mission
    navInputs['mission_bsp'] = 'DINO_kernel.bsp'
    # .tls filename
    navInputs['tls'] = 'naif0011.tls'
    # abcorr for spkzer
    navInputs['abcorr'] = 'NONE'
    # reference frame
    navInputs['ref_frame'] = 'J2000'

    # Force Parameters

    #   Gravity
    # body vector for primary and secondary gravitational bodies
    navInputs['bodies'] = ['SUN', '3', '399']
    # specify primary and secondary indices
    navInputs['primary'] = 0
    navInputs['secondary'] = [1, 2]
    # respective GP vector
    navInputs['mu'] = [1.32712428 * 10 ** 11, 3.986004415 * 10 ** 5, 4.305 * 10 ** 4]
    #   SRP
    # A/M ratio multiplied by solar pressure constant at 1 AU with adjustments
    # Turboprop document Eq (64)
    navInputs['SRP'] = 0.3 ** 2 / 14. * 149597870. ** 2 * 1358. / 299792458. / 1000.
    # coefficient of reflectivity
    navInputs['cR'] = 1.

    # Camera/P&L Parameters

    # Focal Length (mm)
    # NOTE(review): the comment says mm, but the value comes straight from
    # ipCamParam['focal length'] (camFocalLength, documented as meters) with
    # no conversion, unlike the pixel sizes below — confirm intended units.
    navInputs['FoL'] = ipCamParam['focal length']
    # default inertial to camera transformation matrices
    navInputs['DCM_BI'] = np.eye(3)
    navInputs['DCM_TVB'] = np.eye(3)
    # Camera resolution (pixels)
    navInputs['resolution'] = [cam.resolutionWidth, cam.resolutionHeight]
    # width and height of pixels in camera. convert from meters to mm
    navInputs['pixel_width'] = ipCamParam['pixel size'][0] * 10 ** 3
    navInputs['pixel_height'] = ipCamParam['pixel size'][1] * 10 ** 3
    # direction coefficient of pixel and line axes
    navInputs['pixel_direction'] = 1.
    navInputs['line_direction'] = 1.

    # Add anomaly detection parameters
    navInputs['anomaly'] = False
    navInputs['anomaly_num'] = 0
    navInputs['anomaly_threshold'] = 4

    # plotting? 'ON' or 'OFF'
    navInputs['nav plots'] = 'ON'

    return camInputs, ipInputs, navInputs
Exemplo n.º 38
0
def test_4_15_calculate_hot_dark():
	"""Verify that enabling hot/dark-pixel simulation changes the images.

	Two images are taken per camera with ``hotDark`` disabled and two
	with it enabled.  The clean pair must match in mean and spread,
	while the hot/dark pair must differ measurably from the clean one.

	NOTE: the original asserts compared the bound ``.any`` methods of two
	arrays (always unequal, so the asserts were vacuous); they are
	replaced with ``np.array_equal`` content checks.
	"""
	msg = {
	'addStars': 0,'rmOcc': 0, 'addBod': 0, 'psf': 1, 
	'raster': 1, 'photon': 0, 'dark': 1, 'read': 0, 
	'hotDark': 1, 'verbose': 1}

	def make_cam(height, width, focal):
		# All three cameras share every parameter except detector geometry.
		# They close over the shared `msg` dict, so toggling
		# msg['hotDark'] below affects all of them at once.
		return camera.camera(
			height,         # detector_height
			width,          # detector_width
			focal,          # focal_length
			512,            # resolution_height
			512,            # resolution_width
			np.identity(3), # body2cameraDCM
			1000,           # maximum magnitude
			-1000,          # minimum magnitude (for debugging)
			qe,
			tc,
			1,
			0.01**2, 		# effective area in m^2
			100,
			100,
			1, 
			2**32,
			1,
			0.01,
			scState,
			scDCM,
			bodies,
			takeImage,
			debug = msg,
			db='../db/tycho.db'
			)

	cams = [make_cam(2.0, 2.0, 5.0), make_cam(3.0, 4.0, 7.0), make_cam(6.0, 4.0, 7.0)]
	for cam in cams:
		cam.hotDarkSigma = 1

	def snap(cams):
		# One full exposure: raise takeImage for an update, then lower it
		# and update again to finalize the image.
		for c in cams:
			c.takeImage = 1
		for c in cams:
			c.updateState()
		for c in cams:
			c.takeImage = 0
		for c in cams:
			c.updateState()

	########################################
	## 	Take Two Images without Hot_dark
	########################################
	msg['hotDark'] = 0
	snap(cams)
	snap(cams)

	########################################
	## 	Take Two Images with Hot_Dark
	########################################
	msg['hotDark'] = 1
	snap(cams)
	snap(cams)

	########################################
	## 	Per-camera checks
	########################################
	for cam in cams:
		imgs = [img.detectorArray for img in cam.images]

		# Images without hot/dark must differ in content from those with it.
		assert not np.array_equal(imgs[0], imgs[2])
		assert not np.array_equal(imgs[1], imgs[3])

		# First two images share MEAN & spread: no hot/dark pixels added.
		assert abs(np.mean(imgs[0]) - np.mean(imgs[1])) <= 1e-12
		assert abs(np.std(imgs[0]) - np.std(imgs[1])) <= 1e-12

		# Hot/dark images differ measurably from their clean counterparts.
		assert abs(np.mean(imgs[2]) - np.mean(imgs[0])) >= 0.1
		assert abs(np.std(imgs[2]) - np.std(imgs[0])) >= 0.1
		assert abs(np.mean(imgs[3]) - np.mean(imgs[1])) >= 0.1
		assert abs(np.std(imgs[3]) - np.std(imgs[1])) >= 0.1
Exemplo n.º 39
0
def test_4_8_findStarsInFOV():
	"""Image Orion and Ursa Minor and compare star RA/DE to saved truth data."""
	msg = { 
		'addStars': 1,'rmOcc': 0, 'addBod': 0, 'psf': 1, 
		'raster': 1, 'photon': 0, 'dark': 0, 
		'hotDark': 0,'read': 0, 'verbose': 1}

	def build_cam(det_size, focal, max_mag, euler_deg):
		# Cameras differ only in detector size, focal length, magnitude
		# cutoff, and pointing (3-2-1 Euler angles, degrees).  Each one
		# immediately takes a single finalized image.
		cam = camera.camera(
			det_size,			#detectorHeight
			det_size,			#detectorWidth
			focal,				#focalLength
			512,				#resolutionHeight
			512,				#resolutionWidth
			np.identity(3),		#body2cameraDCM
			max_mag,			#maximum magnitude
			-1000,				#minimum magnitude (for debugging)
			qe,					#quantum efficiency dictionary
			tc,					#transmission curve dictionary
			1,					#wavelength bin size in nm
			0.01**2,			#effective area in m^2
			100,				#dark current in electrons per second
			100,				#std for read noise in electrons
			100,				#bin size
			2**32,				#max bin depth
			1,					#sigma for gaussian psf
			0.01,				#integration timestep
			scState,			#position state of s/c
			scDCM,				#intertal 2 body DCM for s/c
			bodies,				#bodies to track in images
			takeImage,			#takeImage message
			debug=msg,			#debug message
			db='../db/tycho.db'	#stellar database
			)
		cam.scDCM = Euler321_2DCM(
			np.deg2rad(euler_deg[0]),
			np.deg2rad(euler_deg[1]),
			np.deg2rad(euler_deg[2])
			)
		# Open the shutter for one update, then close it to finalize.
		cam.takeImage = 1
		cam.updateState()
		cam.takeImage = 0
		cam.updateState()
		return cam

	OriCam = build_cam(1.5, 3.0, 3.57, (85, 0, 0))
	UMiCam = build_cam(2.3, 4.0, 4, (187, -59, 0))

	plot = 1
	if plot:
		# Raw detector arrays for both cameras...
		for cam in (OriCam, UMiCam):
			plt.figure()
			plt.imshow(cam.images[0].detectorArray.reshape(
				cam.resolutionHeight,
				cam.resolutionWidth
				))
		# ...then the rasterized star centers, image-style y axis.
		for cam in (OriCam, UMiCam):
			plt.figure()
			plt.plot(cam.images[0].scenes[0].pixel, cam.images[0].scenes[0].line, '.')
			plt.xlim(0, cam.resolutionWidth)
			plt.ylim(0, cam.resolutionHeight)
			plt.gca().invert_yaxis()
			plt.axis('equal')

	# Saved truth: rows are UMi RA, UMi DE, Ori RA, Ori DE.
	saved = np.load('camera_test_support_files/4.8.test_support.npy')
	assert(np.array_equal(UMiCam.images[0].RA, saved[0]))
	assert(np.array_equal(UMiCam.images[0].DE, saved[1]))
	assert(np.array_equal(OriCam.images[0].RA, saved[2]))
	assert(np.array_equal(OriCam.images[0].DE, saved[3]))
Exemplo n.º 40
0
#create camera with no stars in it for tests that don't need them
#They will run significantly faster without them.
# NOTE(review): qe, tc, scState, scDCM, bodies, takeImage and msg are
# assumed to be module-level fixtures defined earlier in this file --
# confirm before moving this block.
noStarCam = camera.camera(
	2, 					#detectorHeight
	2, 					#detectorWidth
	5.0, 				#focalLength
	512, 				#resolutionHeight
	512,				#resolutionWidth
	np.identity(3), 	#body2cameraDCM
	1000,		   	 	#maximum magnitude
	-1000,				#minimum magnitude (for debugging)
	qe,					#quantum efficiency dictionary
	tc,					#transmission curve dictionary
	1,					#wavelength bin size in nm
	0.01**2, 			#effective area in m^2
	100, 				#dark current in electrons per second
	100, 				#std for read noise in electrons
	100, 				#bin size
	2**32, 				#max bin depth
	1,					#sigma for gaussian psf
	0.01, 				#simulation timestep
	scState,			#position state of s/c
	scDCM,				#inertial-to-body DCM for s/c
	bodies,				#bodies to track in images
	takeImage,			#takeImage message
	debug=msg,			#debug message
	db='../db/tycho.db'	#stellar database
	)

# #now create a camera with stars in it for use in the tests that
# #actually need them.
Exemplo n.º 41
0
	def __init__(self):
		"""Initialize the film stage with its own camera and goniometer."""
		super(film, self).__init__()
		# Store the hardware handles on the instance.  The original wrote
		# `camera = camera()`, which makes `camera` a local name before it
		# is read (UnboundLocalError) and would discard the object anyway;
		# same for `goniometer`.
		self.camera = camera()
		self.goniometer = goniometer()
Exemplo n.º 42
0
def main():
    '''Headmouse main loop.

    Reads head-tracking positions from the camera input source, runs them
    through a filter chain (relative motion -> outlier rejection ->
    optional distance scaling -> smoothing -> acceleration -> sub-pixel
    truncation -> output smoothing), and sends the resulting deltas to the
    configured output driver.  Returns 0 on a clean 'q'-key exit.
    '''
    config = get_config()

    # configure logging manually so we can use runtime config settings
    # verbosity 0..3 indexes ERROR/WARN/INFO/DEBUG
    log_level = [logging.ERROR, logging.WARN, logging.INFO, logging.DEBUG][config['verbosity']]

    # must get root logger, set its level, attach handler, and set its level
    root_logger = logging.getLogger('')
    console = logging.StreamHandler()

    root_logger.setLevel(log_level)
    console.setLevel(log_level)

    root_logger.addHandler(console)

    # output driver setup: config['output'] names a callable defined in
    # this module, looked up dynamically
    # TODO: restrict loadable generation functions for security
    # TODO: driver loading system that doesn't depend on __main__ - breaks cProfile
    output_driver = sys.modules[__name__].__dict__[config['output']](config=config)

    # signal proc chain setup -- each generator keeps its own state
    # between frames
    velocity_gen = filters.relative_movement()
    sub_pix_gen = filters.sub_pix_trunc()
    stateful_smooth_gen = filters.stateful_smoother()
    input_smoother_gen = filters.ema_smoother(config['smoothing'])
    #slow_smoother_gen = filters.slow_smoother(.6)
    acceleration_gen = filters.accelerate_exp(
        p=ACCELERATION_EXPONENT,
        accel=config['acceleration'], 
        sensitivity=config['sensitivity']
        )

    output_smoother = filters.ema_smoother(config['output_smoothing'])

    # input driver setup
    camera.visualize = config['input_visualize']

    fps_stats = util.Stats(util.Stats.inverse_normalized_interval_delta, "Average frame rate {:.0f} fps", 10)
    with camera.camera(
            tracker_name=config['input_tracker'],
            camera_id=config['input_camera_name'],
            resolution=config['input_camera_resolution'],
            fps=config['input_camera_fps'],
            realtime_search_timeout=config['input_realtime_search_timeout'],
            slow_search_delay=config['input_slow_search_delay']
        ) as input_source:
        # main loop -- one iteration per tracked frame
        for x, y, distance in input_source:
            fps_stats.push(time.time())

            # Capture frame-by-frame

            ### Filter Section ###
            # take absolute position return relative position
            v = velocity_gen.send((x, y))
            v = filters.killOutliers(v, OUTLIER_VELOCITY_THRESHOLD)

            # optionally scale motion by subject distance so cursor speed
            # is consistent regardless of how far the head is
            if config['distance_scaling']:
                dx, dy = v
                v = dx * distance, dy * distance

            #v = slow_smoother_gen.send((v, 6))
            v = input_smoother_gen.send(v)
            v = acceleration_gen.send(v)
            #v = filters.accelerate(v)

            # truncate to whole pixels, carrying the remainder forward
            dx, dy = sub_pix_gen.send(v)

            # mirror motion on x-axis
            dx *= -1

            dx, dy = output_smoother.send((dx, dy))

            output_driver.send((dx,dy))

            # 'q' in the OpenCV window exits the loop
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    return 0
Exemplo n.º 43
0
 def setUp(self):
     '''Create a fresh camera instance before each test in this case.'''
     self.my_camera = camera()
Exemplo n.º 44
0
    def __init__(self, modelName, modelActive=True, modelPriority=-1):
        """Construct the opnav camera sim model.

        Wires up the message names/IDs, loads the transmission and
        quantum-efficiency curves from disk, sets up Earth/Moon state
        fixtures, and builds the underlying camera.camera instance.

        Args:
            modelName: name forwarded to the parent sim-model class.
            modelActive: whether the model starts active.
            modelPriority: scheduling priority (-1 = unset/default).
        """
        super(opnavCamera, self).__init__(modelName, modelActive, modelPriority)
        ## Input guidance structure message name
        self.scStateInputMsgName = "scStateInputMsgName"
        ## Output body torque message name
        self.outputMsgName = "opnavCameraOutputMsg"
        ## Input message ID (initialized to -1 to break messaging if unset)
        self.scStateInputMsgID = -1
        ## Output message ID (initialized to -1 to break messaging if unset)
        self.outputMsgID = -1
        # (A leftover `import pdb; pdb.set_trace()` debugging breakpoint was
        # removed here -- it halted every construction of this model.)

        # Output Lr torque structure instantiation.  Note that this creates the whole class for interation
        # with the messaging system
        #self.outputMsgData = spice_interface.OutputMsgStruct()

        # Input vehicle configuration structure instantiation.  Note that this creates the whole class for interation
        # with the messaging system
        self.SCstate = spacecraftPlus.SCPlusStatesSimMsg()

        #self.inputMsgData = other_module.OutputMsgStruct()

        #load transmission curve for Canon 20D
        _20D = np.load('../dinoModels/SimCode/opnavCamera/tc/20D.npz')
        tc = {}
        tc['lambda'] = _20D['x']
        tc['throughput'] = _20D['y']

        #load QE curve for Hubble Space Telescope Advanced Camera for Surveys SITe CCD
        ACS = np.load('../dinoModels/SimCode/opnavCamera/qe/ACS.npz')
        qe = {}
        qe['lambda'] = ACS['x']
        qe['throughput'] = ACS['y']

        # Spacecraft placed 100,000 km sunward of Earth; Earth and Moon at ~1 AU.
        scState = np.array([au/1000-100000,0,0,0,0,0])
        scDCM = np.identity(3)
        bod.earth.state = np.array([au/1000, 0,0,0,0,0])
        bod.luna.state = np.array([au/1000, 250000,0,0,0,0])
        takeImage = 0
        bodies = [bod.earth, bod.luna]

        self.cam = camera.camera(
            2,                  #detectorHeight
            2,                  #detectorWidth
            5.0,                #focalLength
            512,                #resolutionHeight
            512,                #resolutionWidth
            np.identity(3),     #body2cameraDCM
            1000,               #maximum magnitude
            -1000,              #minimum magnitude (for debugging)
            qe,                 #quantum efficiency dictionary
            tc,                 #transmission curve dictionary
            1,                  #wavelength bin size in nm
            0.01**2,            #effective area in m^2
            100,                #dark current in electrons per second
            100,                #std for read noise in electrons
            100,                #bin size
            2**32,              #max bin depth
            1,                  #sigma for gaussian psf
            0.001,               #simulation timestep
            scState,            #position state of s/c
            scDCM,              #inertial-to-body DCM for s/c
            bodies,             #bodies to track in images
            takeImage,          #takeImage message
            db='../dinoModels/SimCode/opnavCamera/db/tycho.db'
            )
Exemplo n.º 45
0
	sc
	], 
	'addStars': 1,'rmOcc': 1, 'addBod': 1, 'psf': 1, 
	'raster': 1, 'photon':1, 'dark': 1, 'read': 1, 'dt': 0.01}

cam = camera.camera(
	0.019968, 				#detector_height
	0.019968, 				#detector_width
	0.05, 			#focal_length
	512, 			#resolution_height
	512,			#resolution_width
	np.identity(3), #body2cameraDCM
	1000,		    #maximum magnitude
	-1000,			#minimum magnitude (for debugging)
	qe,				#quantum efficiency dictionary -- assumed; confirm signature
	tc,				#transmission curve dictionary -- assumed; confirm signature
	1,				#wavelength bin size in nm -- assumed; confirm signature
	0.01**2, #effective area in m^2
	100, #dark current in electrons per second
	100, #std for read noise in electrons
	1000, #bin size
	2**32, #max bin depth
	1,				#sigma for gaussian psf -- assumed; confirm signature
	sc,				#spacecraft object -- presumably; verify against camera.camera
	msg				#debug/feature-flag message dict defined above
	)

starCam = camera.camera(
	0.019968, 				#detector_height
	0.019968, 				#detector_width
	0.05, 			#focal_length
Exemplo n.º 46
0
    def run(self):
        """Main capture loop of the cloud time-lapse daemon.

        Forever: capture a frame, append its timestamp to the log file,
        refresh the web thumbnails roughly once a minute, switch the
        camera between day/night presets based on the photo sensor,
        stamp the frame with its date/time, and export a time-lapse
        video once enough frames have accumulated.  Never returns.
        """
        logger.info("clouds is starting up")
        self.camera = camera.camera()
        self.light_sensor = photo_sensor.photo_sensor()
        day = 15
        night = 15 # we will make this the same eventually, still testing it
        buffer = day
        day_time = True
        time_lapse_length = 1000
        minute = 60

        # Book keeping when the script starts: append every capture's
        # timestamp to a file for graphing.  The original used a bare
        # `except:` and then wrote to `timestamps` unconditionally, which
        # raised NameError on the first capture if the open failed.  Now
        # timestamp logging is simply skipped in that case.
        try:
            #open up timestamp file for graphing
            timestamps = open('timestamps','a')
        except OSError:
            timestamps = None
            logger.error('failed to open timestamps file')

        while True:
            #time.sleep(buffer)
            timestamp = int(time.time())
            # Both branches currently capture identically; the night branch
            # is kept separate for a planned long-exposure mode.
            if day_time != True:
                self.camera.capture('/var/lib/clouds/images/{}.png'.format(timestamp))
                #logger.info('night vision capture')
            elif day_time == True:
                self.camera.capture('/var/lib/clouds/images/{}.png'.format(timestamp))
                #logger.info('day capture')
            if timestamps is not None:
                timestamps.write(str(timestamp)+'\n')

            # Copy to webroot once a minute: rotate thumbnails 1..3 down a
            # slot and write the newest frame as 3.png.
            if timestamp %minute <= 5: # update once every minute
                #logger.info('updating')
                for number in range(3):
                    os.system("cp /var/www/html/img/{}.png /var/www/html/img/{}.png".format(number+1,number))
                os.system("rm /var/www/html/img/0.png")
                im = Image.open('/var/lib/clouds/images/{}.png'.format(timestamp))
                # NOTE(review): Image.ANTIALIAS was removed in Pillow 10;
                # switch to Image.LANCZOS when upgrading Pillow.
                im.thumbnail((600,400), Image.ANTIALIAS)
                im.save('/var/www/html/img/3.png', "PNG")

            # Flip camera preset when the light reading crosses 8500.
            light_value = self.light_sensor.get_value()
            if light_value > 8500 and day_time == True: #gray_sum/count < day_threshold and camera.iso != 800 :
                self.camera.set_preset('night')
                logger.info('going dark')
                buffer = night
                day_time = False
            elif light_value <= 8500 and day_time == False:
                self.camera.set_preset('day')
                logger.info('good day')
                buffer = day
                day_time = True

            # Stamp the frame with its capture date/time (bottom-right
            # corner) for the final time lapse.
            img = Image.open('/var/lib/clouds/images/{}.png'.format(timestamp))
            draw = ImageDraw.Draw(img)
            font = ImageFont.truetype('/var/lib/clouds/Aileron-Regular.otf', 30)
            draw.text((1550, 1010),datetime.datetime.fromtimestamp(int(timestamp)).strftime('%Y/%m/%d %I:%M:%S %p'),(255,50,50),font=font)
            img.save('/var/lib/clouds/images/{}.png'.format(timestamp))

            # Export (and upload) the video once enough frames exist.
            if self.getscreencount() >= time_lapse_length:
                self.export()
Exemplo n.º 47
0
import camera
import time

if __name__ == "__main__":
    # Build the camera wrapper and start detection; presumably
    # start_detection spawns a background thread -- TODO confirm.
    camera_object = camera.camera()
    camera_object.start_detection()
    # Keep the main thread alive so detection keeps running; stop with Ctrl-C.
    while True:
        time.sleep(1)
Exemplo n.º 48
0
def test_calculate_hot_dark():
	"""Verify that enabling hot/dark-pixel simulation changes the images.

	Fixes over the original:
	- normalized the mixed tab/space indentation, which is a TabError
	  under Python 3;
	- removed a leftover ``pdb.set_trace()`` that halted the test;
	- the ``detector_array.any != detector_array.any`` asserts compared
	  bound-method objects (always unequal, so vacuous) and are replaced
	  with ``np.array_equal`` content checks.

	``init_hot_dark``, ``qe``, ``tc``, ``sc`` and ``msg`` are assumed to
	be module-level fixtures defined elsewhere in this file.
	"""
	def make_cam(height, width, focal):
		# The three cameras share all parameters except detector geometry.
		# They share the `msg` dict, so toggling its flags affects all.
		return camera.camera(
			height,         # detector_height
			width,          # detector_width
			focal,          # focal_length
			512,            # resolution_height
			512,            # resolution_width
			np.identity(3), # body2cameraDCM
			1000,           # maximum magnitude
			-1000,          # minimum magnitude (for debugging)
			qe,
			tc,
			1,
			0.01**2, 		# effective area in m^2
			100, 			# dark current in electrons per second
			100, 			# std for read noise in electrons
			init_hot_dark,	# initial hot/dark pixel map (module fixture)
			sc,
			msg,
			db='../db/tycho.db'
			)

	cams = [make_cam(2.0, 2.0, 5.0), make_cam(3.0, 4.0, 7.0), make_cam(6.0, 4.0, 7.0)]

	def snap():
		# One exposure: raise take_image for an update, then lower it and
		# update again to finalize the image.
		msg['take_image'] = 1
		for c in cams:
			c.update_state()
		msg['take_image'] = 0
		for c in cams:
			c.update_state()

	########################################
	## 	Take Two Images without Hot_dark
	########################################
	msg['hot_dark'] = 0
	snap()
	snap()

	########################################
	## 	Take Two Images with Hot_Dark
	########################################
	msg['hot_dark'] = 1
	snap()
	snap()

	########################################
	## 	Per-camera checks
	########################################
	for cam in cams:
		imgs = [img.detector_array for img in cam.images]

		# Images without Hot_Dark must differ in content from those with it.
		assert not np.array_equal(imgs[0], imgs[2])
		assert not np.array_equal(imgs[1], imgs[3])

		# First two images share MEAN & spread: no hot/dark pixels added.
		assert abs(np.mean(imgs[0]) - np.mean(imgs[1])) <= 1e-12
		assert abs(np.std(imgs[0]) - np.std(imgs[1])) <= 1e-12

		# Hot/dark images differ measurably from their clean counterparts.
		assert abs(np.mean(imgs[2]) - np.mean(imgs[0])) >= 0.1
		assert abs(np.std(imgs[2]) - np.std(imgs[0])) >= 0.1
		assert abs(np.mean(imgs[3]) - np.mean(imgs[1])) >= 0.1
		assert abs(np.std(imgs[3]) - np.std(imgs[1])) >= 0.1
Exemplo n.º 49
0
#create camera with no stars in it for tests that don't need them
#They will run significantly faster without them.

# Debug/feature flags for image generation.  Defined BEFORE the camera:
# the original defined `msg` after passing it as `debug=msg`, which
# raised NameError when this module was imported.
msg = {
	'addStars': 1,'rmOcc': 1, 'addBod': 1, 'psf': 1, 
	'raster': 1, 'photon': 1, 'dark': 1, 'read': 1, 'dt': 0.01}

cam = camera.camera(
	0.019968, 			#detector_height
	0.019968, 			#detector_width
	0.05, 				#focal_length
	512, 				#resolution_height
	512,				#resolution_width
	np.identity(3), 	#body2cameraDCM
	1000,		    	#maximum magnitude
	-1000,				#minimum magnitude (for debugging)
	qe,
	tc,
	1,
	0.01**2, 			#effective area in m^2
	100, 				#dark current in electrons per second
	100, 				#std for read noise in electrons
	1000, 				#bin size
	2**32, 				#max bin depth
	1,
	0.01, 				#simulation timestep
	scState,			#position state of s/c
	scDCM,				#inertial-to-body DCM for s/c
	bodies,				#bodies to track in images
	takeImage,			#takeImage message
	debug=msg,			#debug message
	db='db/tycho.db'	#stellar database
	)
Exemplo n.º 50
0
def main():
    """Drive the full stereo-correspondence pipeline for one case directory.

    Reads ``caseDict.py`` from the directory given as the single positional
    command-line argument, optionally performs (or loads) single-camera
    calibration, then runs pairwise stereo calibration / rectification /
    semi-global block matching over the image sequence and writes the merged
    point cloud to disk under the case directory.

    Returns:
        None. All results are written as files; errors are reported via
        printed messages.
    """
    parser = argparse.ArgumentParser(description="A program to do stereo correspondence matching on two image with calibration target in frame. Outputs disparity maps and a point cloud.")
    parser.add_argument("caseDirectory", type=str)
    caseDirectory = parser.parse_args().caseDirectory + "/"

    if(os.path.exists(caseDirectory + "caseDict.py")):

        # SECURITY: eval() executes arbitrary Python read from caseDict.py —
        # only run against trusted case directories (ast.literal_eval would be
        # safer if the dict contains only literals). The with-block closes the
        # file handle deterministically (the original version leaked it).
        with open(caseDirectory + "caseDict.py") as caseDictFile:
            caseDict = eval("{0}".format(caseDictFile.read()))

        # BUG FIX: doSingleCamcalibration was previously assigned only inside
        # the camUsePreviousCalibrationData branch; when that flag was falsy
        # the name was undefined and the `if doSingleCamcalibration:` test
        # below raised NameError. Default to performing the calibration.
        doSingleCamcalibration = True
        if caseDict["camUsePreviousCalibrationData"]:
            if (os.path.exists(caseDirectory + caseDict["stereoPreviousIntrinsicsMatrix"]) and os.path.exists(caseDirectory + caseDict["stereoPreviousDistortionMatrix"])):
                doSingleCamcalibration = False
                print("\nUsing previously calculated single camera calibration data\n")
            #end if
            else:
                print("\nPrevious single camera calibration data not found, attempting to perform single camera calibration\n")
            #end else

        if doSingleCamcalibration:
            #do single camera calibration if required
            cam = camera.camera(
                verticalFormat = caseDict["camVerticalFormat"],
                pathToCalibrationImages = caseDirectory + caseDict["camCalibrationImagesPath"],
                calibrationImages = caseDict["camCalibrationImages"],
                numberOfPointsPerRow= caseDict["camNumberOfPointsPerRow"],
                numberOfPointsPerColumn = caseDict["camNumberOfPointsPerColumn"],
                circleSpacingSize = caseDict["camCircleSpacingSize"],
                drawCentres = caseDict["camDrawCentres"],
                pathToCalibrationImagesWithCentresDrawn = caseDirectory + caseDict["camCalibrationImagesWithCentresPath"])

            #store calibration data (duplicate assignments removed)
            cameraIntrinsicsMatrix = cam.intrinsicsMatrix
            cameraDistortionMatrix = cam.distortionMatrix
        #end if
        else:
            #use single camera calibration data loaded from file
            #(duplicate genfromtxt reads removed — each file is read once)
            cameraIntrinsicsMatrix = numpy.genfromtxt(caseDirectory + caseDict["stereoPreviousIntrinsicsMatrix"])
            cameraDistortionMatrix = numpy.genfromtxt(caseDirectory + caseDict["stereoPreviousDistortionMatrix"])
        #end else

        #check for stereo images (loop variable renamed: `file` shadowed the
        #Python 2 builtin)
        stereoImageFileNamesList = []
        for fileName in os.listdir(caseDirectory + caseDict["stereoPathToStereoImages"]):
            if fnmatch.fnmatch(fileName, caseDict["stereoStereoImages"]):
                stereoImageFileNamesList.append(fileName)
            #end if
        #end for
        #sort the list into filename order
        stereoImageFileNamesList.sort()
        print(stereoImageFileNamesList)

        rgbImagesList = []
        xyzImagesList = []
        validityMasksList = []
        stList = []

        if(len(stereoImageFileNamesList) >= 2):

            #process each consecutive image pair as a stereo pair
            for i in range(len(stereoImageFileNamesList) - 1):
                #initialise
                st = stereo.stereo(
                    verticalFormat = caseDict["stereoVerticalFormat"],
                    cameraOneIntrinsicsMatrix = cameraIntrinsicsMatrix,
                    cameraOneDistortionMatrix = cameraDistortionMatrix,
                    cameraTwoIntrinsicsMatrix = cameraIntrinsicsMatrix,
                    cameraTwoDistortionMatrix = cameraDistortionMatrix,
                    pathToImagePair = caseDirectory + caseDict["stereoPathToStereoImages"],
                    leftImageFilename = stereoImageFileNamesList[i],
                    rightImageFilename = stereoImageFileNamesList[i+1])

                #do stereo calibration once, on the first pair; later pairs
                #reuse the recovered extrinsics
                if(i == 0):
                    (cameraRotationMatrix, cameraTranslationVectorMatrix, leftCameraRotationMatrix, leftCameraTranslationVectorMatrix, imageSize, errorFlag) = st.calibrate(
                    drawCentres = caseDict["stereoDrawCentres"],
                    pathToImagePairWithCentresDrawn = caseDirectory + caseDict["stereoStereoImagesWithCentresPath"],
                    numberOfPointsPerRow = caseDict["stereoNumberOfPointsPerRow"],
                    numberOfPointsPerColumn = caseDict["stereoNumberOfPointsPerColumn"],
                    circleSpacingSize = caseDict["stereoCircleSpacingSize"])
                #end if

                #rectify images
                (leftRectificationMatrix, _) = st.rectify(
                    imageSize = imageSize,
                    cameraRotationMatrix = cameraRotationMatrix,
                    cameraTranslationVectorMatrix = cameraTranslationVectorMatrix,
                    alpha = caseDict["stereoRectificationAlpha"],
                    calibrateForZeroDisparity = caseDict["stereoCalibrateForZeroDisparity"],
                    saveRectifiedImages = caseDict["stereoSaveRectifiedImages"],
                    pathToRectifiedImages = caseDirectory + caseDict["stereoRectifiedStereoImagesPath"],
                    imageScaleFactor = caseDict["stereoRectificationImageScaleFactor"])

                #calculate camera matricies relative to the first camera position
                if (i == 0):
                    relativeLeftCameraRotationMatrix = leftRectificationMatrix.transpose()#numpy.identity(3, dtype = numpy.float64)
                    relativeLeftCameraTranslationVectorMatrix = numpy.zeros((3,1), dtype = numpy.float64)
                else:
                    #chain this pair's extrinsics onto the previous pair's pose
                    relativeLeftCameraRotationMatrix = numpy.dot(cameraRotationMatrix.transpose(), previousRelativeLeftCameraRotationMatrix)
                    relativeLeftCameraTranslationVectorMatrix = previousRelativeLeftCameraTranslationVectorMatrix - numpy.dot(cameraRotationMatrix.transpose(), cameraTranslationVectorMatrix)
                #end else
                previousRelativeLeftCameraRotationMatrix = numpy.copy(relativeLeftCameraRotationMatrix)
                previousRelativeLeftCameraTranslationVectorMatrix = numpy.copy(relativeLeftCameraTranslationVectorMatrix)

                #do semi global block matching
                st.calculateDisparitySGBM(
                    saveDisparityMaps = caseDict["SGBMSaveDisparityMaps"],
                    pathToDisparityMaps = caseDirectory + caseDict["SGBMDisparityMapsPath"],
                    useSobelPreFilter = caseDict["SGBMUseSobelPreFilter"],
                    preFilterCap = caseDict["SGBMPreFilterCap"],
                    SADWindowSize = caseDict["SGBMSADWindowSize"],
                    minDisparity = caseDict["SGBMMinDisparity"],
                    numberOfDisparities = caseDict["SGBMNumberOfDisparities"],
                    uniquenessRatio = caseDict["SGBMUniquenessRatio"],
                    speckleWindowSize = caseDict["SGBMSpeckleWindowSize"],
                    speckleRange = caseDict["SGBMSpeckleRange"],
                    P1 = caseDict["SGBMP1"],
                    P2 = caseDict["SGBMP2"])

                #create the data to generate point clouds
                st.createPointClouds(
                    cameraRotationMatrix = relativeLeftCameraRotationMatrix,
                    cameraTranslationVectorMatrix = relativeLeftCameraTranslationVectorMatrix)

                #append point cloud data to the relevant lists
                rgbImagesList.append(st.leftRgbImage2DArray)
                xyzImagesList.append(st.leftTransformedXyzImage2DArray)
                validityMasksList.append(st.leftSuccessfulMatchMask)
            #end for

            #concatenate point cloud data from all pairs along the point axis
            concatenatedRgbArray = rgbImagesList[0]
            concatenatedXyxArray = xyzImagesList[0]
            concatenatedValidityMask = validityMasksList[0]

            for i in range(len(stereoImageFileNamesList) - 2):
                concatenatedRgbArray = numpy.concatenate((concatenatedRgbArray, rgbImagesList[i+1]), axis = 1)
                concatenatedXyxArray = numpy.concatenate((concatenatedXyxArray, xyzImagesList[i+1]), axis = 1)
                concatenatedValidityMask = numpy.concatenate((concatenatedValidityMask, validityMasksList[i+1]), axis = 1)

            #create point cloud from left image and disparity map
            pc = pointCloud.xyzrgbPointCloudOriented(
                rgbImage = concatenatedRgbArray,
                xyzImage = concatenatedXyxArray,
                validityMask = concatenatedValidityMask)

            print("Writing point cloud of {0} points ...\n".format(concatenatedValidityMask.shape[1] - numpy.sum(concatenatedValidityMask)))

            #write to disk
            pc.writeUnstructured(
                pointCloudPath = caseDirectory,
                pointCloudFilename = caseDict["PCPointCloudFilename"])

        #end if
        else:
            print("Error: two or more images not found for stereo matching")
        #end else

    #end if
    else:
        print("Error: Case directory not found")
Exemplo n.º 51
0
import cv
import cv2
import numpy
import camera

# Python 2 script: drives the custom `camera` module to capture a series of
# narrow image strips and stitch them side by side into one wide image.
print "Camera 1 script"

# find free filename
# NOTE(review): `os` is used here but not imported in this snippet — confirm
# it is imported elsewhere before this runs.
file_suffix = 0
while os.path.exists("pics/test%s.png" % str(file_suffix)):
    file_suffix += 1

# Capture geometry: each frame is imageWidth x imageHeight sensor pixels.
imageWidth = 40
imageHeight = 2000

cam = camera.camera()
# Configure the sensor through its command-string interface (region of
# interest, readout speed, binning, right-shift, test pattern disabled).
cam.setParametersAsString("cmd -row 0 -column 0 -width 39 -height 1999 -speed 2 -bin 0 -rshift 2 -nopattern")
'''cam.setRow(0)
cam.setColumn(0)
cam.setWidth(19)
cam.setHeight(1999)
cam.setSpeed(10)
cam.setBinning(0)
cam.setBarPattern()'''
# image = cam.takePicture()

#take several pictures and merge them together
numberOfImages = 66
# Output canvas: half resolution in each axis (Python 2 integer division),
# with the frames laid out horizontally, 3 colour channels.
image = numpy.zeros(((imageHeight)/2, imageWidth/2*numberOfImages, 3), numpy.uint8)

for i in range(0, numberOfImages):