def __init__(self):
        threading.Thread.__init__(self)

        # MQTT broker address for WideFind
        self.broker_url = "130.240.114.24"
        self.broker_port = 1883

        # Data must be sent inside an event object
        self.event = {}
        self.pos = {}

        self.ids = []

        self.debug = False

        self.person = [0, 0, 0]
        self.p1 = [0, 0, 0]
        #self.cameraRoof = [3141, -3812, 1220]
        self.camera = [3261, -3800, 740]
        self.cameraFloor = [3635, -4074, 418]

        # Object controlling the camera
        self.c = Camera()

        self.following = False
        self.rotation = 0

        self.client = mqtt.Client()
        self.client.on_connect = self.on_connect
        self.client.on_message = self.on_message
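The snippet above only wires up the paho-mqtt client; the connect call, the subscription, and the callback bodies are not shown. A minimal sketch of how those pieces usually look, written as further methods of the same Thread subclass (the topic filter and the callback bodies are assumptions; only the paho-mqtt calls themselves are standard):

# Hedged sketch, not part of the original project.
def on_connect(self, client, userdata, flags, rc):
    client.subscribe("widefind/#")  # hypothetical topic filter

def on_message(self, client, userdata, msg):
    if self.debug:
        print(msg.topic, msg.payload)

def run(self):
    self.client.connect(self.broker_url, self.broker_port)
    self.client.loop_forever()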
Example #2
    def __init__(self, url, id, token, save_record, default_mode):

        self.cam = Camera()
        self.detector = detector(self.cam)
        self.detector_thread = None
        self.save_record = save_record
        self.default_mode = default_mode
        self.running = False
        self.state = {
            "server": {
                "mode": default_mode
            },
            "client": {
                "mode": default_mode
            },
        }
        self.mode_change = False
        self.last_mode_check = 0
        self.save_path = "/dev/shm/"
        self.disk_space = {}

        self.url = url
        self.id = id
        self.token = token
        self.mask_image = (np.ones((100, 100)), 0, [])
Example #3
 def set_pixel_array(self, pixel_array, **kwargs):
     Camera.set_pixel_array(self, pixel_array, **kwargs)
     for shifted_camera in self.shifted_cameras:
         shifted_camera.camera.set_pixel_array(
             pixel_array[shifted_camera.start_y:shifted_camera.end_y,
                         shifted_camera.start_x:shifted_camera.end_x],
             **kwargs)
Example #4
 def __init__(self, frame, **kwargs):
     """
     frame is a Mobject (it should be a rectangle) determining
     which region of space the camera displays
     """
     self.frame = frame
     Camera.__init__(self, **kwargs)
Example #5
def main():
    # Clear the screen
    subprocess.call('clear', shell=True)
    config_object = Config(os.getcwd() + '/config/config.ini').raw_config_object
    transport = UdpSocket(config_object)
    camera = Camera(config_object)

    if config_object['COMPRESSION']['switch'] == 'On':
        compressor = Compressor(config_object)
        transport.add_compression(compressor)

    if config_object['ENCRYPTION']['switch'] == 'On':
        # encryptor = Encryptor(config_object)
        # transport.add_encryption(encryptor)
        pass

    try:
        while True:
            time.sleep(float(config_object['QUALITY']['delay']))
            image = camera.get_frame()
            transport.send_data(image)

    except LookupError as e:
        print(e)
        sys.exit(2)

    except KeyboardInterrupt:
        print('keyboard interruption')
        sys.exit(1)
Example #6
class MainLoop(Thread):
    def __init__(self):
        Thread.__init__(self)
        self.camera = Camera()
        self.detector = Detector()
        self.stop_loop = False
        self.telemetry = Telemetry()
        self.position_calculator = PositionCalculator()
        self.frame = None

    def run(self):

        if Values.PRINT_FPS:
            last_time = time.time()
            ind = 0
        while True:
            try:
                if self.stop_loop:
                    break

                #self.frame = cv2.imread("images/pole_new.jpg")
                self.frame = self.camera.get_frame()

                #self.frame = cv2.resize(self.frame, (Values.CAMERA_WIDTH, Values.CAMERA_HEIGHT))
                if self.frame is None:
                    continue

                detections = self.detector.detect(self.frame)
                self.telemetry.update_telemetry()
                self.position_calculator.update_meters_per_pixel(
                    self.telemetry.altitude)
                self.position_calculator.calculate_max_meters_area()
                for d in detections:
                    lat, lon = self.position_calculator.calculate_point_lat_long(
                        d.middle_point, self.telemetry.latitude,
                        self.telemetry.longitude, self.telemetry.azimuth)
                    d.update_lat_lon(lat, lon)
                    d.area_m = self.position_calculator.calculate_area_in_meters_2(
                        d.area)
                    d.draw_detection(self.frame)

                cv2.imshow("frame", self.frame)

                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

                if Values.PRINT_FPS:
                    ind += 1
                    if time.time() - last_time > 1:
                        print("FPS:", ind)
                        ind = 0
                        last_time = time.time()

            except Exception as e:
                print("Some error accrued: ", str(e))
        self.close()

    def close(self):
        self.camera.close()
        self.stop_loop = True
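For reference, a typical way to drive this MainLoop from the outside is sketched below; the run time is arbitrary and the time import is assumed to be in scope:

# Minimal usage sketch, not taken from the original project.
loop = MainLoop()
loop.start()            # run() starts grabbing and processing frames
time.sleep(60)          # let it run for a while
loop.stop_loop = True   # run() breaks out of the loop and calls close()
loop.join()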
Example #7
 def __init__(self):
     Thread.__init__(self)
     self.camera = Camera()
     self.detector = Detector()
     self.stop_loop = False
     self.telemetry = Telemetry()
     self.position_calculator = PositionCalculator()
     self.frame = None
Example #8
 def __init__(self):
     self.distance = Echo()
     self.camera = Camera()
     self.imageMod = ImageMod()
     self.img = None
     self.cm = -1
     self.rec = ('None', 0)
     self.counter = 0
Example #9
 def __init__(self, *args, **kwargs):
     Camera.__init__(self, *args, **kwargs)
     self.phi_tracker = ValueTracker(self.phi)
     self.theta_tracker = ValueTracker(self.theta)
     self.distance_tracker = ValueTracker(self.distance)
     self.gamma_tracker = ValueTracker(self.gamma)
     self.light_source = Point(self.light_source_start_point)
     self.frame_center = Point(self.frame_center)
     self.reset_rotation_matrix()
Example #10
 def set_pixel_array(self, pixel_array, **kwargs):
     Camera.set_pixel_array(self, pixel_array, **kwargs)
     for shifted_camera in self.shifted_cameras:
         shifted_camera.camera.set_pixel_array(
             pixel_array[
                 shifted_camera.start_y:shifted_camera.end_y,
                 shifted_camera.start_x:shifted_camera.end_x],
             **kwargs
         )
Example #11
 def __init__(self, *args, **kwargs):
     Camera.__init__(self, *args, **kwargs)
     self.unit_sun_vect = self.sun_vect / np.linalg.norm(self.sun_vect)
     ## rotation_mobject lives in the phi-theta-distance space
     self.rotation_mobject = VectorizedPoint()
     ## moving_center lives in the x-y-z space
     ## It represents the center of rotation
     self.moving_center = VectorizedPoint(self.space_center)
     self.set_position(self.phi, self.theta, self.distance)
Example #12
 def __init__(self, frame=None, **kwargs):
     """
      frame is a Mobject (it should be a rectangle) determining
      which region of space the camera displays
     """
     if frame is None:
         frame = ScreenRectangle(height=FRAME_HEIGHT)
         frame.fade(1)
     self.frame = frame
     Camera.__init__(self, **kwargs)
Example #13
 def __init__(self, *cameras_with_start_positions, **kwargs):
     self.shifted_cameras = [
         DictAsObject(
             {
                 "camera": camera_with_start_positions[0],
                 "start_x": camera_with_start_positions[1][1],
                 "start_y": camera_with_start_positions[1][0],
                 "end_x": camera_with_start_positions[1][1] + camera_with_start_positions[0].pixel_shape[1],
                 "end_y": camera_with_start_positions[1][0] + camera_with_start_positions[0].pixel_shape[0],
             })
         for camera_with_start_positions in cameras_with_start_positions
     ]
     Camera.__init__(self, **kwargs)
Example #14
    def display_multiple_vectorized_mobjects(self, vmobjects, pixel_array):
        rot_matrix = self.get_rotation_matrix()

        def z_key(vmob):
            # Assign a number to a three-dimensional mobject
            # based on how close it is to the camera
            if vmob.shade_in_3d:
                return np.dot(vmob.get_center(), rot_matrix.T)[2]
            else:
                return np.inf

        Camera.display_multiple_vectorized_mobjects(
            self, sorted(vmobjects, key=z_key), pixel_array)
Example #15
 def __init__(self, *cameras_with_start_positions, **kwargs):
     self.shifted_cameras = [
         DictAsObject(
             {
                 "camera": camera_with_start_positions[0],
                 "start_x": camera_with_start_positions[1][1],
                 "start_y": camera_with_start_positions[1][0],
                 "end_x": camera_with_start_positions[1][1] + camera_with_start_positions[0].get_pixel_width(),
                 "end_y": camera_with_start_positions[1][0] + camera_with_start_positions[0].get_pixel_height(),
             })
         for camera_with_start_positions in cameras_with_start_positions
     ]
     Camera.__init__(self, **kwargs)
Example #16
    def add(self, model):

        if self.is_exist(model.manage_url):
            return False

        camera = Camera(model,
                        notifier=self.notifier,
                        object_detector_queue=self.object_detector_queue)
        camera.add_to_homekit(self.homekit_bridge)

        self.cameras[camera.id] = camera

        return camera
Example #17
 def __init__(self, frame=None, **kwargs):
     """
     frame is a Mobject, (should almost certainly be a rectangle)
      determining which region of space the camera displays
     """
     digest_config(self, kwargs)
     if frame is None:
         frame = ScreenRectangle(height=FRAME_HEIGHT)
         frame.set_stroke(
             self.default_frame_stroke_color,
             self.default_frame_stroke_width,
         )
     self.frame = frame
     Camera.__init__(self, **kwargs)
Example #18
 def capture_mobjects(self, mobjects, **kwargs):
     mobjects = self.get_mobjects_to_display(mobjects, **kwargs)
     if self.allow_object_intrusion:
         mobject_copies = mobjects
     else:
         mobject_copies = [mobject.copy() for mobject in mobjects]
     for mobject in mobject_copies:
         if isinstance(mobject, VMobject) and \
                 0 < mobject.get_num_anchor_points() < self.min_anchor_points:
             mobject.insert_n_anchor_points(self.min_anchor_points)
     Camera.capture_mobjects(
         self, mobject_copies,
         include_submobjects=False,
         excluded_mobjects=None,
     )
Example #19
 def capture_mobjects(self, mobjects, **kwargs):
     mobjects = self.get_mobjects_to_display(mobjects, **kwargs)
     if self.allow_object_intrusion:
         mobject_copies = mobjects
     else:
         mobject_copies = [mobject.copy() for mobject in mobjects]
     for mobject in mobject_copies:
         if isinstance(mobject, VMobject) and \
         0 < mobject.get_num_anchor_points() < self.min_anchor_points:
             mobject.insert_n_anchor_points(self.min_anchor_points)
     Camera.capture_mobjects(
         self, mobject_copies, 
         include_submobjects = False,
         excluded_mobjects = None,
     )
Example #20
def video_stream():
    def gen(camera):
        while True:
            frame = camera.get_frame()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
    return Response(gen(Camera()), mimetype='multipart/x-mixed-replace; boundary=frame')
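The only piece not shown here is how the view is bound to a URL. A sketch of the usual wiring with a standard Flask application object (the route path and port are assumptions):

# Hypothetical wiring around the view above; only Flask's own API is assumed.
from flask import Flask, Response

app = Flask(__name__)

@app.route('/video_feed')   # route path chosen for illustration
def video_feed():
    return video_stream()

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)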
Example #21
def main():
    # Clear the screen
    subprocess.call("clear", shell=True)
    config_object = Config(os.getcwd() + "/config/config.ini").raw_config_object

    try:

        """
        need to spin the threads and get all the jazz up and running
        probably need a separate config parser and starter classes...?
        """
        mic = Mic(config_object)
        camera = Camera(config_object)
        archiver = Archiver(config_object)
        mailer = Mailer(config_object)

        while True:
            mic_response = mic.send({"action": "listen"})
            print(".", end="", flush=True)

            if mic_response == mic.MIC_DONE:
                camera_res = camera.send({"action": "photos"})
                print("taking photos")
                if camera_res == camera.CAMERA_DONE:
                    archiver_res = archiver.send({"action": "archive"})
                    print("archiving...")
                    if archiver_res == archiver.ARCHIVER_DONE and archiver.zfilename is not None:
                        print("sending mail")
                        mailer_done = mailer.send({"action": "last", "last_archive_name": archiver.zfilename})
                        if mailer_done == mailer.MAILER_DONE:
                            print("...cleaning up")
                            archiver.send({"action": "clearup"})
                            print("cycle done...")

    except LookupError as e:
        print(e)
        sys.exit(2)

    except KeyboardInterrupt:
        print("keyboard interruption")
        camera.close()
        mic.close()
        archiver.close()
        mailer.close()
        sys.exit(1)
Example #22
    def points_to_pixel_coords(self, points):
        distance_ratios = np.divide(self.camera_distance,
                                    self.camera_distance - points[:, 2])
        scale_factors = interpolate(0, 1, distance_ratios)
        adjusted_points = np.array(points)
        for i in 0, 1:
            adjusted_points[:, i] *= scale_factors

        return Camera.points_to_pixel_coords(self, adjusted_points)
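Assuming interpolate(0, 1, alpha) reduces to alpha (as ordinary linear interpolation does), the factor applied to x and y is the standard pinhole ratio camera_distance / (camera_distance - z). A small standalone check of that arithmetic, with numbers chosen purely for illustration:

import numpy as np

# One point at z = 2 seen from camera_distance = 10: scale = 10 / (10 - 2) = 1.25
camera_distance = 10.0
points = np.array([[1.0, 1.0, 2.0]])
scale = camera_distance / (camera_distance - points[:, 2])
print(points[:, :2] * scale[:, None])   # -> [[1.25 1.25]]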
Example #23
class CVProcess(multiprocessing.Process):
    def __init__(self, results):
        super().__init__()
        print("DEBUG: CVProcess spawned")
        self.results = results
        self.camera = Camera(verbose=True)
        self.lane_recognizer = LaneRecognizer(self.camera)

    def replace_none(self, val):
        return val if val is not None else float("nan")
    
    def retrieve_state(self):
        self.results[0] = self.replace_none(self.lane_recognizer.curve_radius)
        self.results[1] = self.replace_none(self.lane_recognizer.lateral_deviation)
        self.results[2] = self.replace_none(self.lane_recognizer.last_valid)

    def stop(self):
        self.camera.stop_measuring()
        self.lane_recognizer.stop_measuring()
Example #24
    def __init__(self, mob, iterations=5, radius=10, alpha=30, camera=None):
        if camera is None:
            from camera.camera import Camera
            camera = Camera()
        arr = mob.get_binary_array()
        arr = ndimage.binary_dilation(
            arr,
            structure=circular_binary_structure(radius),
            iterations=iterations,
        )
        pixel_list = np.column_stack(np.where(arr == 1)).astype("float64")

        concave_hull = list(
            alpha_shape(pixel_list, alpha=alpha, only_outer=True))

        # sort edges
        for i, first in enumerate(concave_hull):
            loop = True
            for j, second in enumerate(concave_hull[i + 1:]):
                j += i + 1
                if first[1] == second[0]:
                    loop = False
                    concave_hull[i + 1], concave_hull[j] = \
                        concave_hull[j], concave_hull[i + 1]
            if loop and i != len(concave_hull) - 1:
                warnings.warn(
                    "the alpha shape in split into different parts. This can "
                    "be fixed by increasing alpha.")
                print(i, len(concave_hull))
                # breakpoint(context=9)
                pass

        temp = np.zeros((len(concave_hull) + 1, 2))
        for i, pair in enumerate(concave_hull):
            temp[i] = pixel_list[pair[0]]
        temp[-1] = pixel_list[concave_hull[0][0]]
        pixel_list = temp

        point_list = np.zeros((pixel_list.shape[0], pixel_list.shape[1] + 1))
        point_list[:,
                   0] = pixel_list[:,
                                   0] * camera.frame_height / camera.pixel_height
        point_list[:,
                   1] = -pixel_list[:,
                                    1] * camera.frame_width / camera.pixel_width

        # TODO: figure out optimal num_anchor_points
        ParametricFunction.__init__(
            self,
            lambda t, point_list=point_list: point_list[int(t)],
            t_min=0,
            t_max=len(point_list) - 1,
            scale_handle_to_anchor_distances_after_applying_functions=False,
        )
        self.move_to(mob.get_center())
Example #25
    def get_mobjects_to_display(self, *args, **kwargs):
        mobjects = Camera.get_mobjects_to_display(self, *args, **kwargs)
        rot_matrix = self.get_rotation_matrix()

        def z_key(mob):
            if not (hasattr(mob, "shade_in_3d") and mob.shade_in_3d):
                return np.inf
            # Assign a number to a three-dimensional mobject
            # based on how close it is to the camera
            return np.dot(mob.get_center(), rot_matrix.T)[2]

        return sorted(mobjects, key=z_key)
Example #26
    def display_multiple_vectorized_mobjects(self, vmobjects):
        camera_point = self.spherical_coords_to_point(
            *self.get_spherical_coords())

        def z_cmp(*vmobs):
            # Compare two three-dimensional mobjects based on
            # how close they are to the camera
            # return cmp(*[
            #     -np.linalg.norm(vm.get_center()-camera_point)
            #     for vm in vmobs
            # ])
            three_d_status = map(should_shade_in_3d, vmobs)
            has_points = [vm.get_num_points() > 0 for vm in vmobs]
            if all(three_d_status) and all(has_points):
                cmp_vect = self.get_unit_normal_vect(vmobs[1])
                return cmp(
                    *[np.dot(vm.get_center(), cmp_vect) for vm in vmobs])
            else:
                return 0

        Camera.display_multiple_vectorized_mobjects(
            self, sorted(vmobjects, cmp=z_cmp))
Example #27
def test():
    server = CameraServer()
    camera = Camera()
    tracker = ObjectTracker()

    def update(time):
        frame = camera.update()
        masked, pos, radius = tracker.find_object(frame, draw_circle=True)
        server.put_images(frame, masked)

    updater = Updater(0.01)
    updater.add(update)
    while updater.timer < 120:
        updater.update()

    server.stop()
Example #28
 def __init__(self, config_path):
     self.config = configparser.ConfigParser()
     self.config.read(config_path)
     self.config_path = config_path
     self.instantiating_time = time.time()
     self.picture_interval = self.config['SCHEDULE'].getfloat('PictureInterval')
     self.start_time = self.convert_time_str(self.config['SCHEDULE'].get('StartTime'))
     self.end_time = self.convert_time_str(self.config['SCHEDULE'].get('EndTime')) - self.start_time
     self.w_capture = self.config['SCHEDULE'].get('WithCapture')
     self.w_prediction = self.config['SCHEDULE'].get('WithPrediction')
     self.buffer_time = self.config['SCHEDULE'].getint('PictureBufferTime')
     self.camera_output_dir = self.config['CAMERA'].get('CameraOutputDirectory')
     self.prediction_output_dir = self.config['PREDICTION'].get('PredictionOutputDirectory')
     self.compiler = Compiler(config_path)
     self.camera = Camera(config_path)
     self.schedule = sched.scheduler(time.time, time.sleep)
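The constructor above expects a config.ini with SCHEDULE, CAMERA and PREDICTION sections. A placeholder file that satisfies those lookups can be generated as below; every value is an assumption, only the key names come from the code above:

import configparser

# Placeholder config; values are illustrative only.
config = configparser.ConfigParser()
config['SCHEDULE'] = {
    'PictureInterval': '30',
    'StartTime': '08:00',        # expected format depends on convert_time_str, not shown
    'EndTime': '18:00',
    'WithCapture': 'yes',
    'WithPrediction': 'no',
    'PictureBufferTime': '5',
}
config['CAMERA'] = {'CameraOutputDirectory': '/tmp/camera'}
config['PREDICTION'] = {'PredictionOutputDirectory': '/tmp/prediction'}

with open('config.ini', 'w') as f:
    config.write(f)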
Example #29
    def get_binary_array(self, camera=None):
        if camera is None:
            from camera.camera import Camera
            camera = Camera()
        camera.capture_mobject(self)

        camera.get_image().save("lmao.png")

        # why the transpose?
        arr = np.int16(np.all(camera.pixel_array[:, :, :3] == 0, axis=2)).T
        return arr
Example #30
    def __init__(self):
        self.Serial = ArduinoSerial('/dev/ttyACM0', 115200)  # TODO: auto-detect the serial port

        self.Color = Color(self.Serial)
        self.Distance = [Distance(self.Serial, i) for i in range(1, 4)]
        self.Gyroscope = Gyroscope(self.Serial)
        self.Temperature = [Temperature(self.Serial, i) for i in range(1, 4)]

        self.Camera = Camera()

        self.Motor = Motor(self.Serial)
        self.Servo = Servo(self.Serial)

        self.Sensors = [self.Color, self.Distance, self.Gyroscope, self.Temperature]

        self.Maze = Maze(self.Sensors, self.Servo, self.Camera)
        self.ActualX = self.Maze.StartX
        self.ActualY = self.Maze.StartY
        self.ActualZ = self.Maze.StartZ

        self.StartTime = 0
        self.StartHeading = 0

        self.stop()
Example #31
    def __init__(self):
        gpio = pigpio.pi()

        self.Color = Color(7)
        self.Distance = [Distance(2), Distance(3), Distance(0), Distance(1)]
        self.Gyroscope = Gyroscope('/dev/ttyAMA0')
        self.Temperature = [Temperature(i) for i in range(85, 89)]

        self.Camera = Camera()

        self.LedRed = Led(gpio, 20)
        self.LedYellow = Led(gpio, 21)

        self.LedRed.off()
        self.LedYellow.off()

        self.ButtonStart = Button(gpio, 9)
        self.ButtonStop = Button(gpio, 10)

        self.Motor = [Motor(19, 11, 26), Motor(13, 6, 5)]
        self.Servo = Servo(gpio, 12)

        self.Sensors = [
            self.Color, self.Distance, self.Gyroscope, self.Temperature
        ]
        self.Actuators = [self.LedRed, self.Servo]

        self.Maze = Maze(self.Sensors, self.Actuators, self.Camera)
        self.ActualX = self.Maze.StartX
        self.ActualY = self.Maze.StartY
        self.ActualZ = self.Maze.StartZ

        self.StartTime = 0
        self.StartHeading = 0

        self._stop()
Example #32
import sys
from PyQt4 import QtGui
from gui.gui import Gui
from gui.threadgui import ThreadGui
from camera.camera import Camera
from camera.threadcamera import ThreadCamera
import signal

signal.signal(signal.SIGINT, signal.SIG_DFL)

if __name__ == '__main__':

    camera = Camera()
    print(camera)
    app = QtGui.QApplication(sys.argv)
    window = Gui()
    window.setCamera(camera)
    window.show()

    t1 = ThreadCamera(camera)
    t1.start()

    t2 = ThreadGui(window)
    t2.start()

    sys.exit(app.exec_())
Example #33
def video(request):

    return StreamingHttpResponse(
        gen(Camera()),
        content_type="multipart/x-mixed-replace; boundary=frame")
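The gen helper is not shown in this view; it is most likely the same per-frame MJPEG generator used in Example #20 above. A hedged reconstruction for reference only:

# Assumed shape of the missing gen() helper, mirroring Example #20.
def gen(camera):
    while True:
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')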
Example #34
 def __init__(self, frame_retrieval, index=0, manager=None):
     Camera.__init__(self, frame_retrieval)
     self.index = index
     self.manager = manager
Example #35
 def init_background(self):
     Camera.init_background(self)
     for shifted_camera in self.shifted_cameras:
         shifted_camera.camera.init_background()
Example #36
class GameCore:

    def __init__(self, _main):
        
        self.main = _main

        ## Set parent nodes in the scenegraph
        self.physicsParentNode = render.attachNewNode("physicsParentNode")
        self.levelParentNode = render.attachNewNode("levelParentNode")
        self.objectsParentNode = render.attachNewNode("objectsParentNode")
        self.lightsParentNode = render.attachNewNode("lightsParentNode")
        self.aiParentNode = render.attachNewNode("aiParentNode")

        ## Start Event Manager
        self.eventMgr = EventMgr(self)

    def startGame(self, _playerName="DefaultPlayer"):
        self.autoShader()
        self.eventMgr.start()
        print "---> Started EventSystem"
        self.loadPhysicsSystem()
        print "---> Loaded Physics System"

        self.loadInputSystem()
        print "---> Loaded Input System"

        self.loadLevelSystem()
        print "---> Loaded Level System"

        self.loadPlayerSystem()
        print "---> Loaded Player System"

        self.loadCameraSystem()
        print "---> Loaded Camera System"

    def stopGame(self):
        pass

##------- SUB SYSTEMS -------##
    def loadPhysicsSystem(self):
        self.physicsMgr = PhysicsMgr(self)
        self.physicsMgr.startPhysics()
        self.physicsMgr.setPhysicsDebug()

    def loadInputSystem(self):
        self.input = Input(self)
        #self.input.start()

    def loadLevelSystem(self):
        self.level = Level(self)
        self.level.buildLevel("assets/level/intro")
        self.level.start()

    def loadPlayerSystem(self):
        self.player = Player(self, "Main Man")
        self.player.start()

    def loadCameraSystem(self):
        self.camera = Camera(self)
        self.camera.start()

    def autoShader(self):
        render.setShaderAuto()
Example #37
 def loadCameraSystem(self):
     self.camera = Camera(self)
     self.camera.start()
Example #38
 def get_image(self, camera=None):
     if camera is None:
         from camera.camera import Camera
         camera = Camera()
     camera.capture_mobject(self)
     return camera.get_image()
Example #39
 def points_to_pixel_coords(self, points):
     return Camera.points_to_pixel_coords(self, np.apply_along_axis(self.mapping_func, 1, points))