Ejemplo n.º 1
0
def hyperloop():
    """Replay previously captured images through the Camera pipeline.

    Arguments are scanned from ``sys.argv``:
      - a directory path: folder containing the images to replay
      - an integer: index of the image to start from
      - ``-lap`` / ``-up`` / ``-lo`` / ``-s``: approach signal to preload

    Keys during replay: 'n' next image, 'b' previous image, 'q' quit.
    """
    imageFolder = None
    imageNum = 0

    logger = Logger('relog')
    logger.setLogLevel('debug')
    logger.info('Started replay')

    state = State()
    # Map CLI flags to approach signals. NOTE(review): "-s" mapped to
    # Signal.UPPER in the original too -- confirm that is intended.
    flag_to_signal = {
        "-lap": Signal.LAP,
        "-up": Signal.UPPER,
        "-lo": Signal.LOWER,
        "-s": Signal.UPPER,
    }
    for arg in sys.argv:
        if os.path.isdir(arg):
            imageFolder = arg
        elif arg.isdigit():
            imageNum = int(arg)
        elif arg in flag_to_signal:
            state.Approaching = flag_to_signal[arg]

    if state.Approaching != Signal.LAP:
        state.setStopSignal(1)

    camera = Camera(None, True)

    if imageFolder:
        # program loop
        files = sorted_aphanumeric(os.listdir(imageFolder))
        # FIX: clamp the start index; an out-of-range CLI index (or an
        # empty folder) previously raised IndexError on every iteration,
        # producing an endless error loop.
        if files and imageNum >= len(files):
            imageNum = len(files) - 1
        while files:
            try:
                file = os.path.join(imageFolder, files[imageNum])
                logger.info("[" + str(imageNum) + "] Loaded file: " + file)
                image = cv2.imread(file, 1)

                camera.capture(image)

                key = cv2.waitKey(0) & 0xFF

                if key == ord("n"):
                    if imageNum + 1 < len(files):
                        imageNum += 1
                elif key == ord("b"):
                    if imageNum - 1 >= 0:
                        imageNum -= 1
                elif key == ord('q'):
                    break

            except KeyboardInterrupt:
                break
            except Exception as e:
                logger.logError(e)
                traceback.print_exc(limit=3, file=sys.stdout)

    logger.info('Stopped')
Ejemplo n.º 2
0
def on_open(ws):
    """Take a camera snapshot and upload it, base64-encoded, over the
    freshly opened websocket *ws*."""
    camera = Camera()
    filename = '%s.jpg' % uuid.uuid4()
    camera.snapshot(filename)
    # FIX: close the file handle (previously leaked) and decode the
    # base64 payload to str -- json.dumps cannot serialize bytes.
    with open('images/' + filename, "rb") as fh:
        raw = fh.read()
    file_data = base64.b64encode(raw).decode('ascii')
    ws.send(json.dumps({'upload_file': file_data}))
    time.sleep(0.2)
Ejemplo n.º 3
0
    def __init__(self, config):
        """Keep the configuration, load the camera calibration YAML and
        build the Camera from both."""
        self.config = config

        # The calibration file lives one directory up, at the path named
        # in the 'cam' section of the configuration.
        calibration_path = "../" + config['cam']['calibration']
        with open(calibration_path, 'r') as stream:
            calibration = yaml.load(stream, Loader=yaml.FullLoader)

        self.camera = Camera(config, calibration)
Ejemplo n.º 4
0
    def __init__(self):
        """Set up the capture source, display window and tracking state."""
        # pending Task objects drained by the main loop
        self.tasks = []

        # video source and its display window
        self.camera = Camera()
        self.window = Window('Camera')

        # detection / tracking state
        self.trackers = []
        self.should_detect = True

        self.color_filter = ColorFilter()
Ejemplo n.º 5
0
def start():
    """Pull frames from a coastal webcam stream, stitch them into a
    panorama, and either encode the current frame for an HTTP stream
    (when TEST is set) or publish the rendered panorama to GCS.

    Camera name, framerate and capture length come from the CAMNAME,
    FR and LENGTH environment variables.
    """
    cams = [{
        "tz":
        "ID",
        "url":
        'https://cf-stream.coastalwatch.com/cw/' +
        os.environ.get('CAMNAME', 'bondicamera') + '.stream/chunklist.m3u8',
        "framerate":
        int(os.environ.get('FR', '20')),
        "videolength":
        os.environ.get('LENGTH', '00:06:00')
    }]

    cam = Camera(cams, test=TEST)
    cam.start = True
    panarama = None  # seeded on the first frame below
    for img in cam:
        if cam.start:
            # first frame: seed the panorama with the frame dimensions
            height, width, _ = img.shape
            panarama = Panarama(img, height, width)
            cam.start = False
        else:
            panarama.stitch(img)

        # if no more states, ask the camera to reset on the next frame
        if not panarama.REQUIREDSTATESBEFORERESET:
            cam.resetnext = True

        # either display at constant framerate (if zero display last frame)
        if (cam.framerate > 0
                and cam.f % cam.framerate == 0) or (cam.framerate == 0
                                                    and cam.resetnext):
            if TEST:
                # FIX: ndarray.tostring() is deprecated (removed in
                # NumPy 2.0); tobytes() returns the identical payload.
                frame = cv2.imencode('.jpg', img)[1].tobytes()
                # yield (b'--frame\r\n'
                #       b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
            else:
                frame = np.flip(panarama.raw, axis=2)
                (h, w, _) = frame.shape
                im = Image.fromarray(frame)
                name = cams[0]['url'][:-22].split('/')[-1]
                # FIX: renamed 'dir' -> 'out_path'; 'dir' shadows a builtin.
                out_path = "/tmp/frame-" + name + ".jpeg"
                #im = im.resize((w, h*2), Image.ANTIALIAS)
                im.save(out_path, quality=100)
                # NOTE(review): shell=True with string concatenation --
                # 'name' derives from the CAMNAME env var, so it must stay
                # trusted; prefer a list argv with shell=False if possible.
                sp.check_call('gsutil -m mv -a public-read ' + out_path +
                              ' gs://handy-contact-219622.appspot.com/frame-' +
                              name + '.jpeg',
                              shell=True)
Ejemplo n.º 6
0
    def __init__(self, parent=None):
        """Initialise the GL widget: paint flags, mouse tracking, the
        2D camera transform and persisted window geometry."""
        QtOpenGL.QGLWidget.__init__(self, parent)

        self.setAutoFillBackground(False)

        # we repaint the whole surface ourselves, directly on screen
        for flag in (QtCore.Qt.WA_OpaquePaintEvent,
                     QtCore.Qt.WA_NoSystemBackground,
                     QtCore.Qt.WA_StaticContents,
                     QtCore.Qt.WA_PaintOnScreen):
            self.setAttribute(flag, True)

        self.setMouseTracking(True)

        # mouse / zoom bookkeeping
        self._mousePos = QtCore.QPoint()
        self._zoomAnchor = QtCore.QPoint()
        self.zoomPos1 = QtCore.QPoint()
        self.zoomPos2 = QtCore.QPoint()

        # 2D view transform
        self.camera = Camera()
        self.camera.identity()

        # window geometry persisted across sessions (~/editor.settings)
        self.settings = {}
        self.settingsFilename = os.path.expanduser("~/%s.%s" % ("editor", "settings"))
        self.loadSettings()
        self.move(self.settings.get("x", 0), self.settings.get("y", 0))
        self.resize(self.settings.get("w", 450), self.settings.get("h", 700))
Ejemplo n.º 7
0
	def __init__(self):
		"""Boot the vehicle: instantiate the sensor stack, query the
		heading/distance to the fixed destination, then run the worker
		threads to completion.

		FIX: removed the C-style semicolons and the unused pure locals
		(destiny_coords, partial_degree, camera_frame). The sensor
		objects are kept because their constructors initialise hardware.
		"""
		# Sensor/actuator singletons -- constructors have side effects.
		camera = Camera()
		lidar = Lidar()
		radar = Radar()
		imu = Imu()
		navigation = Navigation()

		# Heading and distance to the fixed destination coordinates.
		# NOTE(review): 'destiny' is never read afterwards -- the call is
		# kept in case it has side effects on the IMU; confirm.
		destiny = imu.get_degrees_and_distance_to_gps_coords(29.15168, -81.01726)

		# Create new threads
		thread_camera = CameraThread(1, "CameraThread")
		thread_lidar = LidarThread(2, "LidarThread")
		thread_navigation = NavigationThread(3, "NavigationThread")
		thread_main = MainThread(4, "MainThread")

		# Start all threads, then wait for each of them to finish.
		thread_camera.start()
		thread_lidar.start()
		thread_main.start()
		thread_navigation.start()

		thread_camera.join()
		thread_lidar.join()
		thread_main.join()
		thread_navigation.join()

		print("Terminating Main Program")
Ejemplo n.º 8
0
    def __init__(self):
        """Bootstrap the arena: window caption, camera, tilemap,
        characters and the game clock."""
        # Pygame window title
        pygame.display.set_caption("SANTARENA")

        # screen (camera)
        self.camera = Camera()

        # world tiles
        self.tilemap = Tilemap(MAP_H, MAP_W)

        # player and opponents
        self.character = Character()
        self.enemies = []

        # frame timing
        self.clock = Clock()
        self.dt = 0
Ejemplo n.º 9
0
 def __init__(self):
     """Prepare the scene: organism registry, camera, drawing surface
     and the simulation clock (initially paused)."""
     super().__init__()
     self.organisms = {}
     self.camera = Camera(cfg.SCENE_WIDTH, cfg.SCENE_HEIGHT)
     self.surface = pygame.Surface((cfg.SCENE_WIDTH, cfg.SCENE_HEIGHT))
     self.clock = pygame.time.Clock()
     self.zoom = 1
     self.interval = 0
     self.play = False
    def __init__(self):
        """Initialise parameters, then wire up the robot, the ball
        detector and the camera."""
        self.initParam()

        # pan/tilt motion starts at rest, no frame received yet
        self.pan_speed = 0
        self.tilt_speed = 0
        self.latestImage = None

        # hardware / vision helpers
        self.robot = Robot()
        self.ball_detector = BallDetector()
        self.camera = Camera()
Ejemplo n.º 11
0
    def __init__(self):
        """Create the camera/window pair and reset all tracking state."""
        self.tasks = []
        self.trackers = []
        self.should_detect = True

        self.camera = Camera()
        self.window = Window('Camera')

        self.color_filter = ColorFilter()
    def __init__(self, fps, duration, W, H, R, osm_cart, usgs_cart):
        """Per-process renderer state: frame budget, camera rig and a
        scratch buffer for the projected USGS point cloud."""
        self._id = mp.current_process().name

        # timing: total frame count follows from duration * fps
        self._fps = fps
        self._duration = duration
        self._frames = self._duration * self._fps

        # geometry
        self._W = W
        self._H = H
        self._R = R
        self._dist = 3.0 * self._R

        # cartesian point clouds
        self._osm_cart = osm_cart
        self._usgs_cart = usgs_cart

        # camera centred on the image midpoint
        self._camera = Camera()
        self._camera.set_focal(50.0)
        self._camera.set_factor(30)
        self._camera.set_center(self._W / 2, self._H / 2)

        # 2D projection buffer, one column per USGS point
        self._usgs_cart_2d = np.zeros((2, self._usgs_cart.shape[1]), dtype = 'f4')
Ejemplo n.º 13
0
    def __init__(self):
        """Load ./<config_filename>, validate the required sections,
        build the camera / backend-notifier / parking-lot collaborators,
        write any config changes back, and enter the main loop.

        Calls sys.exit() when the config file or any of the 'default',
        'camera', 'backend' or 'parking_lot' sections is missing.
        """
        # read config file
        config = configparser.ConfigParser()
        config.read(self.config_filename)
        refresh_in_seconds = 1  # fallback; overwritten from [default] below

        # exit on unavailable config file
        # NOTE: a ConfigParser always contains the DEFAULTSECT, so a
        # length of 1 means nothing was actually loaded.
        if len(config) == 1:
            sys.exit("Config file not found or empty, please create ./%s" %
                     (self.config_filename))

        # exit if sections not present
        if 'default' not in config.sections():
            sys.exit("Default settings not found")

        refresh_in_seconds = int(config['default']['refresh_in_seconds'])
        # exit if sections not present
        if 'camera' not in config.sections():
            sys.exit("Camera settings not found")

        self.cam = Camera(config['camera'])

        # exit if sections not present
        if 'backend' not in config.sections():
            sys.exit("Backend settings not found")
        self.backend_notifier = BackendNotifier(config['backend'])

        # exit if sections not present
        if 'parking_lot' not in config.sections():
            sys.exit("Parking lot settings not found")

        self.parking_lot = ParkingLot(config['parking_lot'], self.cam)

        # write any changes to config
        with open(self.config_filename, 'w') as configfile:
            config.write(configfile)

        # Execute Main Loop -- does not return during normal operation
        self.execute(refresh_in_seconds)
def add_faces():
    """Capture 25 face images for a new face ID and store them under
    DATABASE_DIR/<face_id>/.

    The face ID comes from sys.argv[1]. If a directory for that ID
    already exists, a JSON error message is printed instead. Press ESC
    in the video window to abort (exits the process).
    """
    if not os.path.exists(DATABASE_DIR):
        os.mkdir(DATABASE_DIR)
    # face_id = input('Name: ')
    face_id = sys.argv[1]
    # NOTE(review): plain concatenation -- assumes DATABASE_DIR ends with
    # a path separator; confirm against its definition.
    face_dir = DATABASE_DIR + face_id
    if not os.path.exists(face_dir):
        os.mkdir(face_dir)
        camera = Camera()
        face_detector = FaceDetector(FACE_CASCADES)
        counter = 1
        timer = 0
        cv2.namedWindow('Video Feed', cv2.WINDOW_AUTOSIZE)
        cv2.namedWindow('Saved Face', cv2.WINDOW_NORMAL)
        # capture loop: runs until 25 images have been saved
        while counter < 26:
            frame = camera.get_frame()
            # frame = camera.ip_camera(True)
            face_coordinate = face_detector.detect(frame)
            if len(face_coordinate):
                shape = 'rectangle'
                frame, face_image = get_images(frame, face_coordinate, shape)
                # save roughly every 5th detection (timer advances by 20
                # per iteration, so timer % 100 == 20 hits once per cycle)
                if timer % 100 == 20:
                    cv2.imwrite(face_dir + '/' + str(counter) + '.jpg', face_image[0])
                    print(json.dumps({
                        'imageCounter': str(counter)
                    }))
                    counter += 1
                    cv2.imshow('Saved Face', face_image[0])

            cv2.imshow('Video Feed', frame)
            # NOTE(review): two waits per iteration (~150 ms total); the
            # first result is discarded, only the second checks for ESC.
            cv2.waitKeyEx(50)
            timer += 20
            if cv2.waitKey(100) & 0xFF == 27:
                sys.exit()
    else:
        print(json.dumps({
            'message': 'Face ID already exist',
            'status': 'false'
        }))
Ejemplo n.º 15
0
    def __init__(self):
        """Initialise parameters, bring up the robot and camera, centre
        the head and start the periodic update timer."""
        self.initParam()
        self.tilt_speed = 0
        self.pan_speed = 0

        # hardware / vision helpers; frames arrive via self.imageCallback
        self.robot = Robot()
        self.ball_detector = BallDetector()
        self.camera = Camera(self.imageCallback)

        # move robot head to center
        self.robot.center()

        # periodic robot update at self.rate Hz
        rospy.Timer(rospy.Duration(1.0 / self.rate), self.robotUpdate)
Ejemplo n.º 16
0
      def __init__(self, Story):
          """Build the world: resources, camera, scheduled timekeeper
          tasks and field objects; then start the story and run."""
          self.rsc = Resources()
          self.camera = Camera(self.rsc.graphics, (0, 100, 0, 50))

          # scheduling: drawing, input polling and the two update rates
          self._timekeeper = TimeKeeper()
          self._timekeeper.schedule('draw', 1.0 / settings.target_fps)
          self._timekeeper.schedule('input', 1.0 / 100.0)
          self._timekeeper.schedule('update_game', 1.0 / 50.0, game = True)
          self._timekeeper.schedule('update', 1.0 / 50.0)
          self._timekeeper.set_game_speed(settings.game_speed)

          # world objects: one field instance per registered field type
          self.fields = {fieldtype: fieldtype() for fieldtype in fieldtypes}
          self.actors = []

          # initiate story and enter the main loop
          self.story = Story(self)
          self.run()
Ejemplo n.º 17
0
class Main:
    """Interactive tracking loop: one camera feed, one display window,
    and a set of template trackers driven by commands queued from a
    CmdRunner thread."""

    def __init__(self):
        self.tasks = []            # queued Task objects, drained in loop()

        self.camera = Camera()
        self.window = Window('Camera')

        self.should_detect = True  # run (re-)detection on the next frame
        self.trackers = []

        self.color_filter = ColorFilter()

    def add_target(self, filename):
        """Register a new tracker built from a template image file."""
        self.trackers.append(Tracker(filename))

    def clear(self):
        """Reset the state of every tracker."""
        for tracker in self.trackers:
            tracker.clear()

    def loop(self):
        """Main loop: execute queued tasks, grab a frame, run detection
        and tracking, refresh the window; exits when the command runner
        thread dies."""
        runner = CmdRunner(self)
        runner.start()

        # FIX: Thread.isAlive() was removed in Python 3.9 -> is_alive()
        while runner.is_alive():
            while len(self.tasks) > 0:
                task = self.tasks.pop(0)
                task.execute()

            try:
                img = self.camera.get_frame()

                if img is None:
                    self.window.clear()
                    continue

                img = cv2.resize(img, (640, 480))

                # TODO: show color mask
                color_mask = self.color_filter.filter(img)

                next_should_detect = self.should_detect
                for tracker in self.trackers:
                    if self.should_detect:
                        rect = tracker.detect(img)

                        if rect is not None:
                            tracker.update_window(img, rect)
                            self.window.draw_rectangle(rect, (255, 0, 255))
                        next_should_detect = False

                    rect = tracker.track(img)

                    if rect is None:
                        # target lost: force re-detection next frame
                        next_should_detect = True
                        continue

                    self.window.draw_rectangle(rect, (255, 0, 0))

                self.should_detect = next_should_detect
            except Exception as e:
                # FIX: 'print >> sys.stderr, e' is Python-2 syntax and a
                # SyntaxError under Python 3.
                print(e, file=sys.stderr)
            finally:
                self.window.update()
                runner.join(0)

    def _add_task(self, task):
        """Queue a task and force a re-detection pass."""
        self.should_detect = True
        self.tasks.append(task)

    def do_add_target(self, filename):
        self._add_task(Task(self.add_target, (filename, )))

    def do_clear(self):
        self._add_task(Task(self.clear))

    def do_snapshot(self, filename):
        self._add_task(Task(self.window.snapshot, (filename, )))

    def do_switch_camera(self, cid):
        self._add_task(Task(self.camera.switch, (cid, )))
Ejemplo n.º 18
0
    def keyboard_event_handler(self):
        """Poll one key press and dispatch viewer actions.

        ESC quits and closes the multi-stream source; 't' toggles
        thresholding, 'g' toggles grayscale, 'f' toggles face detection,
        'z'/'u' lower/raise the threshold limit by threshold_delta.
        """
        k = cv2.waitKey(1)
        if k == 27:  # ESC
            self.quit = True
            self.ms.close()

        elif k == ord('t'):
            self.ip.toggle_threshold = not self.ip.toggle_threshold
        elif k == ord('g'):
            # FIX: this branch appeared twice; the duplicate was unreachable.
            self.ip.toggle_grayscale = not self.ip.toggle_grayscale
        elif k == ord('f'):
            self.ip.toggle_face_detection = not self.ip.toggle_face_detection
        elif k == ord('z'):
            self.ip.threshold_limit -= self.ip.threshold_delta
        elif k == ord('u'):
            self.ip.threshold_limit += self.ip.threshold_delta

    def run(self):
        """Pump both camera feeds and process keyboard input until the
        user quits."""
        while not self.quit:
            for cam_index in (0, 1):
                self.ms.update_cam(cam_index)
            self.keyboard_event_handler()


# Script entry point: view two V4L2 camera devices side by side.
if __name__ == '__main__':
    lv = LiveViewer(Camera('/dev/video1'), Camera('/dev/video3'))
    lv.run()
Ejemplo n.º 19
0
# --- capture configuration (module-level defaults) ---
remote_camera = False  # True: receive frames from the remote 'body' over TCP
initial_delay = 0
save = False
load = False
saved_directory = './saved'
body_address = ('192.168.1.4', 8089)  # remote body host/port
height = 120
width = 160
randomized_input = False

# NOTE(review): temp_buffer_length / buffer_length are defined elsewhere
# in this module -- confirm the invariant before editing it.
assert 2 * temp_buffer_length <= buffer_length, "Size of temp_buffer_length is too large!"

# shared state flags read by the worker loops
FLAG = {'time_point': 1., 'running': True, 'buffer_avg': 0., 'stream_avg': 0.}

# camera setup
cam = Camera()

if remote_camera:
    # connect to body
    connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    connection.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    connection.settimeout(100)
    connection.connect(body_address)

    # receive stream
    cam.receive(connection)
else:
    # local camera: set the frame size and start capturing
    cam.setSize(width, height)
    cam.capture()

# grab one sample frame (e.g. to derive shapes downstream)
image_sample = cam.getFrame()
from lib.home_surveillance import HomeSurveillance

# Board temperature reading (Raspberry Pi)
cpu = CPUTemperature()
cputemp = cpu.temperature

#################
# Instantiation #
#################
# Create an instance of the telegram bot
bot = Bot(token=TOKEN_ID)

# Create an instance of HomeSurveillance
surveillance = HomeSurveillance()

# Create an instance of the camera (PiCamera + folder for registration shots)
camera = Camera(PiCamera(), REGISTRATION_FOLDER)

###########
# Utility #
###########


def restricted(func):
    """Restrict usage of func to allowed users only and replies if necessary"""
    @wraps(func)
    def wrapped(update, context, *args, **kwargs):
        chat_id = update.effective_chat.id
        if int(chat_id) != int(CHAT_ID):
            update.message.reply_text('Unauthorized access.')
            return None  # quit function
        return func(update, context, *args, **kwargs)
Ejemplo n.º 21
0
#!/usr/bin/env python3
#-*-coding:utf-8-*-

import pickle
import gzip
from lib.server import Server
from lib.camera import Camera, MultiStream
from lib.image_utils import resize, grayscale
import cv2


class CamServer(Server):
    """Serves gzipped, pickled grayscale frames from one of the two
    module-level camera streams; the request selects '0' or '1'."""

    def func(self, sreq):
        """Return one compressed frame for request *sreq*, or b"" for an
        unknown stream id."""
        streams = {'0': s0, '1': s1}
        stream = streams.get(sreq.strip())
        if stream is None:
            return b""
        frame = grayscale(resize(stream.__next__(), (320, 240)))
        return gzip.compress(pickle.dumps(frame))


# Module entry: open both V4L2 devices, start their background streams,
# then serve frames until the server stops.
s0 = Camera('/dev/video1').start_stream()
s1 = Camera('/dev/video3').start_stream()
cs = CamServer()
cs.start()
Ejemplo n.º 22
0
def main():
    '''
    Main processing loop: capture webcam frames, stream them to the
    control server over TCP, and apply the speed/steering commands the
    server sends back. Runs until main_thread_running is cleared, a stop
    is requested, or an error occurs.
    '''
    global stop_thread_running
    global main_thread_running

    # I2C Bus number
    BUSNUM = 1

    # Car settings
    HANDLE_NEUTRAL = 95  # steering neutral position
    HANDLE_ANGLE = 42  # maximum steering angle (left/right)
    # Camera settings
    COLS = 160
    ROWS = 120
    FPS = 5
    # Connection settings
    HOST = '192.168.0.77'  # Server IP Address
    PORT = 6666  # Server TCP Port
    #HOST = '34.224.145.198' # AWS
    #PORT = 8091 # AWS
    #HOST = '192.168.0.17' # PC
    #PORT = 8091 # PC

    sock = None
    car = None  # FIX: predeclare so the cleanup below cannot NameError
    try:
        ########################################
        # Prepare the car
        ########################################
        car = Car(busnum=BUSNUM)
        speed = 0
        ########################################
        # Prepare the camera
        ########################################
        camera = Camera()
        camera.init_webcam(cols=COLS, rows=ROWS, fps=FPS, save=False)
        frame_count = 0
        ########################################
        # Prepare the connection
        ########################################
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server_address = (HOST, PORT)
        sock.connect(server_address)
        is_need_header_send = True

        ########################################
        # Send the image size and FPS to the server
        ########################################
        message = "CAMERA," + str(COLS) + "," + str(ROWS) + "," + str(FPS / 5)
        sock.sendall(message.encode('ascii'))

        ########################################
        # Start processing
        ########################################
        while main_thread_running:
            # FIX: time.clock() was removed in Python 3.8 and its value
            # was never used; keep only the wall-clock timestamp.
            start_time = time.time()
            if not stop_thread_running: break  # emergency stop requested
            # Read camera frames (drain the buffer, keep the newest one)
            for i in range(5):
                if not camera.webcam_capture():
                    break
            frame_count += 1

            ########################################
            # Prepare the data to send
            ########################################
            # FIX: renamed 'bytes' -> 'jpeg_bytes' (shadowed the builtin);
            # tostring() is deprecated -> tobytes() (identical payload).
            jpeg_bytes = cv2.imencode('.jpg', camera.cv_bgr)[1].tobytes()
            size = len(jpeg_bytes)

            ########################################
            # First send the data size, then wait for the reply
            ########################################
            if is_need_header_send:
                # send image size to server
                #print("send size = {}".format(size))
                sock.sendall(("SIZE %s" % size).encode('ascii'))
                answer = sock.recv(4096)
                answer = answer.decode('ascii')
                #print('answer = {}'.format(answer))

            ########################################
            # Send the image data, then wait for the reply
            ########################################
            if answer == 'GOT SIZE':
                is_need_header_send = False
                sock.sendall(jpeg_bytes)
                # 0.5 s after sending the frame, stop the car that is
                # still executing the previous control command
                time.sleep(0.5)
                car.stop()
                # check what server send
                answer = sock.recv(4096)
                answer = answer.decode('ascii')
                print('answer = {}'.format(answer))

            ########################################
            # Vehicle control
            ########################################
            if answer.startswith('CONTROL'):
                is_need_header_send = True
                _, speed, handle_angle = answer.split(',')
                speed = int(float(speed))
                handle_angle = int(float(handle_angle))
                print("CONTROL,speed={},handle_angle={}".format(
                    speed, handle_angle))

                # Clamp to the physically possible steering range
                if handle_angle > HANDLE_ANGLE:
                    handle_angle = HANDLE_ANGLE
                if handle_angle < -1 * HANDLE_ANGLE:
                    handle_angle = -1 * HANDLE_ANGLE

                if not stop_thread_running: break  # emergency stop requested

                ########################################
                # Drive the car
                ########################################
                car.set_angle(HANDLE_NEUTRAL + handle_angle)
                if speed == 0:
                    car.stop()
                else:
                    car.forward(speed)
            print("FPS:{} ".format(1 / (time.time() - start_time)))
            # Fast servo motion can brown-out the Pi and kill the camera,
            # so throttle the control loop.
            time.sleep(0.2)
    except Exception:
        # FIX: was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt silently.
        import traceback
        traceback.print_exc()
    finally:
        main_thread_running = False
        stop_thread_running = False
        # FIX: cleanup moved into 'finally' and guarded, so it runs on
        # every exit path and cannot NameError if Car() itself failed.
        if sock is not None:
            sock.close()
        if car is not None:
            car.stop()
            car.set_angle(HANDLE_NEUTRAL)
    return
Ejemplo n.º 23
0
class GLView(QtOpenGL.QGLWidget):
    """OpenGL widget with a 2D pan/zoom camera and window geometry
    persisted to ~/editor.settings."""
    _zoom_coeff = 0.05         # per-pixel zoom factor for right-drag scaling
    _scale_coeff = 1.2         # wheel zoom-in factor
    _scale_coeff1 = 1/_scale_coeff  # wheel zoom-out factor (reciprocal)

    def __init__(self, parent=None):
        """Set paint flags, enable mouse tracking, create the camera
        transform and restore persisted window geometry."""
        QtOpenGL.QGLWidget.__init__(self, parent)

        self.setAutoFillBackground(False)

        # we repaint the whole surface ourselves, directly on screen
        self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent, True)
        self.setAttribute(QtCore.Qt.WA_NoSystemBackground, True)

        self.setAttribute(QtCore.Qt.WA_StaticContents, True)
        self.setAttribute(QtCore.Qt.WA_PaintOnScreen, True)

        self.setMouseTracking(True)

        # mouse / zoom bookkeeping
        self._mousePos = QtCore.QPoint()
        self._zoomAnchor = QtCore.QPoint()
        self.zoomPos1 = QtCore.QPoint()
        self.zoomPos2 = QtCore.QPoint()

        # 2D view transform
        self.camera = Camera()
        self.camera.identity()

        # window geometry persisted across sessions
        self.settings = {}
        self.settingsFilename = os.path.expanduser("~/%s.%s" % ("editor", "settings"))

        self.loadSettings()

        self.move(self.settings.get("x", 0), self.settings.get("y", 0))
        self.resize(self.settings.get("w", 450), self.settings.get("h", 700))


    def saveSettings(self):
        """Write the settings dict to the settings file via pprint."""
        fout = open(self.settingsFilename, "w")
        pprint.pprint(self.settings, fout)
        fout.close()

    def loadSettings(self):
        """Read the settings dict back; missing/corrupt file is ignored.

        NOTE(review): eval() on file contents -- only safe while the
        settings file is trusted local state written by saveSettings().
        """
        try:
            fin = open(self.settingsFilename, "r")
            self.settings = eval(fin.read())
            fin.close()
        except:
            pass

    def moveEvent(self, event):
        # remember window position for the next session
        self.settings["x"] = self.x()
        self.settings["y"] = self.y()
        QtOpenGL.QGLWidget.moveEvent(self, event)

    def resizeEvent(self, event):
        # remember window size for the next session
        self.settings["w"] = self.width()
        self.settings["h"] = self.height()
        QtOpenGL.QGLWidget.resizeEvent(self, event)

    def closeEvent(self, event):
        # persist geometry before the widget goes away
        self.saveSettings()
        QtOpenGL.QGLWidget.closeEvent(self, event)

    def glInit(self):
        QtOpenGL.QGLWidget.glInit(self)

    def initializeGL(self):
        """Set up a flat 2D orthographic projection (no depth/lighting)."""
        glDisable(GL_DEPTH_TEST)
        glDisable(GL_LIGHTING)

        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        # y axis points down, matching Qt widget coordinates
        glOrtho(0.0, self.width(), self.height(), 0.0, -1.0, 1.0)

        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()

    def convertNumpyToGLTexture(self, rgb): # rgb should be numpy array
        """Upload *rgb* as a clamped, nearest-filtered GL texture and
        return its texture id.

        NOTE(review): shape[0]/shape[1] are passed as width/height; for a
        (rows, cols, 3) array that order looks swapped -- confirm the
        expected array layout before relying on this.
        """
        glEnable(texture_mode)

        tex_id = glGenTextures(1)

        glBindTexture(texture_mode, tex_id)

        glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
        glTexImage2D(texture_mode, 0, GL_RGB, rgb.shape[0], rgb.shape[1], 0, GL_RGB, GL_UNSIGNED_BYTE, rgb)
        glTexParameterf(texture_mode, GL_TEXTURE_WRAP_S, GL_CLAMP)
        glTexParameterf(texture_mode, GL_TEXTURE_WRAP_T, GL_CLAMP)
        glTexParameterf(texture_mode, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(texture_mode, GL_TEXTURE_MIN_FILTER, GL_NEAREST)

        glDisable(texture_mode)

        return tex_id

    def resizeGL(self, width, height):
        # suspend repaints while the viewport/projection are rebuilt
        self.setUpdatesEnabled(False)

        glViewport(0, 0, self.rect().width(), self.rect().height())
        self.initializeGL()

        self.setUpdatesEnabled(True)

    def _beforeScale(self):
        # world-space position under the zoom anchor, pre-scale
        self.zoomPos1 = self.camera.qt_to_opengl(self._zoomAnchor)

    def _afterScale(self):
        # re-translate so the anchor stays under the cursor after scaling
        self.zoomPos2 = self.camera.qt_to_opengl(self._zoomAnchor)
        self.camera.translate((self.zoomPos2.x() - self.zoomPos1.x()) * self.camera.m[0],
                              (self.zoomPos2.y() - self.zoomPos1.y()) * self.camera.m[0])

    def wheelEvent(self, event):
        """Zoom around the cursor position.

        NOTE(review): event.delta() is the Qt4-era API (Qt5 uses
        angleDelta()) -- consistent with the rest of this widget.
        """
        self._zoomAnchor = event.pos()

        self._beforeScale()

        # math.pow(self._scale_coeff, math.copysign(1, event.delta()))
        # => 3 times slower (as measured with timeit module)

        if event.delta() > 0:
            self.camera.scale(self._scale_coeff)
        else:
            self.camera.scale(self._scale_coeff1)

        self._afterScale()

    def mousePressEvent(self, event):
        QtOpenGL.QGLWidget.mousePressEvent(self, event)

        # record positions used by drag-pan and drag-zoom below
        self._mousePos = event.pos()
        self._zoomAnchor = event.pos()

        #if event.buttons() & QtCore.Qt.LeftButton:
        #    self.playhead = event.x()

        self.update()

    def mouseReleaseEvent(self, event):
        QtOpenGL.QGLWidget.mouseReleaseEvent(self, event)

        self._mousePos = event.pos()

        self.update()

    def mouseMoveEvent(self, event):
        """Alt+middle-drag pans the camera; Alt+right-drag zooms around
        the press anchor."""
        QtOpenGL.QGLWidget.mouseMoveEvent(self, event)

        dx = event.x() - self._mousePos.x()
        dy = event.y() - self._mousePos.y()

        self._mousePos = event.pos()

        if (event.buttons() & QtCore.Qt.MidButton) and (event.modifiers() & QtCore.Qt.AltModifier):
            self.camera.translate(dx, dy)

        elif (event.buttons() & QtCore.Qt.RightButton) and (event.modifiers() & QtCore.Qt.AltModifier):
            self._beforeScale()
            self.camera.scale(1.0 + math.copysign(self._zoom_coeff, dx))
            self._afterScale()

        self.update()
Ejemplo n.º 24
0
class World:
      """
      A container for all level objects (actors, fields)
      and the source of game time.

      NOTE: constructing a World immediately enters the main loop (run())
      and does not return until the story signals exit or the window closes.
      """
      def __init__(self, Story):
          """Set up resources, camera, scheduler and fields, then instantiate
          the given Story class with this world and start the main loop."""
          self.rsc = Resources()
          self.camera = Camera(self.rsc.graphics, (0, 100, 0, 50))

          # scheduler: draw at the configured fps, input polling at 100Hz,
          # game-state and camera updates at 50Hz
          self._timekeeper = TimeKeeper()
          self._timekeeper.schedule('draw', 1.0 / settings.target_fps)
          self._timekeeper.schedule('input', 1.0 / 100.0)
          self._timekeeper.schedule('update_game', 1.0 / 50.0, game = True)
          self._timekeeper.schedule('update', 1.0 / 50.0)
          self._timekeeper.set_game_speed(settings.game_speed)

          # world objects
          self.fields = {}
          for fieldtype in fieldtypes:
            field = fieldtype()
            self.fields[fieldtype] = field
          self.actors = []

          # initiate story
          self.story = Story(self)
          self.run()

      # thin wrappers around the timekeeper
      def get_time(self): return self._timekeeper.get_game_time()
      def pause(self): self._timekeeper.pause()
      def set_speed(self, val): self._timekeeper.set_game_speed(val)
      def get_speed(self): return self._timekeeper.get_game_speed()

      def run(self):
          """Main loop: dispatch scheduler events to the update / draw / input
          phases until the story requests exit or QUIT/ESC arrives."""
          story = self.story
          player = story.get_player()
          rsc = self.rsc

          # debug objects
          tm = debug.StatSet('World main loop timers')
          tm.add(debug.Timer,
                 'update', 'update_actors', 'update_magic', 'update_fields',
                 'draw', 'draw_actors', 'draw_magic', 'draw_fields', 'events', 'calibrate')
          ct = debug.StatSet('World main loop counters')
          ct.add(debug.RateCounter, 'fps', 'update', 'input')
          debug_rl = debug.RateLimit(1.0, exp = 0)
          dd = debug.DrawDebug()
          stats = ""

          # input event handlers
          if player is not None:
            c_char = CharacterControl(self, player)
          else:
            c_char = None
          c_game = GameControl(self, player)

          while True:
            # exit condition
            if story.exit_now == True:
              return

            # wait for the next scheduler event
            tm.calibrate.start()
            sch_event = self._timekeeper.wait_for_event()
            tm.calibrate.end()

            if sch_event == "update_game":
              ## update
              tm.update.start()
              # actors moving (skipped entirely while paused)
              tm.update_actors.start()
              if not self._timekeeper.paused():
                for actor in self.get_actors(exclude = [actors.MagicParticle]):
                  actor.update()
              tm.update_actors.end()

              # magic moving
              tm.update_magic.start()
              if not self._timekeeper.paused():
                for actor in self.get_actors(include = [actors.MagicParticle]):
                  actor.update()
              tm.update_magic.end()

              # storyline evolving
              story.update()

              # update fields
              tm.update_fields.start()
              for fieldtype in self.fields.keys():
                self.fields[fieldtype].update()
              tm.update_fields.end()
              tm.update.end()
              ct.update.count()

            elif sch_event == "update":
              # camera movements
              self.camera.update()

            elif sch_event == "draw":
              ## draw
              tm.draw.start()
              rsc.graphics.clear()
              # clear allocated debug message space
              dd.clear_allocations()

              total_actor_count = total_magic_count = draw_actor_count = draw_magic_count = 0
              # background changes slightly in color
              if self._timekeeper.paused():
                day = -1.0
                rsc.graphics.fill([16, 32, 96])
              else:
                day = math.sin(time.time()) + 1
                rsc.graphics.fill([day * 32, 32 + day * 32, 128 + day * 32])

              # draw actors
              self.sort_actors()

              tm.draw_actors.start()
              for actor in self.get_actors(exclude = [actors.MagicParticle]):
                total_actor_count += 1
                if actor.draw():
                  draw_actor_count += 1
                  if settings.debug and actor.debug_me:
                    dd.draw_msg(actor.debug_info(), *actor.get_xy())
              tm.draw_actors.end()

              # magic particles
              tm.draw_magic.start()
              for actor in self.get_actors(include = [actors.MagicParticle]):
                total_magic_count += 1
                if actor.draw():
                  draw_magic_count += 1
                  if settings.debug and actor.debug_me:
                    dd.draw_msg(actor.debug_info(), *actor.get_xy())
              tm.draw_magic.end()

              # draw fields
              tm.draw_fields.start()
              for field in self.all_fields():
                field.draw(self.camera)
              tm.draw_fields.end()

              # draw storyline elements
              story.draw()

              # draw performance stats (rate-limited to once per second)
              if settings.debug:
                if debug_rl.check():
                  # dump stats to debug log
                  #tm.dump()
                  #ct.dump()

                  # do not update stats when paused
                  if not self._timekeeper.paused():
                    # p() interleaves each timer value with its percentage of total t
                    def p(t, *parts):
                        r = ()
                        for part in parts:
                          if t == 0:
                            r += (float(part), 0.0)
                          else:
                            r += (float(part), float(part) / float(t) * 100.0)
                        return r
                    stats = "FPS: %.1f UPDATE: %.1f EVENT: %.1f\n" % (ct.fps, ct.update, ct.input)
                    draw_left = tm.draw - tm.draw_actors - tm.draw_magic - tm.draw_fields
                    stats += "DRAW=%.3f" % (tm.draw)
                    stats += " (actors=%.3f/%u%% magic=%.3f/%u%% fields=%.3f/%u%% left=%.3f/%u%%)" % \
                             p(tm.draw, tm.draw_actors, tm.draw_magic, tm.draw_fields, draw_left)
                    stats += " actors=%u/%u balls=%u/%u\n" % \
                             (draw_actor_count, total_actor_count, draw_magic_count, total_magic_count)
                    update_left = tm.update - tm.update_actors - tm.update_magic - tm.update_fields
                    stats += "UPDATE=%.3f" % (tm.update)
                    stats += " (actors=%.3f/%u%% magic=%.3f/%u%% fields=%.3f/%u%% left=%.3f/%u%%) calibrate=%.3f" % \
                             (p(tm.update, tm.update_actors, tm.update_magic, tm.update_fields, update_left) + (tm.calibrate,))
                dd.draw_stats(stats)
                dd.draw_msg(debug.debugger.last_messages)

              # draw ball selector
              if c_char is not None and c_char.get_magic:
                i = 1
                for ball in c_char.local_balls:
                  ball_txt = rsc.fonts.textfont.render("%u: %s" % (i, ball.__class__.__name__), True, ball.field.color)
                  ball_nr  = rsc.fonts.textfont.render("%u" % (i), True, ball.field.color)
                  rsc.graphics.blit(ball_txt, (10, 40 + i * 20))
                  rsc.graphics.blit(ball_nr, (self.camera.pl2sc_x(ball.pos), self.camera.sc_h() - 80))
                  i += 1

              rsc.graphics.update()
              tm.draw.end()
              ct.fps.count()

            elif sch_event == "input":
              ## handle events
              tm.events.start()
              for event in pygame.event.get():
                if event.type == QUIT or event.type == KEYDOWN and event.key == K_ESCAPE:
                  return
                c_game.handle(event)
                if c_char is not None:
                  c_char.handle(event)
              tm.events.end()
              ct.input.count()

      ## actor management
      def new_actor(self, actor_class, pos):
          # actor classes register themselves with (world, pos)
          actor = actor_class(self, pos)
          self.actors.append(actor)
          return actor
      def del_actor(self, actor):
          self.actors.pop(self.actors.index(actor))
      def all_actors(self):
          return self.actors
      def get_actors(self, x1 = False, x2 = False, filter = False, include = False, exclude = False):
          """
          Get actors with position in range [x1 : x2] and matching filter.

          include: keep only instances of the listed classes.
          exclude: drop instances of the listed classes.
          NOTE: 'filter' shadows the builtin; it is an optional predicate.
          """
          ret = []
          for actor in self.actors:
            if x1 and actor.pos < x1:
              continue
            if x2 and actor.pos > x2:
              continue
            if filter and not filter(actor):
              continue
            if include:
              decision = False
              for klass in include:
                if isinstance(actor, klass):
                  decision = True
                  break
              if not decision:
                continue
            if exclude:
              decision = True
              for klass in exclude:
                if isinstance(actor, klass):
                  decision = False
                  break
              if not decision:
                continue

            ret.append(actor)
          return ret
      def sort_actors(self):
          # sort by stacking order first, then by position
          # NOTE(review): cmp/comparator-style sort is Python 2 only
          self.actors.sort(lambda x, y: cmp(x.stacking, y.stacking) or cmp(x.pos, y.pos))

      # field management
      def get_field(self, fieldtype):
          return self.fields[fieldtype]
      def all_fields(self):
          return self.fields.values()
Ejemplo n.º 25
0
# Ask the camera stack for its auto-exposure settings, then snap the reported
# ISO to the nearest value this camera actually supports.
auto_settings = settings.get_auto()

# Find closest allowed ISO
speed = auto_settings['iso']
speeds = [100, 200, 320, 400, 500, 640, 800]
iso = 800
minimum_diff = float('inf')
for value in speeds:
    diff = abs(speed - value)
    # '<=' means that on a tie the later (larger) ISO wins
    if diff <= minimum_diff:
        minimum_diff = diff
        iso = value
auto_settings['iso'] = iso

# Setup camera for capture
camera = Camera()
camera.set_settings(auto_settings)
photo_counter = 0

# Check for previous folders
# NOTE(review): presumably each numbered folder holds 1000 frames, so the
# counter resumes after the last existing folder — confirm frames_dir layout.
for content in list_dir(frames_dir):
    if path_exists(frames_dir + '/' + content):
        folder_number = (int(content) + 1) * 1000
        if folder_number > photo_counter:
            photo_counter = folder_number

# Start main loop
# NOTE(review): the loop body appears truncated in this excerpt.
while 1==1:
    # Set pins for next stepper motor
    for index in range(0, 4):
        pin = step_pins[index]
Ejemplo n.º 26
0
def startConsole(directory=None, camera_address=None):
    """Interactive pygame console for driving the robot and labelling frames.

    directory: when given, frames tagged positive/negative (via '=' / '-')
        are saved under <directory>/positives and <directory>/negatives,
        numbered on from the highest existing id.
    camera_address: when given, a TCP connection is opened and packed drive
        commands (forward, turn) are streamed to it; otherwise the local
        camera is set up for capture.

    Runs until the window is closed or ESC is pressed.
    """

    if directory is None:
        should_save = False
    else:
        should_save = True

        if not os.path.isdir(directory):
            os.makedirs(directory)

        # resume numbering after the highest existing '<n>.png' in positives/
        dir_positive = os.path.join(directory, 'positives')
        if not os.path.isdir(dir_positive):
            os.makedirs(dir_positive)
        files_positive = glob.glob(os.path.join(dir_positive, '*.png'))
        if files_positive:
            files_positive = [
                os.path.split(file)[1] for file in files_positive
            ]
            files_positive = [
                int(os.path.splitext(file)[0]) for file in files_positive
            ]
            id_positive = sorted(files_positive)[-1] + 1
        else:
            id_positive = 0

        # same scheme for negatives/
        dir_negative = os.path.join(directory, 'negatives')
        if not os.path.isdir(dir_negative):
            os.makedirs(dir_negative)
        files_negative = glob.glob(os.path.join(dir_negative, '*.png'))
        if files_negative:
            files_negative = [
                os.path.split(file)[1] for file in files_negative
            ]
            files_negative = [
                int(os.path.splitext(file)[0]) for file in files_negative
            ]
            id_negative = sorted(files_negative)[-1] + 1
        else:
            id_negative = 0

        print("Starting positive id from %i and negative id from %i." %
              (id_positive, id_negative))

    cam = Camera()

    if camera_address is None:
        send_command = False
        cam.setSize(160, 120)
        cam.capture()

    else:
        send_command = True
        connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        connection.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        connection.settimeout(100)
        connection.connect(camera_address)
        # cam.receive(connection)  # TODO: refactore camera module

    pygame.init()

    fpsClock = pygame.time.Clock()

    # http://www.pygame.org/docs/ref/display.html#pygame.display.set_mode
    surface = pygame.display.set_mode((width, height), 0, 0)

    pygame.display.set_caption('Control Console')
    pygame.mouse.set_visible(1)

    background = pygame.Surface(surface.get_size())
    background.fill((20, 20, 20))

    # test=pygame.image.load("test.png")

    rect = pygame.Rect(0, 0, width, height)

    border_time = 0.
    forward = 0.
    turn = 0.
    keep_running = True
    while keep_running:

        # surface.blit(background,(0,0))
        # frame_received = cam.getFrame()
        # frame = cv2.resize(frame_received, (width, height))
        # frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
        # frame = np.rot90(frame)
        # frame = pygame.surfarray.make_surface(frame)
        # surface.blit(frame, (0, 0))

        reward = 0

        events = pygame.event.get()

        for event in events:
            if event.type == QUIT:
                keep_running = False
                print('Exiting!!!')

            if event.type == KEYDOWN:
                if (event.key == K_q):
                    print('q')

                if event.key == K_EQUALS:
                    border_time = time()
                    border_color = [0, 255, 0]
                    reward = 1

                    # NOTE(review): frame_received is only assigned in the
                    # commented-out capture block above — saving will raise
                    # NameError until that code is restored; confirm.
                    if should_save:
                        writeFrame(
                            frame_received,
                            os.path.join(dir_positive, '%i' % id_positive))
                        id_positive += 1
                elif event.key == K_MINUS:
                    border_time = time()
                    border_color = [255, 0, 0]
                    reward = -1

                    if should_save:
                        writeFrame(
                            frame_received,
                            os.path.join(dir_negative, '%i' % id_negative))
                        id_negative += 1

                if event.key == K_ESCAPE:
                    keep_running = False
                    print('Exiting!!!')

        # flash a colored border for 0.2s after a reward key was pressed
        if time() - border_time < .2:
            pygame.draw.rect(surface, border_color, rect, border)

        keys_pressed = pygame.key.get_pressed()

        # smooth the controls with a running mean; 'agility' sets responsiveness
        if keys_pressed[K_RIGHT]:
            turn = runningMean(1, turn, agility)

        elif keys_pressed[K_LEFT]:
            turn = runningMean(-1, turn, agility)

        else:
            turn = runningMean(0, turn, agility)

        if keys_pressed[K_UP]:
            forward = runningMean(1, forward, agility)

        elif keys_pressed[K_DOWN]:
            forward = runningMean(-1, forward, agility)

        else:
            forward = runningMean(0, forward, agility)

        # dead-band: treat small control values as zero
        if abs(turn) < .1:
            _turn = 0.
        else:
            _turn = turn
        if abs(forward) < .1:
            _forward = 0.
        else:
            _forward = forward

        # NOTE(review): passing str to struct "c" fields only works on
        # Python 2; Python 3 requires bytes (b"s", b"e") — confirm target.
        message = struct.pack(">cffc", "s", _forward, _turn, "e")
        # print(struct.unpack(">cffc",message))

        if send_command:
            connection.send(message)

        # if forward or turn or reward:
        print("Positives: %i | Negatives: %i" % (id_positive, id_negative))
        print("%+i | %+ 1.3f | %+ 1.3f" % (reward, forward, turn))
        # ANSI: cursor up + clear line, twice, so the two status lines
        # overwrite themselves in place
        stdout.write("\033[F\033[K")
        stdout.write("\033[F\033[K")

        pygame.display.flip()  # or pygame.display.update()
        fpsClock.tick(FPS)

    pygame.quit()
Ejemplo n.º 27
0
class Main:
    """Read ./config, wire up camera / backend notifier / parking lot, then
    run the frame-processing loop until 'q' is pressed."""

    config_filename = 'config'
    cam = None
    parking_lot = None
    backend_notifier = None

    def __init__(self):
        """Parse the config file, build collaborators and start the loop.

        Exits the process with a message when the config file or any of the
        required sections ('default', 'camera', 'backend', 'parking_lot')
        is missing.
        """
        config = configparser.ConfigParser()
        config.read(self.config_filename)

        # ConfigParser always contains the implicit DEFAULT section, so a
        # length of 1 means nothing was actually read from disk.
        if len(config) == 1:
            sys.exit("Config file not found or empty, please create ./%s" %
                     (self.config_filename))

        if 'default' not in config.sections():
            sys.exit("Default settings not found")
        refresh_in_seconds = int(config['default']['refresh_in_seconds'])

        if 'camera' not in config.sections():
            sys.exit("Camera settings not found")
        self.cam = Camera(config['camera'])

        if 'backend' not in config.sections():
            sys.exit("Backend settings not found")
        self.backend_notifier = BackendNotifier(config['backend'])

        if 'parking_lot' not in config.sections():
            sys.exit("Parking lot settings not found")
        self.parking_lot = ParkingLot(config['parking_lot'], self.cam)

        # persist any normalization performed while reading the config
        with open(self.config_filename, 'w') as configfile:
            config.write(configfile)

        # Execute Main Loop
        self.execute(refresh_in_seconds)

    def execute(self, refresh_in_seconds):
        """Grab frames, detect free spots once per refresh interval, show the
        annotated preview and notify the backend every 2s of frames."""
        self.cam.start()
        frame_num = -1
        while True:
            check, frame = self.cam.getNextFrame()
            frame_num += 1
            # Process one frame per refresh interval (frame 0 included, since
            # 0 % n == 0 — the original's extra `or frame_num == 0` was
            # redundant).
            if check and frame_num % (self.cam.fps * refresh_in_seconds) == 0:
                spots.find(frame, self.parking_lot)
                frame_small = cv2.resize(frame, (1280, 720))
                cv2.imshow('Free spots marked with green (press q to close)',
                           frame_small)
                if frame_num % (self.cam.fps * 2) == 0:
                    self.backend_notifier.notifyAll(
                        self.parking_lot.parking_spots)
                    frame_num = 0  # keep the counter bounded
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        self.cam.stop()
Ejemplo n.º 28
0
class Game:
    """Top-level game object: owns the camera, tilemap, characters and clock,
    and implements the start/event/update/draw phases of the main loop."""

    def __init__(self):
        # Pygame launch
        pygame.display.set_caption("SANTARENA")

        # Screen (camera)
        self.camera = Camera()

        # Tilemap
        self.tilemap = Tilemap(MAP_H, MAP_W)

        # Characters
        self.character = Character()
        self.enemies = []

        # Clock; dt holds the time elapsed during the last tick
        self.clock = Clock()
        self.dt = 0

    def start(self):
        """
        Called once: center the camera and the player, then spawn enemies.
        """
        # Set the camera in the center of the map
        self.camera.posH = (self.tilemap.h - self.camera.h) / 2
        self.camera.posW = (self.tilemap.w - self.camera.w) / 2

        # Set the character sprite in the center of the screen
        self.character.posH = (self.camera.posH +
                               (self.camera.h - self.character.rectH) / 2)
        self.character.posW = (self.camera.posW +
                               (self.camera.w - self.character.rectW) / 2)

        # Spawn some enemies at random positions
        for _ in range(3):
            randH = randrange(400)
            randW = randrange(700)
            self.enemies.append(Enemy(randH, randW))

    def event(self):
        """Poll pygame events (quit, fullscreen toggle) and forward input to
        the character, the enemies and the camera."""
        for event in pygame.event.get():
            # Quit
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            # Full screen toggle.
            # FIX: was `event.type is pygame.KEYDOWN` — identity comparison
            # of ints is implementation-dependent; use equality.
            if event.type == pygame.KEYDOWN and event.key == pygame.K_f:
                if self.camera.screen.get_flags() & pygame.FULLSCREEN:
                    pygame.display.set_mode(self.camera.size)
                else:
                    pygame.display.set_mode((0, 0), pygame.FULLSCREEN)

        # Character
        self.character.event(
            pygame.key.get_pressed(),
            self.camera,
            self.enemies,
            self.tilemap.blocks,
            self.dt,
        )

        # Enemies
        for enemy in self.enemies:
            enemy.event(self.character, self.enemies, self.tilemap.blocks)

        # Camera
        self.camera.move(pygame.key.get_pressed(), self.character, self.dt)

    def update(self):
        """Advance map, character & projectiles and enemies; tick the clock."""
        # Map
        self.tilemap.update(self.camera)

        # Character & projectiles
        self.character.update(self.camera, self.tilemap.blocks, self.dt)

        # Enemies
        for enemy in self.enemies:
            enemy.update(self.character, self.enemies, self.camera, self.dt)

        # Tic tac
        self.dt = self.clock.tick(FPS)

    def draw(self):
        """Draw everything back-to-front, then flip the display."""
        # Reset
        self.camera.screen.fill(COLORS["BLACK"])

        # Tilemap
        self.tilemap.draw(self.camera)

        # Enemies
        for enemy in self.enemies:
            enemy.draw(self.camera)

        # Character
        self.character.draw(self.camera)

        pygame.display.update()
Ejemplo n.º 29
0
class Grid(Character):
    """Conway-style life grid: organisms live sparsely in a dict keyed by
    (x, y) cell coordinates and are rendered through a zoomable camera."""

    def __init__(self):
        super().__init__()
        self.organisms = {}  # (x, y) -> Organism
        self.camera = Camera(cfg.SCENE_WIDTH, cfg.SCENE_HEIGHT)
        self.surface = pygame.Surface((cfg.SCENE_WIDTH, cfg.SCENE_HEIGHT))
        self.zoom = 1
        self.play = False  # simulation running?
        self.clock = pygame.time.Clock()
        self.interval = 0  # ms accumulated since the last generation

    def _cell_at(self, pos, zw, zh):
        """Translate a screen position into grid coordinates at the current
        zoom (zw/zh = cell size in pixels)."""
        x = int(pos[0] / zw + self.camera.x)
        y = int(pos[1] / zh + self.camera.y)
        return x, y

    def update(self, event):
        """Handle one pygame event (paint / pan / erase / zoom / play-pause),
        advance the simulation when playing, and redraw visible organisms."""
        # cell size in pixels at the current zoom level
        zw = int(cfg.ORG_WIDTH * self.zoom)
        zh = int(cfg.ORG_HEIGHT * self.zoom)

        if event.type == pygame.MOUSEMOTION:
            pos = event.pos  # mouse position
            vel = event.rel  # mouse velocity

            # left-drag: paint organisms
            if event.buttons == (True, False, False):
                self.organisms[self._cell_at(pos, zw, zh)] = Organism()
            # middle-drag: pan the viewport
            if event.buttons == (False, True, False):
                dx = -int(vel[0]) / zw
                dy = -int(vel[1]) / zh
                self.camera.move(dx, dy)
            # right-drag: erase organisms
            if event.buttons == (False, False, True):
                cell = self._cell_at(pos, zw, zh)
                if cell in self.organisms:  # FIX: was `in ....keys()`
                    del self.organisms[cell]

        if event.type == pygame.MOUSEBUTTONUP:
            if event.button == 5:
                # wheel down: zoom out
                self.zoom = self.zoom * .9
            if event.button == 4:
                # wheel up: zoom in
                self.zoom = self.zoom * 1.1

        if event.type == pygame.KEYUP:
            # any key toggles play/pause
            self.play = not self.play

        if self.play:
            self.interval = self.interval + self.clock.tick()
            if self.interval > 1000 / cfg.GRID_FPS:
                self.interval = 0
                self.generate()

        # Blit organisms that are in view
        self.surface.fill((255, 255, 255))
        visible = self.camera.focus(self.organisms, self.zoom)
        for (x, y) in visible:
            org = visible[(x, y)]
            # TODO: call these functions only once when necessary, not every iteration
            if not self.play:
                org.sprite.animation.pause()
            else:
                org.sprite.animation.unpause()
            org.update()
            # scale the sprite surface to the zoom factor
            zoomed = pygame.transform.scale(org.sprite.surface, (zw, zh))
            self.surface.blit(zoomed, (
                (x - self.camera.x) * zw,
                (y - self.camera.y) * zh,
            ))

    def generate(self):
        """Advance the colony one generation with Conway's rules
        (survive with 2-3 live neighbors, birth with exactly 3)."""
        new = {}
        for coord in self.organisms:
            org = self.organisms[coord]
            neighbors = self.live_neighbors(coord)

            # survive only with 2 or 3 live neighbors
            if 1 < neighbors < 4:
                new[coord] = org

            # resurrect nearby dead cells that have exactly 3 live neighbors
            # (FIX: the inner loop no longer shadows the outer `coord`)
            for dead_coord in self.dead_neighbors(coord):
                if self.live_neighbors(dead_coord) == 3:
                    new[dead_coord] = Organism()

        self.organisms = new

    def dead_neighbors(self, pos):
        """Return coordinates in the 3x3 block around pos (pos included)
        that hold no organism."""
        coords = []
        for i in range(-1, 2):
            for z in range(-1, 2):
                rpos = (pos[0] + i, pos[1] + z)
                if rpos not in self.organisms:
                    coords.append(rpos)
        return coords

    def live_neighbors(self, pos):
        """Count organisms in the 3x3 block around pos, excluding pos."""
        n = 0
        for i in range(-1, 2):
            for z in range(-1, 2):
                rpos = (pos[0] + i, pos[1] + z)
                if rpos in self.organisms and rpos != pos:
                    n = n + 1
        return n
            
@app.route('/droneblocks/<path:path>', methods=['GET'])
def droneblocks(path):
    """Serve a DroneBlocks static asset; fall back to tello.html inside the
    requested path when it is not a regular file."""
    requested = os.path.join(static_file_dir, path)
    if not os.path.isfile(requested):
        path = os.path.join(path, 'tello.html')
    return send_from_directory(static_file_dir, path)


if __name__ == "__main__":

    # Initialize the drone class
    drone = Drone(is_aruco_tracking_enabled=True)

    # Camera for stream, photo, video
    camera = Camera(drone)

    # Udp for sending commands
    udp = UDP()
    udp.start_listening()

    # Create the mission handler
    mission = Mission(udp, camera)

    # Handle Tello's state information
    telemetry = Telemetry(drone)
    telemetry.receive_telemetry()

    # Fire up the app (serves the Flask routes until shutdown)
    app.run()
class _worker_context:
    """Per-process rendering state for producing frames of a camera orbit
    around the scene (OSM line work plus USGS point data)."""

    def __init__(self, fps, duration, W, H, R, osm_cart, usgs_cart):
        """Store render parameters and build the camera for this worker."""
        self._id = mp.current_process().name
        self._fps = fps
        self._duration = duration
        self._W = W
        self._H = H
        self._R = R
        self._osm_cart = osm_cart
        self._usgs_cart = usgs_cart

        self._frames = self._duration * self._fps
        self._dist = 3.0 * self._R  # orbit radius

        camera = Camera()
        camera.set_focal(50.0)
        camera.set_factor(30)
        camera.set_center(self._W / 2, self._H / 2)
        self._camera = camera

        # reusable 2-D projection buffer: one column per 3-D point
        self._usgs_cart_2d = np.zeros((2, self._usgs_cart.shape[1]), dtype = 'f4')

    def render_frame(self, frame_index):
        """Render one frame of the orbit and save it under frames/."""
        angle = 2 * math.pi * frame_index / self._frames

        cam = self._camera
        cam.set_position(self._dist * math.cos(angle), self._dist * math.sin(angle), 0.0)
        cam.set_direction(_rad(180.0) + angle, 0.0)
        project_batch = cam.compiled_get_points()

        image = Image(self._W, self._H, background_color = (0.1, 0.1, 0.1))

        # OSM data as thin grey polygons, projected point by point
        for line in self._osm_cart:
            projected = [cam.get_point(*point) for point in line]
            image.draw_polygon(
                *projected,
                line_color = (0.7, 0.7, 0.7),
                line_width = 0.3,
            )

        # USGS data as red dots via the batch projector
        project_batch(self._usgs_cart, self._usgs_cart_2d)
        for column in range(self._usgs_cart_2d.shape[1]):
            image.draw_filledcircle(
                *self._usgs_cart_2d[:, column], r = 1.0,
                fill_color = (1.0, 0.0, 0.0),
            )

        image.save(os.path.join('frames', f'frame_{frame_index:05d}.png'))
Ejemplo n.º 32
0
            jtalk.speak("遠くの%s" % text)
        else:
            jtalk.speak("%-3.2fメートル前方に%s" % (distance, text))
    except UnboundLocalError:
        print 'エラーが発生しました'


def on_publish(client, userdata, mid):
    """MQTT publish callback: log the message id of the completed publish."""
    message = "publish: {0}".format(mid)
    print(message)


if __name__ == "__main__":
    # configure the MQTT client callbacks and run its network loop on a
    # background thread
    client = mqtt.Client(protocol=mqtt.MQTTv311)
    client.on_connect = on_connect
    client.on_message = on_message
    client.on_publish = on_publish
    client.connect(host, port=port, keepalive=60)
    client.loop_start()

    accel = Acceralation()
    camera = Camera()
    # poll the accelerometer; whenever it allows a snapshot, capture one,
    # publish it base64-encoded, then back off for 10 seconds
    while True:
        if accel.permit_snapshot():
            filepath = uuid.uuid4()
            filename = '%s.jpg' % filepath
            camera.snapshot(filename)
            file = open('images/' + filename, "rb").read()
            file_data = base64.b64encode(file)
            client.publish(pub_topic, file_data, 0)
            time.sleep(10.0)
Ejemplo n.º 33
0
class Scanner:
    """
    The Scanner class uses the camera to "scan" the state of the cube.
    This scanning process is very sensitive to lighting conditions.
    """
    def __init__(self, config):
        """Load the camera calibration named in config['cam']['calibration']
        (YAML, relative to the parent directory) and build the Camera."""
        self.config = config

        calib_file = "../" + config['cam']['calibration']
        calib = {}
        with open(calib_file, 'r') as ymlfile:
            calib = yaml.load(ymlfile, Loader=yaml.FullLoader)

        self.camera = Camera(config, calib)

    def scan_state(self, motors):
        """
        Scan the current state of the cube, and return a map of <facename:array<facename>> to represent the cube state.
        Cube state is represented like:

                     |------------|
                     |*U0**U1**U2*|
                     |------------|
                     |*U3**U4**U5*|
                     |------------|
                     |*U6**U7**U8*|
        |------------|------------|------------|------------|
        |*L0**L1**L2*|*F0**F1**F2*|*R0**R1**R2*|*B0**B1**B2*|
        |------------|------------|------------|------------|
        |*L3**L4**L5*|*F3**F4**F5*|*R3**R4**R5*|*B3**B4**B5*|
        |------------|------------|------------|------------|
        |*L6**L7**L8*|*F6**F7**F8*|*R6**R7**R8*|*B6**B7**B8*|
        |------------|------------|------------|------------|
                     |*D0**D1**D2*|
                     |------------|
                     |*D3**D4**D5*|
                     |------------|
                     |*D6**D7**D8*|
                     |------------|

        The camera is always pointed at the edge <F2-R0-F5-R3-F8-R6>. We obtain the entire cube state by getting the
        colors of all facelets on that edge, then rotating the cube through a predetermined sequence, such that all
        edge facelets rotate through that edge.

        NOTE: the facelet targets of each unpacking below are position-exact;
        they mirror the physical rotation sequence and must not be reordered.
        """

        state = dict(SOLVED_STATE)  # Initialize to fully solved.

        # pylint: disable=unbalanced-tuple-unpacking
        state['F'][2], state['R'][0], state['F'][5], state['R'][3], state['F'][
            8], state['R'][6] = self.camera.get_faces()

        motors.execute('F')  # rotate front face clockwise
        state['F'][0], state['U'][6], state['F'][1], state['U'][7], _, state[
            'U'][8] = self.camera.get_faces()

        motors.execute('F')
        state['F'][6], state['L'][8], state['F'][3], state['L'][5], _, state[
            'L'][2] = self.camera.get_faces()

        motors.execute('F')
        _, state['D'][2], state['F'][7], state['D'][1], _, state['D'][
            0] = self.camera.get_faces()

        # front map complete.

        motors.execute('F R')
        _, _, state['D'][5], state['R'][7], state['D'][8], state['R'][
            8] = self.camera.get_faces()

        motors.execute('R')
        state['B'][6], _, state['B'][3], state['R'][5], state['B'][0], state[
            'R'][2] = self.camera.get_faces()

        motors.execute('R')
        state['U'][2], _, state['U'][5], state['R'][
            1], _, _ = self.camera.get_faces()

        # right map complete

        motors.execute("R B' U R'")
        state['L'][6], state['B'][8], state['L'][3], state['B'][5], state['L'][
            0], state['B'][2] = self.camera.get_faces()

        motors.execute("R U' B L' F2")
        state['D'][6], _, state['D'][3], state['L'][
            7], _, _ = self.camera.get_faces()

        motors.execute("F2 L U R'")
        state['U'][0], _, state['U'][1], state['B'][1], state['U'][
            2], _ = self.camera.get_faces()

        # up map complete

        motors.execute("R U R'")
        _, _, _, state['L'][1], _, _ = self.camera.get_faces()

        # left map complete

        motors.execute("B R2")
        _, _, state['B'][7], state['D'][7], _, _ = self.camera.get_faces()

        # back and down maps complete

        # return the cube to its starting orientation
        motors.execute("R2 B' R U2")

        return state

    def get_state_string(self, motors, state=None):
        """
        Given a map of <face,array<facelet>>, representing the cube state, return a
        string representation, according to the order U0, U1, U2, U3, U4, U5, U6, U7, U8, R0, R1,
        R2, R3, R4, R5, R6, R7, R8, F0, F1, F2, F3, F4, F5, F6, F7, F8, D0, D1, D2, D3,
        D4, D5, D6, D7, D8, L0, L1, L2, L3, L4, L5, L6, L7, L8, B0, B1, B2, B3, B4, B5,
        B6, B7, B8

        Example state: "DLRUULBDFLFFDRBBRRDLUFFFBRDUDLRDFRDULBRLLUBULDBFRBUFBU":

                     |------------|
                     |*D **L **R *|
                     |------------|
                     |*U **U **L *|
                     |------------|
                     |*B **D **F *|
        |------------|------------|------------|------------|
        |*L **B **R *|*D **L **U *|*L **F **F *|*D **B **F *|
        |------------|------------|------------|------------|
        |*L **L **U *|*F **F **F *|*D **R **B *|*R **B **U *|
        |------------|------------|------------|------------|
        |*B **U **L *|*B **R **D *|*B **R **R *|*F **B **U *|
        |------------|------------|------------|------------|
                     |*U **D **L *|
                     |------------|
                     |*R **D **F *|
                     |------------|
                     |*R **D **U *|
                     |------------|

        Returns "" when any of the fast sanity checks below fails.
        """
        if state is None:
            state = self.scan_state(motors)

        retval = "".join(state['U']) \
            + "".join(state['R']) \
            + "".join(state['F']) \
            + "".join(state['D']) \
            + "".join(state['L']) \
            + "".join(state['B'])

        # fast (imperfect) test for a valid cube state.
        if len(retval) != 54:  # Is the string exactly 54 characters?
            logging.error("Invalid Cube State! (not enough faces!)")
            retval = ""

        if (len(state['U']) != 9 or len(state['R']) != 9
                or len(state['F']) != 9 or len(state['D']) != 9
                or len(state['L']) != 9 or len(state['B']) != 9):
            # Is each face exactly 9 facelets?
            logging.error(
                'Invalid Cube State! (inconsistent colors: U={:d}, R={:d}, F={:d}, D={:d}, L={:d}, B={:d})'
                .format(len(state['U']), len(state['R']), len(state['F']),
                        len(state['D']), len(state['L']), len(state['B'])))
            retval = ""

        if state['U'][4] != 'U' or state['R'][4] != 'R' or state['F'][
                4] != 'F' or state['D'][4] != 'D' or state['L'][
                    4] != 'L' or state['B'][4] != 'B':
            # Is the center facelet of each face the same as the face itself? (since center squares can't move)
            logging.error(
                'Invalid Cube State! (Center facelets are not correct)')
            retval = ""

        return retval
def start_recognize():
    """Run the live face-recognition attendance loop.

    Loads every enrolled person's images from DATABASE_DIR (one
    sub-directory per person), trains an OpenCV face recognizer on
    them, then reads camera frames, labels detected faces and logs the
    recognized employee in or out depending on sys.argv[1]
    ("1" -> login, "0" -> logout).

    Exits the process via sys.exit() when the database is empty or
    unusable, or when the user presses ESC in the preview window.
    """
    shape = 'rectangle'
    try:
        # Each sub-directory of DATABASE_DIR is one enrolled person;
        # the identity list-comprehension was redundant.
        faces = os.listdir(DATABASE_DIR)
    except Exception as ex:
        print(
            json.dumps({
                "message":
                "Have you added at least one person to the system?" + str(ex),
                "status":
                "isEmpty"
            }))
        sys.exit()
    print(
        json.dumps(
            {"message": "This are the people in the Recognition System:"}))
    for employee in faces:
        print(json.dumps({"-": employee}))

    # 1 = Eigenfaces, 2 = Fisherfaces, 3 = LBPH; currently fixed to LBPH.
    choice = 3

    detector = FaceDetector(FACE_CASCADES)
    if choice == 1:
        recognizer = cv2.face.EigenFaceRecognizer_create()
        threshold = 4000
    elif choice == 2:
        recognizer = cv2.face.FisherFaceRecognizer_create()
        threshold = 300
    elif choice == 3:
        recognizer = cv2.face.LBPHFaceRecognizer_create()
        threshold = 80

    # Build the training set: grayscale images labelled by person index.
    images = []
    labels = []
    state = []
    state.append(0)  # default sentinel: nobody recognized yet
    labels_faces = {}
    for i, image_person in enumerate(faces):
        labels_faces[i] = image_person
        for image in os.listdir(DATABASE_DIR + image_person):
            images.append(
                cv2.imread(DATABASE_DIR + image_person + '/' + image, 0))
            labels.append(i)
    try:
        recognizer.train(images, np.array(labels))
    except Exception:  # was a bare except:, which also ate KeyboardInterrupt
        print(
            json.dumps({
                "message": "Do you have at least two people in the database",
                "status": "isEmpty"
            }))
        sys.exit()
    video = Camera()
    # Create the fullscreen window once; re-issuing these idempotent
    # calls on every frame was wasted work.
    cv2.namedWindow('Frame', cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty('Frame', cv2.WND_PROP_FULLSCREEN,
                          cv2.WINDOW_FULLSCREEN)
    while True:
        stroke = 1
        color = (136, 150, 0)
        frame = video.get_frame()
        faces_coord = detector.detect(frame, False)
        if len(faces_coord):
            frame, faces_img = get_images(frame, faces_coord, shape)
            for i, face_img in enumerate(faces_img):
                pred, conf = recognizer.predict(face_img)
                print(
                    json.dumps({
                        'prediction': pred,
                        'Confidence': conf,
                        'Threshold': threshold,
                    }))
                # Lower confidence value means a better LBPH match.
                if conf < threshold:
                    # Fetch data from face_entity the data from database
                    # to filter the first_name using uid
                    data = face_entity.get_face_profile(
                        labels_faces[pred].capitalize())
                    print(json.dumps({'code': str(data)}))

                    cv2.putText(frame,
                                data['firstname'] + ' | ' + str(round(conf)),
                                (faces_coord[i][0], faces_coord[i][1] - 2),
                                cv2.FONT_HERSHEY_PLAIN, 1.7, color, stroke,
                                cv2.LINE_AA)
                    # Push Available state
                    if state[0] == 0:
                        state.append(labels_faces[pred].capitalize())
                    if state[0] != str(labels_faces[pred].capitalize()):
                        if sys.argv[1] == "1":
                            face_post_module.login_employee_by_id(
                                data['employee_id'])
                            print(
                                json.dumps({
                                    'status': 'IN',
                                    'id': str(data['employee_id'])
                                }))
                        elif sys.argv[1] == "0":
                            face_post_module.logout_employee_by_id(
                                data['employee_id'])
                            print(
                                json.dumps({
                                    'status': 'OUT',
                                    'id': str(data['employee_id'])
                                }))

                        # Pop available state if they are not the same
                        state.pop(0)

                else:
                    cv2.putText(frame, "Unknown",
                                (faces_coord[i][0], faces_coord[i][1]),
                                cv2.FONT_HERSHEY_PLAIN, 1.7, color, stroke,
                                cv2.LINE_AA)

        print(json.dumps({"test": sys.argv[1]}))

        # Both sys.argv[1] branches drew the exact same banner, so the
        # duplicated if/else was collapsed into a single check.
        if state[0] != 0:
            data = face_entity.get_face_profile(12345678)
            cv2.putText(
                frame,
                str(data['firstname'] + ' ' + data['lastname']).upper(),
                (110, frame.shape[0] - 10), cv2.FONT_HERSHEY_PLAIN, 3,
                color, 3, cv2.LINE_AA)
        cv2.imshow('Frame', frame)

        # ESC (keycode 27) quits.
        if cv2.waitKey(100) & 0xFF == 27:
            sys.exit()
Ejemplo n.º 35
0
def hyperloop():
    """Main control loop of the vehicle.

    Parses command-line flags into the shared State, starts the Pi
    camera video stream plus the communication and acceleration
    threads, then drives the signal state machine until 'q' is pressed
    or the process is interrupted.
    """
    logger = Logger()
    logger.setLogLevel('info')
    logger.info('Started')

    state = State()

    # CLI flag -> (State attribute to switch on, message to log).
    flag_table = {
        "--standalone": ("Standalone", "Standalone mode activated"),
        "-s": ("Standalone", "Standalone mode activated"),
        "--nocamera": ("NoImageTransfer", "Camera image transfer X11 disabled"),
        "-n": ("NoImageTransfer", "Camera image transfer X11 disabled"),
        "--record": ("RecordImage", "Record mode activated"),
        "-r": ("RecordImage", "Record mode activated"),
        "--measure": ("MeasureMode", "Measure mode activated"),
        "-m": ("MeasureMode", "Measure mode activated"),
        "--invert": ("InvertCamera", "Inverted camera activated"),
        "-i": ("InvertCamera", "Inverted camera activated"),
    }
    for arg in sys.argv:
        entry = flag_table.get(arg)
        if entry is not None:
            attr, message = entry
            setattr(state, attr, True)
            logger.info(message)

    # Start the threaded Pi camera stream and tune it for fast motion.
    vs = PiVideoStream(resolution=(480, 368), framerate=32).start()
    piCamera = vs.camera
    piCamera.exposure_mode = 'sports'
    piCamera.ISO = 1600
    camera = Camera(vs, not state.NoImageTransfer)

    # give the sensor time to settle before the first capture
    camera.warmup()

    communication = Communication(logger)
    acceleration = Acceleration(logger)

    # reads any input incoming over UART / i2c / GPIO
    communication.readThreadStart()
    # measure acceleration
    acceleration.measureThreadStart()

    fps = FPS().start()

    # program loop
    while True:
        try:
            if (not state.Stopped and state.Loaded) or state.Standalone:
                # capture image from videostream and analyze
                camera.capture()
                fps.update()

                if (state.StopSignalNum == 0 and state.LapSignalCount < 2
                        and state.Approaching != Signal.UPPER):
                    communication.sendSpeedPercent(25)
                    state.Approaching = Signal.UPPER
                    logger.info("Approaching upper signal")
                elif (state.StopSignalNum != 0
                        and not state.StopSignalAnnounced):
                    # stop signal found: speed up and announce it
                    communication.sendSpeedPercent(100)
                    communication.buzzSignalNumber(state.StopSignalNum)
                    state.setStopSignalAnnounced(True)
                    state.Approaching = Signal.LAP
                    logger.info("Approaching lap signal")
                elif (state.StopSignalAnnounced
                        and state.LapSignalCount >= 2
                        and state.Approaching != Signal.LOWER):
                    # lap signal passed twice: decelerate
                    communication.sendSpeedPercent(25)
                    state.Approaching = Signal.LOWER
                    logger.info("Approaching lower signal")
                elif (state.StopSignalAnnounced
                        and state.StopSignalNum == state.CurrentNum
                        and not state.ApproachStop):
                    communication.sendApproachStop()
                    communication.sendSpeedPercent(25)
                    state.ApproachStop = True
                    logger.info("Approaching stop")

            if (cv2.waitKey(1) & 0xFF) == ord('q'):
                break

        except KeyboardInterrupt:
            break
        except Exception as err:
            logger.logError(err)
            traceback.print_exc(limit=3, file=sys.stdout)
    fps.stop()

    # orderly shutdown: stop the vehicle, then the worker threads
    communication.sendStartStop('stop')
    time.sleep(1)
    logger.info('FPS: ' + str(fps.fps()))
    logger.info('Aborting running threads')
    communication.readThreadStop()
    acceleration.measureThreadStop()
    logger.info('Stopped')
Ejemplo n.º 36
0
#!/usr/bin/env python
"""
Home surveillance application
"""
import time

from lib.camera import Camera
from lib.config import TOKEN_ID, REGISTRATION_FOLDER, VIDEO_TIME
from lib.telebot import Telebot
from lib.pir import MotionDetector

# Module-level singletons shared by the bot command handlers below.
camera = Camera(REGISTRATION_FOLDER)  # recordings are written under REGISTRATION_FOLDER
bot = Telebot(TOKEN_ID)  # Telegram bot authenticated with TOKEN_ID
pir = MotionDetector()  # PIR motion sensor -- presumably GPIO-backed; not used by the visible handlers


@bot.handler("/start")
def on_start():
    """Handle the /start command: enable listening and acknowledge.

    Sets the bot's listen flag, then reports back to the chat.
    """
    bot.is_listen = True
    ack = bot.send_message("Bot start")
    return ack


@bot.handler("/stop")
def on_stop():
    """
    command /stop: stop bot

    Clears the listen flag so the bot ignores further events.
    NOTE(review): unlike on_start, no acknowledgement message is sent
    and nothing is returned -- confirm this snippet was not truncated.
    """
    bot.is_listen = False
Ejemplo n.º 37
0
class Main:
    """Interactive object-tracking application.

    Pulls frames from a camera, runs every registered tracker over each
    frame and draws the resulting bounding boxes in a window.  Commands
    arriving from a CmdRunner thread are queued as Task objects and
    executed on the loop thread (the do_* methods are the command
    entry points).
    """

    def __init__(self):
        # Tasks queued by the command thread; drained by loop().
        self.tasks = []

        self.camera = Camera()
        self.window = Window('Camera')

        # When True, trackers (re-)detect their target from scratch
        # instead of continuing to track the previous position.
        self.should_detect = True
        self.trackers = []

        self.color_filter = ColorFilter()

    def add_target(self, filename):
        """Register a new tracking target loaded from *filename*."""
        self.trackers.append(Tracker(filename))

    def clear(self):
        """Reset every registered tracker."""
        for tracker in self.trackers:
            tracker.clear()

    def loop(self):
        """Main loop: execute queued tasks, grab a frame, detect/track,
        and refresh the window until the command thread exits."""
        runner = CmdRunner(self)
        runner.start()

        # Thread.isAlive() was removed in Python 3.9; is_alive() is the
        # portable spelling (available since Python 2.6).
        while runner.is_alive():
            while len(self.tasks) > 0:
                task = self.tasks.pop(0)
                task.execute()

            try:
                img = self.camera.get_frame()

                if img is None:
                    self.window.clear()
                    continue

                img = cv2.resize(img, (640, 480))

                # TODO: show color mask
                color_mask = self.color_filter.filter(img)

                next_should_detect = self.should_detect
                for tracker in self.trackers:
                    if self.should_detect:
                        rect = tracker.detect(img)

                        if rect is not None:
                            # unused `ret =` binding removed; only the
                            # side effect of update_window is needed
                            tracker.update_window(img, rect)
                            self.window.draw_rectangle(rect, (255, 0, 255))
                        next_should_detect = False

                    rect = tracker.track(img)

                    if rect is None:
                        # Target lost: force re-detection on the next frame.
                        next_should_detect = True
                        continue

                    self.window.draw_rectangle(rect, (255, 0, 0))

                self.should_detect = next_should_detect
            except Exception as e:
                # Python 2 chevron syntax (`print >> sys.stderr, e`) is a
                # SyntaxError on Python 3; use the print function instead.
                print(e, file=sys.stderr)
            finally:
                self.window.update()
                runner.join(0)

    def _add_task(self, task):
        """Queue *task* for the main loop and force re-detection."""
        self.should_detect = True
        self.tasks.append(task)

    def do_add_target(self, filename):
        """Command entry point: queue adding a new target."""
        self._add_task(Task(self.add_target, (filename, )))

    def do_clear(self):
        """Command entry point: queue clearing all trackers."""
        self._add_task(Task(self.clear))

    def do_snapshot(self, filename):
        """Command entry point: queue saving a window snapshot."""
        self._add_task(Task(self.window.snapshot, (filename, )))

    def do_switch_camera(self, cid):
        """Command entry point: queue switching to camera *cid*."""
        self._add_task(Task(self.camera.switch, (cid, )))
 def setUpClass(cls):
     """
     Initialize camera

     Creates one shared Camera writing into REGISTRATION_FOLDER for the
     whole test class.  NOTE(review): unittest requires setUpClass to be
     a @classmethod -- the decorator is not visible in this fragment;
     confirm it is present in the full file.
     """
     cls.camera = Camera(REGISTRATION_FOLDER)
import cv2
import glob
import matplotlib.pyplot as plt
from lib.camera import Camera
from lib.line_finder import LineFinder
import os
from moviepy.editor import VideoFileClip

# sample_camera controls whether we should recalibrate the camera from the sample pictures or just used the saved
# matrix from previous execution
sample_camera = False
camera = Camera()

if sample_camera:
    # Make a list of calibration images
    images = glob.glob('./camera_cal/calibration*.jpg')

    fig, axs = plt.subplots(5, 4, figsize=(16, 11))
    axs = axs.ravel()

    # Go through the images one by one
    for i, fname in enumerate(images):
        img = cv2.imread(fname)
        img = camera.sample_image(img)

        axs[i].axis('off')
        axs[i].imshow(img)

    plt.show()

    camera.calibrate()