def __init__(self, lines):
    """Parse scene-description lines into camera, settings, materials, geometry and lights."""
    self.camera = None
    self.settings = None
    self.materials = []
    self.planes = []
    self.spheres = []
    self.boxes = []
    self.lights = []
    for line in lines:
        if line[0] != '#' and not line.isspace():
            split_line = line.split()
            if split_line[0] == 'cam':
                self.camera = Camera(split_line[1:])
            elif split_line[0] == 'set':
                self.settings = Settings(split_line[1:])
            elif split_line[0] == 'mtl':
                material = Material(split_line[1:], len(self.materials) + 1)
                self.materials.append(material)
            elif split_line[0] == 'pln':
                self.planes.append(Plane(split_line[1:], self.materials))
            elif split_line[0] == 'sph':
                self.spheres.append(Sphere(split_line[1:], self.materials))
            elif split_line[0] == 'box':
                self.boxes.append(Box(split_line[1:], self.materials))
            elif split_line[0] == 'lgt':
                self.lights.append(Light(split_line[1:]))
    self.objects = self.spheres + self.planes + self.boxes
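# A minimal usage sketch for the parser above, assuming it is the __init__ of a
# class named Scene (the class name is an assumption; the keyword prefixes
# cam/set/mtl/pln/sph/box/lgt and the skipping of '#' comments and blank lines
# come directly from the branches in the constructor):
with open("scene.txt") as f:          # hypothetical scene-description file
    scene = Scene(f.readlines())
print(len(scene.objects), "objects,", len(scene.lights), "lights")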
def start_md():
    '''Manage Motion Detector.'''
    conf = json.load(open("config.json"))

    print("[INFO]Starting Camera...")
    vs = Camera(src=0).start()
    print("[INFO]Warming Up...")
    time.sleep(conf["camera_warmup"])

    print("[INFO]Initializing motion detectors...")
    camMotion = MotionDetector(conf["min_area"], conf["delta_thresh"])

    while True:
        frame = vs.read()
        frame = np.array(frame)
        frame = imutils.resize(frame, width=500)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        locs = camMotion.update(gray)
        if len(locs) > 0:
            # Merge all motion contours into one enclosing bounding box.
            (minX, minY) = (np.inf, np.inf)
            (maxX, maxY) = (-np.inf, -np.inf)
            for l in locs:
                (x, y, w, h) = cv2.boundingRect(l)
                (minX, maxX) = (min(minX, x), max(maxX, x + w))
                (minY, maxY) = (min(minY, y), max(maxY, y + h))
            cv2.rectangle(frame, (minX, minY), (maxX, maxY), (0, 0, 255), 3)

        timestamp = datetime.now()
        ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
        cv2.putText(frame, "Perimeter Status: {}".format(camMotion.status), (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        cv2.putText(frame, ts, (10, np.shape(frame)[0] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

        if camMotion.status == "Breached":
            # Send the alert from a separate thread so the capture loop keeps running.
            t = Thread(target=alert, args=(conf, frame,))
            t.start()

        if conf["is_streaming"]:
            cv2.imshow("Security Feed", frame)
            key = cv2.waitKey(1) & 0xFF
            if key == ord("q"):
                print("[INFO]Exiting...")
                break

    cv2.destroyAllWindows()
    vs.stop()
def request_img():
    camera = Camera()
    controller = Controller()
    D1 = DD(27)
    D2 = DD(4)
    print("request_img")
    while True:
        if camera.test() > 0.2 and D2 == False and D1 == True:
            controller.Forward()
def __init__(self, width=1280, height=700):
    self.win_w = width
    self.win_h = height
    self.asteroids = deque()
    self.light = Light([0, -3, 3])
    self.skybox = Skybox(self.win_w, self.win_h)
    self.center = Sphere(0.1, [1.0, 0.0, 1.0], [1, 1, 1])
    self.paused = False
    self.score = 0
    self.camera = Camera()
    self.isRoaming = False
def main():
    if not args.game:
        argparser.print_help()
        print
        print "Available games:"
        list_games()
        exit(0)

    inipath = profiledir + args.game + ".ini"
    if not isfile(inipath):
        print "File %s not found." % inipath
        exit(2)

    gameconfig = ConfigParser.ConfigParser()
    gameconfig.read(inipath)

    print "Starting with game profile '%s'... Press Ctrl+C to stop" % args.game
    camera = Camera(gameconfig)
    xml_event_stream = get_xml_event_stream()
    for event in parser.make_gaze_event_stream(xml_event_stream):
        camera.tick(event)
def stream_v2():
    stream_mime = 'multipart/x-mixed-replace; boundary=frame'
    camera = Camera()
    camera.start()
    return Response(gen(camera), mimetype=stream_mime)
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    return Response(gen(Camera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
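# Neither route above defines the gen() helper that feeds Response(). A minimal
# sketch of what such a frame generator usually looks like, assuming the Camera
# class exposes a get_frame() method that returns one JPEG-encoded frame as
# bytes (the method name is an assumption, not taken from the snippets above):
def gen(camera):
    """Yield frames wrapped as parts of a multipart/x-mixed-replace response."""
    while True:
        frame = camera.get_frame()  # assumed API: one JPEG frame as bytes
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')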
# Configuration Parameters for Camera
rpiCam = True
cameraNum = 0
width = 800
height = 480
fps = 30

# Configuration Parameters for Video
# Video File Path:
VidPath = "./app/data/videos/example_01.mp4"

# Initialization Stage of the program:
if cam_feed:
    # Initialize the setup for Camera.
    # Import only in case of Camera to avoid conflicts on other machines.
    from modules.camera import Camera
    # Camera parameters are hard coded at this stage.
    feed = Camera(rpiCam, cameraNum, width, height, fps)
else:
    # Initialize setup for video
    feed = cv2.VideoCapture(VidPath)

# Sanity check and initialization of the "snapshots" folder in the data folder
if not os.path.isdir("./app/data/snapshots"):
    os.mkdir("./app/data/snapshots")

# First Program Cycle Declarations/Initializations:
# Frame number
imgno = 1
# Initializing the motion detector module
md = MotionDetector()
# Configuring the motion detector
parser.add_argument("-r", "--resolution", type=int, default=720, help='Resolution (width) of the webcam') parser.add_argument("-d", "--show_fps", dest='show_fps', action='store_true', help='Whether to show FPS or not') parser.add_argument("-a", "--assets_path", type=str, default=assets_path, help='Path of the assets') args = parser.parse_args() camera = Camera(assets_path=args.assets_path, cam_number=args.cam_number, resolution=args.resolution, show_fps=args.show_fps, n_frames=args.n_frames, seconds_to_be_recorded=args.predictions_delta) asl_recognizer = ASLRecognizer(camera=camera, assets_path=assets_path, predictions_delta=args.predictions_delta) camera.start() asl_recognizer.start() print(f"Press ESC to quit")
from modules.vector import Point
from modules.color import Color
from modules.blackhole import BlackHole
from modules.disk import Disk
from modules.camera import Camera
from modules.scene import Scene
from modules.engine import Engine

c_origin = Point(0, 0.7, -9.0)
c_focus = Point(0, 0, 0.0)

bh = BlackHole(c_focus, 80)
# You can specify a texture file for the accretion disk with `texture='filename.png'`
# or a color with `color=Color('#ffffff')` (the default).
disk = Disk(c_focus, 4.5 * bh.radius, 16.2 * bh.radius)

scene = Scene(
    width=500,
    height=250,
    camera=Camera(c_origin, c_focus - c_origin, 1.2),
    blackhole=bh,
    disk=disk
)

engine = Engine(scene)
engine.render()
engine.save('images/blackhole.png')
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)

CFG = read_conf("global")

stepper = Stepper(CFG['PORTE_STEPPER_1'], CFG['PORTE_STEPPER_2'],
                  CFG['PORTE_STEPPER_3'], CFG['PORTE_STEPPER_4'])
porte = Porte(stepper, CFG['PORTE_PIN_BAS'], CFG['PORTE_PIN_HAUT'])
abreuvoir = Abreuvoir(CFG['ABREUVOIR_PIN_VIDE'], CFG['ABREUVOIR_PIN_MEDIUM'],
                      CFG['ABREUVOIR_PIN_PLEIN'])
batterie = Batterie()
luminosite = Luminosite()
temp_ext = Temperature(CFG['TEMP_ID_EXT'])
temp_int = Temperature(CFG['TEMP_ID_INT'])
camera = Camera(CFG['CAMERA_WIDTH'], CFG['CAMERA_HEIGHT'])
camera_usb = Camera_usb(CFG['CAMERA_WIDTH'], CFG['CAMERA_HEIGHT'])

# Default page, redirect to the main page
@app.route('/')
def index():
    return redirect(url_for('board'))

# Main page
@app.route('/board')
def board():
    return render_template('board.html')
# Get a list of all folders in the /pi/home/Music folder and set the player to the first
playList = musicFolder.getPlayList(0)
if shuffle:
    playList.shuffle()
indexMediaFolder = 0
player.switchMedia(playList)
player.togglePlayPause()

# Set up camera object
camera = Camera(player, logger)
# Subscribe to the state-received event so that when the Arduino updates us with state we update controls
camera.events.onQRDataReceived += qrDataReceived
camera.start()

player.playFile(MediaFile(os.path.join(os.getcwd(), 'assets', 'audio', 'player-ready.ogg')))

quit = False
while not quit:
    # Work out position and duration and loop to start if at end
    pos = player.position()
    dur = player.duration()
def main():
    pygame.init()
    pygame.key.set_repeat(1, 25)
    pygame.display.set_caption('ColdLine Manhattan')
    start_screen(screen, HEIGHT)
    start_time = pygame.time.get_ticks()
    running = True
    t = pygame.time.get_ticks()
    player = generate_level(level_map, player_image, enemy_image)
    for enemy in enemies_group:
        enemy.player = player
    camera = Camera()
    pygame.mixer.music.load(music['song2'])
    pygame.mixer.music.queue(music['song1'])
    pygame.mixer.music.set_volume(VOLUME + 0.05)
    pygame.mixer.music.play(-1)
    while running:
        # Turn the cursor into a crosshair
        try:
            pygame.mouse.set_cursor(pygame.cursors.broken_x)
        except:
            # pygame.mouse.set_cursor(pygame.cursors.arrow)
            pass
        # wasd_arrows_keys = [pygame.K_w, pygame.K_a, pygame.K_s, pygame.K_d,
        #                     pygame.K_LEFT, pygame.K_RIGHT, pygame.K_DOWN, pygame.K_UP]
        # keys = pygame.key.get_pressed()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
                terminate()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    running = False
                    terminate()
            if event.type == pygame.KEYUP:
                player.state = player.IDLE
        player.move()
        for enemy in enemies_group:
            enemy.move()
            if enemy.state == enemy.WALKING:
                now = pygame.time.get_ticks()
                if now - enemy.time > 250:
                    enemy.time = now
                    sound = random.choice(sounds['footsteps'])
                    sound.set_volume(VOLUME)
                    sound.play()
        if player.state == player.WALKING:
            now = pygame.time.get_ticks()
            if now - t > 250:
                t = now
                sound = random.choice(sounds['footsteps'])
                sound.set_volume(VOLUME)
                sound.play()
        screen.fill(pygame.Color(82, 0, 89))
        # Shift the camera
        camera.update(player)
        for sprite in all_sprites:
            if isinstance(sprite, Player):
                player.hitbox.x += camera.dx
                player.hitbox.y += camera.dy
            elif isinstance(sprite, Enemy):
                sprite.hitbox.x += camera.dx
                sprite.hitbox.y += camera.dy
            else:
                camera.apply(sprite)
        camera.apply_rect(level_map_rect)
        screen.blit(level_map_img, level_map_rect)
        hits = pygame.sprite.groupcollide(enemies_group, player_bullets_group, False, True)
        for hit in hits:
            hit.kill()
        hits = pygame.sprite.groupcollide(player_group, enemies_bullets_group, False, True)
        if hits:
            running = False
            game_over_screen(screen, WIDTH, HEIGHT)
            break
        if len(enemies_group) == 0:
            running = False
            finish_time = pygame.time.get_ticks()
            all_time = finish_time - start_time
            win_screen(screen, WIDTH, HEIGHT, all_time)
        # Draw the sprites
        player_group.draw(screen)
        enemies_group.draw(screen)
        player_bullets_group.draw(screen)
        enemies_bullets_group.draw(screen)
        # Update the sprites
        player.update(pygame.mouse.get_pos())
        enemies_group.update(player.rect.center)
        player_bullets_group.update()
        enemies_bullets_group.update()
        obstacles_group.update()
        # FPS counter in the corner of the screen
        font = pygame.font.Font(None, 30)
        fps = font.render(f'FPS: {int(clock.get_fps())}', 1, pygame.Color('red'))
        fps_rect = fps.get_rect()
        screen.blit(fps, fps_rect)
        # Elapsed time
        font = pygame.font.Font(None, 75)
        now = pygame.time.get_ticks()
        time = font.render(f'Time: {round((now - start_time) / 1000, 1)}s', 1,
                           pygame.Color('white'))
        time_rect = time.get_rect()
        time_rect = time_rect.move(WIDTH - time_rect.width - 20, 10)
        time_shadow = font.render(f'Time: {round((now - start_time) / 1000, 1)}s', 1,
                                  pygame.Color('black'))
        time_shadow_rect = time_shadow.get_rect()
        time_shadow_rect = time_shadow_rect.move(time_rect.x + 2, time_rect.y + 2)
        screen.blit(time_shadow, time_shadow_rect)
        screen.blit(time, time_rect)
        pygame.display.flip()
        clock.tick(FPS)
import sys

# The single command-line argument must be either 'text' or 'speech'.
if len(sys.argv) != 2 or sys.argv[1] not in ('text', 'speech'):
    print('Usage: python3 {} speech/text'.format(sys.argv[0]))
    exit(-1)

import RPi.GPIO as gpio
from flask import Flask
from flask import jsonify
from modules.camera import Camera
from modules.controller import Controller
from rfid_thread import rfid_thread

mode = sys.argv[1]
SUBSCRIPTION_KEY = 'PUT SUBSCRIPTION KEY HERE'
if mode == 'text':
    camera = Camera(SUBSCRIPTION_KEY)

app = Flask(__name__)
controller = Controller(23, 18)  # motor_pin, servo_pin

@app.route('/')
def root():
    return 'woooow'

@app.route('/forward')
def forward():
    controller.forward()
    return jsonify(success=True)
import codecs
import yaml

from modules.camera import Camera
from screens.mainscreen import MainScreen
from ui.ui import UserInterface


def read_config(filepath):
    # Read the configuration file
    config = {}
    with codecs.open(filepath, 'r', encoding='utf8') as f:
        yml_dict = yaml.safe_load(f)
        for k in yml_dict:
            config[k] = yml_dict[k]
    return config


if __name__ == "__main__":
    config = read_config('config.yml')
    cam = Camera(rpiCam=config['rpi_camera'], cameraNum=config['camera_number'],
                 width=config['camera_width'], height=config['camera_height'],
                 fps=config['camera_fps'])
    screen = MainScreen(config, cam)
    ui = UserInterface(screen, (800, 480), True, 60, True)

    while True:
        image = cam.grabFrame()
        if image is not None:
            screen.setImage(image)
        ui.tick()

    cam.cleanup()
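# A config.yml that read_config() above would accept might look like the block
# below; the key names come from the config[...] lookups in this snippet, while
# the values are only plausible assumptions:
#
#   rpi_camera: true
#   camera_number: 0
#   camera_width: 800
#   camera_height: 480
#   camera_fps: 30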
import os, sys, time
from datetime import datetime
import threading

from modules.camera import Camera
from modules.body import Body
from modules.config import Config
from modules.sensors import Sensors

body = Body()
camera = Camera()
config = Config()
sensors = Sensors()
sensors.daemon = True
sensors.start()

try:
    while True:
        time.sleep(0.1)
        Config.read_config(config)  # Keep reading config for changes
        Body.move(body, Config.retrieve(config, 'Body', 'direction'),
                  Sensors.retrieve(sensors), config)
        Camera.move(camera, Config.retrieve(config, 'Head', 'x'),
                    Config.retrieve(config, 'Head', 'y'))
except KeyboardInterrupt:
    Sensors.__exit__(sensors)
    Body.__exit__(body)
    Camera.__exit__(camera)
from modules.servo import ServoMotor
from modules.mic import Mic
from modules.camera import Camera
from modules.vector import vector_to_value, convert_to_servo_value
import random, time
import numpy as np

if __name__ == "__main__":
    # Camera
    path = "./modules/data/shape_predictor_68_face_landmarks.dat"
    cam0 = Camera(0, path)
    cam1 = Camera(2, path)
    cams = [cam0, cam1]

    # Mic
    mic0 = Mic(0)
    mic1 = Mic(2)
    mics = [mic0, mic1]
    sound_level = 0
    level_count = 0
    level_count_limit = 10

    # Servo
    servoMotors = []
    servoMotors.append(ServoMotor(Channel=3, ZeroOffset=0))
    servoMotors.append(ServoMotor(Channel=0, ZeroOffset=0))
    servoMotors.append(ServoMotor(Channel=1, ZeroOffset=0))
    servoMotors.append(ServoMotor(Channel=2, ZeroOffset=0))

    # Initialize servo angles
    for i, servoMotor in enumerate(servoMotors):