def get_snap(cam_number):
    global APP_IP
    global APP_PORT
    cam = Cam("mecCam")
    colorSpace = request.args.get('colorSpace', default="RGB", type=str)
    colorSpace = colorSpace.upper()
    if colorSpace not in cam.supported_colorspace:
        return render_template(
            'errors_basic.html',
            code=400,
            error="colorSpace {} is not supported".format(colorSpace),
            ip=APP_IP,
            port=APP_PORT)
    status, retStr = cam.take_snap(cam_number, colorSpace)
    if status == "SUCCESS":
        image_filename = retStr
        attachment_name = request.args.get('imageName', default=image_filename, type=str)
        if not attachment_name.endswith(".jpg"):
            error_str = "Only .jpg image names are supported."
            error_code = 400
            response = make_response(
                render_template('errors_basic.html',
                                error=error_str,
                                ip=APP_IP,
                                port=APP_PORT),
                error_code)
        else:
            response = make_response(
                send_file(image_filename,
                          as_attachment=True,
                          attachment_filename=attachment_name),
                200)
    else:
        if "Cannot identify" in retStr:
            error_str = "Camera is not on PI port {}.".format(cam_number)
            error_code = 512
        elif "PERMISSION DENIED" in retStr.upper():
            error_str = "Enable the camera in the PI config."
            error_code = 512
        elif "SystemError" in retStr:
            error_str = "Device id is wrong."
            error_code = 512
        else:
            error_str = retStr
            error_code = 500
        rendered = render_template('errors_basic.html',
                                   error=error_str,
                                   ip=APP_IP,
                                   port=APP_PORT)
        response = make_response(rendered, error_code)
    return response
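# A minimal client-side sketch for the snapshot handler above, assuming the
# handler is registered on a route such as "/snap/<cam_number>" (the route
# path and host below are illustrative; only the colorSpace and imageName
# query parameters come from the handler itself).
import requests

resp = requests.get("http://localhost:5000/snap/0",   # hypothetical route/host
                    params={"colorSpace": "RGB", "imageName": "snapshot.jpg"})
if resp.status_code == 200:
    # save the returned attachment to disk
    with open("snapshot.jpg", "wb") as f:
        f.write(resp.content)
else:
    print("snapshot failed with status", resp.status_code)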
def create_list_of_cam_objects(thelist, resolution):
    result = []
    thelist2 = [item[1] for item in thelist]
    for index, item in enumerate(thelist2):
        # cam = item + "/axis-cgi/mjpg/video.cgi?resolution=1280x720&compression=25&camera=1"
        # cam = Cam(item, get_full_urls(item), 1, 1)
        cam = Cam(item, get_full_urls(item, resolution), (index + 1), 1)
        result.append(cam)
    # cam = thelist[1] + "/videostream.cgi"
    # cam = Cam(thelist[1], cam, 2, 1)
    # result.append(cam)
    return result
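# Illustrative call of create_list_of_cam_objects. The hosts and the
# "1280x720" resolution string are made-up examples; the function only needs
# each element of thelist to expose the camera host at index 1.
example_list = [("cam-a", "192.168.0.10"),
                ("cam-b", "192.168.0.11")]
cams = create_list_of_cam_objects(example_list, "1280x720")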
def __init__(self):
    self.led_on = False
    self.connected = False
    self.pattern = None
    self.color_spectrum = None
    self.error = False
    self.errormsg = None
    self.aws_access_key_id = None
    self.aws_secret_access_key = None
    self.aws_default_region = None
    self.aws_kinesis_stream_name = None
    self.device_state = {}
    if config.device_type == 'strand':
        self.strand = Strand()
    elif config.device_type == 'cam':
        self.cam = Cam()
    elif config.device_type == 'flame':
        self.flame = Flame()
def main():
    """Initialize a camera and a streamer.

    The camera is opened by the streamer's "start" method; once started,
    the streamer reads frames from the camera and sends them to the server.
    """
    cam_url = CAM_URL
    port = PORT
    server_address = SERVER_ADDRESS
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--server', required=False)
    parser.add_argument('-p', '--port', required=False)
    parser.add_argument('-c', '--cam', required=False)
    args = parser.parse_args()
    if args.port:
        port = args.port
    if args.server:
        server_address = args.server
    if args.cam:
        cam_url = args.cam
    cam = Cam(cam_url)
    streamer = Streamer(cam, server_address, port)
    streamer.start()
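# Example invocation of the script above, assuming the module is saved as
# stream_client.py and ends with the usual `if __name__ == '__main__': main()`
# guard (the file name and addresses are illustrative; the flags are the ones
# defined in main()):
#
#   python stream_client.py --server 192.168.1.20 --port 8000 --cam 0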
def parse_class_names(class_names):
    """Parse class names from a text file or a comma-separated string."""
    if len(class_names) > 0:
        if os.path.isfile(class_names):
            # try to open it to read class names
            with open(class_names, 'r') as f:
                class_names = [l.strip() for l in f.readlines()]
        else:
            class_names = [c.strip() for c in class_names.split(',')]
        for name in class_names:
            assert len(name) > 0
    else:
        raise RuntimeError("No valid class_name provided...")
    return class_names


if __name__ == '__main__':
    args = parse_args()
    if args.cpu:
        ctx = mx.cpu()
    else:
        ctx = mx.gpu(args.gpu_id)
    # parse image list
    image_list = [i.strip() for i in args.images.split(',')]
    assert len(image_list) > 0, "No valid image specified to detect"
    network = args.network
    class_names = parse_class_names(args.class_names)
    # run
    if args.identifier == -1:
        Cam(network, image_list, args.model_path, ctx, args.data_shape,
            class_names, args.thresh, num_class=args.num_class)
    elif args.identifier == 1:
        Cam_resp(network, image_list, args.model_path, ctx, args.data_shape,
                 class_names, args.thresh, num_class=args.num_class)
def printChar(pair):
    print("char sent = ", serial.convert(pair))
    print("stack = ", pair[0], " color = ", pair[1])


# SETUP
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(8, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(11, GPIO.OUT, initial=GPIO.LOW)
cam1 = Cam(0)
cam2 = Cam(1)
try:
    brain = Algorithm()
    serial = Serial()
    # toggle GPIO pin 8 three times before starting
    for i in range(3):
        GPIO.output(8, GPIO.HIGH)
        time.sleep(0.5)
        GPIO.output(8, GPIO.LOW)
        time.sleep(0.5)
    print("starting...")
    # serial.start()
    c = serial.read()
    if c == 'Z':
                ddx = cam.w / img.size[0] * k * cam.X
                for l in np.linspace(-1. / samples, 1. / samples, num=samples):
                    ddy = cam.h / img.size[1] * l * cam.Y
                    o = dz + dx + dy + ddx + ddy
                    v = o - cam.p
                    c = getColor(Ray(o, v), depth) / samples**2
                    color += c
            pixels[i, j] = tuple(map(int, 255 * color))
    return img


if __name__ == '__main__':
    import sys

    near, fov = map(float, sys.argv[1:3])
    width, height, depth, samples = map(int, sys.argv[3:7])
    cam = Cam(Vec3(0, 0, 0), Vec3(0, 0, -1), Vec3(0, 1, 0),
              near, fov, float(width) / height)
    primitives.append(
        Sphere(blue, Vec3(0, 10, -30), Vec3(0, 1, 0), Vec3(0, 0, 1), 5))
    primitives.append(
        Sphere(red, Vec3(0, 0, -30), Vec3(0, 1, 0), Vec3(0, 0, 1), 5))
    primitives.append(
        Sphere(green, Vec3(0, -10, -30), Vec3(0, 1, 0), Vec3(0, 0, 1), 5))
    primitives.append(
        Sphere(world_map, Vec3(-10, -10, -30), Vec3(0, 1, 0), Vec3(0, 0, 1), 5))
    primitives.append(
        Sphere(world_bumpy, Vec3(10, -10, -30), Vec3(0, 1, 0), Vec3(0, 0, 1), 5))
    primitives.append(
        Triangle(cyan, [Vec3(-30, -15, 0),
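# Example invocation of the ray tracer above, assuming it lives in a file
# called raytracer.py (the file name and the values below are illustrative).
# The positional arguments follow the sys.argv parsing in __main__:
# near-plane distance, field of view, then width, height, recursion depth
# and samples per axis:
#
#   python raytracer.py 1.0 60 640 480 3 2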
                    default=1)
parser.add_argument('-d',
                    help='enable draw for marks and functional areas',
                    action='store_true')
parser.add_argument('--finger',
                    help='choose the finger for control',
                    type=int,
                    default=8,
                    choices=[4, 8, 12, 16, 20])
parser.add_argument('-p',
                    help='enable camera to take photos',
                    action='store_true')
args = parser.parse_args()

if args.virtual:
    # virtual cam
    vc = VCam(video=args.video, mxhand=args.maxhands, du=args.d, f=args.finger)
    vc.start()
else:
    # own cam
    cam = Cam(video=args.video, mxhand=args.maxhands, du=args.d,
              f=args.finger, p=args.p)
    cam.open()
fov = min(w, h)
cx, cy = w // 2, h // 2
os.environ['SDL_VIDEO_CENTERED'] = '1'
pygame.display.set_caption('3D Graphics')
screen = pygame.display.set_mode((w, h))
clock = pygame.time.Clock()

# grab pygame internal methods
pygame.event.get()
pygame.mouse.get_rel()
pygame.mouse.set_visible(0)
pygame.event.set_grab(1)

# Instantiate classes
cubes = [Cube((x, 0, z)) for x, z in pacman_points]
cam = Cam((0, 0, -5))

run = True
while run:
    face_list = []
    face_color = []
    depth = []
    dt = clock.tick() / 1000
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_ESCAPE:
import time

import numpy as np

from cam import Cam
import vision
from modbus_callback import start_cb_register_modbus_server

cam = Cam()
time.sleep(2)  # give the camera time to warm up
px_mu, px_cov = vision.load_color_stats()


def cb():
    # crop the brick region, normalize to [0, 1], and classify it
    img = vision.brick_crop(cam.capture())
    img = img.astype(float) / 255
    return vision.classify(img, px_mu, px_cov)


# expose the classifier result through a Modbus register callback
start_cb_register_modbus_server({0x00: cb})
    default=0.001, help='precision', metavar='P')
parser_sim.add_argument('--gravity', '-g', type=float, default=9.8,
                        help='gravitational acceleration', metavar='G')

# TODO Simulation with conj, not simple
# TODO Export 2d polyline in DXF, ai, eps, pdf...

travel = Travel()
follower = Follower()
cam = Cam(travel, follower)

while True:
    try:
        command = input('camdesign: ').split()
        if len(command) and command[0] == 'update':
            # defaults = parser._defaults
            # parser._defaults = {}
            parser_gen_travel.set_defaults(kind=None, order=None, n=None,
                                           steps=None, x0=None, x1=None)
            parser_gen_cam.set_defaults(radius=None, ccw=None,
    data_0_vote, data_1_vote = 0, 0
    # vote over every cell of the selected block
    for i in range(x_block_size * argmax_x, x_block_size * (argmax_x + 1)):
        for j in range(y_block_size * argmax_y, y_block_size * (argmax_y + 1)):
            if data_0[i][j] > data_1[i][j]:
                data_0_vote += 1
            elif data_0[i][j] < data_1[i][j]:
                data_1_vote += 1
    return 0 if data_0_vote > data_1_vote else 1


if __name__ == '__main__':
    scaler: StandardScaler = load(SCALER_FILE_PATH)
    model = load_model(MODEL_FILE_PATH)
    cam_generator = Cam(model, WATCH_CONV_ACT_LAYER, WATCH_CLASSIFIER_LAYER)
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(LEFT_VIB_PIN, GPIO.OUT)
    GPIO.setup(RIGHT_VIB_PIN, GPIO.OUT)
    with MicStream(MICS_SAM_RATE, MICS_SAM_RATE // 2,
                   MICS_DEVICE_ID, MICS_CHANNELS) as stream:
        audio_generator = stream.generator(out_size=MICS_SAM_RATE)
        for data in audio_generator:
            left_data, right_data = data[:, 0], data[:, 1]
            left_mel = mel_spectrogram(left_data, MICS_SAM_RATE)
            right_mel = mel_spectrogram(right_data, MICS_SAM_RATE)