Example #1
from car import Car
from vision import Vision
from camera import Camera
import cv2
import numpy as np

if __name__ == '__main__':
    #car = Car(18,12)
    vision = Vision(0.1)
    camera = Camera()
    
    #car.reset()

    while True:
        img, depth = camera.getImages()
        img2 = img.copy()
        if depth is None:
            #car.stop()
            #car.reset()
            break

        ret = vision.processFrame(depth)
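        # ret is an action code: 1 overlays "BREAK", 3 "STEER LEFT", 2 "STEER RIGHT" (see below)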
        print(ret)
        font = cv2.FONT_HERSHEY_SIMPLEX
        print(img.shape)
        if ret == 1:
            cv2.putText(img2, "BREAK", (90, 120), font, 1.5, (0, 0, 255), 5)
        elif ret == 3:
            cv2.putText(img2, "STEER LEFT", (30, 120), font, 1.5, (0, 0, 255), 5)
        elif ret == 2:
            cv2.putText(img2, "STEER RIGHT", (30, 120), font, 1.5, (0, 0, 255), 5)
Example #2
from camera import Camera

cams = []
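# one ONVIF-capable device per entry; the '******' values are masked credentials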

cams.append(
    Camera(
        id=None,
        name=None,
        ip='192.168.0.102',
        onvif='10080',
        #rtsp = '10554',
        username='******',
        password='******',
        socks=None))
Example #3
def iot_camera_run():
    return Response(iot_camera_start(Camera()),
                    mimetype="multipart/x-mixed-replace; boundary=frame")
Example #4
def main():
    """Main function, it implements the application loop"""
    # Initialize pygame, with the default parameters
    pygame.init()

    # Define the size/resolution of our window
    res_x = 640
    res_y = 480

    # Create a window and a display surface
    screen = pygame.display.set_mode((res_x, res_y))

    # Create a scene
    scene = Scene("TestScene")
    scene.camera = Camera(False, res_x, res_y)

    # Moves the camera back 2 units
    scene.camera.position -= Vector3(0, 0, 3)

    # Create a pyramid and place it in the scene, at position (0,0,0)
    obj1 = Object3d("TestObject")
    obj1.scale = Vector3(1, 1, 1)
    obj1.position = Vector3(0, 0, 0)
    obj1.mesh = Mesh.create_pyramide(5)
    obj1.material = Material(Color(1, 0, 0, 1), "TestMaterial1")
    scene.add_object(obj1)

    # Timer
    delta_time = 0
    prev_time = time.time()

    pygame.mouse.set_visible(True)
    pygame.event.set_grab(False)

    # Game loop, runs forever
    while True:
        # Process OS events
        for event in pygame.event.get():
            # Checks if the user closed the window
            if event.type == pygame.QUIT:
                # Exits the application immediately
                return

        key_state = pygame.key.get_pressed()

        if key_state[pygame.K_ESCAPE]:
            return

        if key_state[pygame.K_LEFT]:
            angle = 0.3
            axis = Vector3(0, 1, 0)
            axis.normalize()
            q = Quaternion.AngleAxis(axis, math.radians(angle) * 0.2)
            obj1.rotation = q * obj1.rotation

        if key_state[pygame.K_RIGHT]:
            angle = -0.3
            axis = Vector3(0, 1, 0)
            axis.normalize()
            q = Quaternion.AngleAxis(axis, math.radians(angle) * 0.2)
            obj1.rotation = q * obj1.rotation

        if key_state[pygame.K_DOWN]:
            angle = -0.3
            axis = Vector3(1, 0, 0)
            axis.normalize()
            q = Quaternion.AngleAxis(axis, math.radians(angle) * 0.2)
            obj1.rotation = q * obj1.rotation

        if key_state[pygame.K_UP]:
            angle = 0.3
            axis = Vector3(1, 0, 0)
            axis.normalize()
            q = Quaternion.AngleAxis(axis, math.radians(angle) * 0.2)
            obj1.rotation = q * obj1.rotation

        if key_state[pygame.K_PAGEUP]:
            angle = 0.3
            axis = Vector3(0, 0, 2)
            axis.normalize()
            q = Quaternion.AngleAxis(axis, math.radians(angle) * 0.2)
            obj1.rotation = q * obj1.rotation

        if key_state[pygame.K_PAGEDOWN]:
            angle = -0.3
            axis = Vector3(0, 0, 1)
            axis.normalize()
            q = Quaternion.AngleAxis(axis, math.radians(angle) * 0.2)
            obj1.rotation = q * obj1.rotation

        if key_state[pygame.K_w]:
            obj1.position = Vector3(obj1.position.x, obj1.position.y + 0.0005,
                                    obj1.position.z)

        if key_state[pygame.K_s]:
            obj1.position = Vector3(obj1.position.x, obj1.position.y - 0.0005,
                                    obj1.position.z)

        if key_state[pygame.K_a]:
            obj1.position = Vector3(obj1.position.x - 0.0005, obj1.position.y,
                                    obj1.position.z)

        if key_state[pygame.K_d]:
            obj1.position = Vector3(obj1.position.x + 0.0005, obj1.position.y,
                                    obj1.position.z)

        if key_state[pygame.K_q]:
            obj1.position = Vector3(obj1.position.x, obj1.position.y,
                                    obj1.position.z + 0.0005)

        if key_state[pygame.K_e]:
            obj1.position = Vector3(obj1.position.x, obj1.position.y,
                                    obj1.position.z - 0.0005)

        # Clears the screen with black (0, 0, 0)
        screen.fill((0, 0, 0))

        scene.render(screen)
        # Swaps the back and front buffer, effectively displaying what we rendered
        pygame.display.flip()

        # Updates the timer, so we know how long it has been since the last frame
        delta_time = time.time() - prev_time
        prev_time = time.time()
Example #5
    def __init__(self, filepath_prefix, participant_directory, htwt):

        ## load participant info
        participant_info = load_pickle(participant_directory +
                                       "/participant_info.p")
        print("participant directory: ", participant_directory)
        for entry in participant_info:
            print(entry, participant_info[entry])

        self.gender = participant_info['gender']
        self.height_in = participant_info['height_in']
        self.weight_lbs = participant_info['weight_lbs']
        self.adj_2 = participant_info['adj_2']

        if self.gender == "m":
            model_path = '/home/henry/git/SMPL_python_v.1.0.0/smpl/models/basicModel_m_lbs_10_207_0_v1.0.0.pkl'
        else:
            model_path = '/home/henry/git/SMPL_python_v.1.0.0/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl'

        self.m = load_model(model_path)
        self.m.pose[41] = -np.pi / 6 * 0.9
        self.m.pose[44] = np.pi / 6 * 0.9
        self.m.pose[50] = -np.pi / 3 * 0.9
        self.m.pose[53] = np.pi / 3 * 0.9
        self.ALL_VERTS = np.array(self.m.r)

        #participant_directory2 = "/media/henry/multimodal_data_2/CVPR2020_study/S187"
        #participant_info2 = load_pickle(participant_directory2+"/participant_info.p")
        self.calibration_optim_values = participant_info['cal_func']
        #self.calibration_optim_values = [-0.171537,   -4.05880298, -1.51663182,  0.08712198,  0.03664871,  0.09108604,  0.67524232]

        self.tf_corners = participant_info['corners']

        ## Load SMPL model
        self.filepath_prefix = filepath_prefix

        self.index_queue = []
        if self.gender == "m":
            model_path = filepath_prefix + '/git/SMPL_python_v.1.0.0/smpl/models/basicModel_m_lbs_10_207_0_v1.0.0.pkl'
        else:
            model_path = filepath_prefix + '/git/SMPL_python_v.1.0.0/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl'

        self.reset_pose = False
        self.m = load_model(model_path)

        self.marker0, self.marker1, self.marker2, self.marker3 = None, None, None, None
        self.pressure = None
        self.markers = [self.marker0, self.marker1, self.marker2, self.marker3]

        self.point_cloud_array = np.array([[0., 0., 0.]])
        self.pc_isnew = False

        self.CTRL_PNL = {}
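        # CTRL_PNL: a single "control panel" dict of flags and constants used throughout this class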
        self.CTRL_PNL['batch_size'] = 1
        self.CTRL_PNL['loss_vector_type'] = 'anglesDC'
        self.CTRL_PNL['verbose'] = False
        self.CTRL_PNL['num_epochs'] = 101
        self.CTRL_PNL['incl_inter'] = True
        self.CTRL_PNL['shuffle'] = False
        self.CTRL_PNL['incl_ht_wt_channels'] = htwt
        self.CTRL_PNL['incl_pmat_cntct_input'] = True
        self.CTRL_PNL['num_input_channels'] = 3
        self.CTRL_PNL['GPU'] = GPU
        self.CTRL_PNL['dtype'] = dtype
        self.CTRL_PNL['repeat_real_data_ct'] = 1
        self.CTRL_PNL['regr_angles'] = 1
        self.CTRL_PNL['dropout'] = DROPOUT
        self.CTRL_PNL['depth_map_labels'] = False
        self.CTRL_PNL['depth_map_output'] = True
        # set to True when working in a two-part regression
        self.CTRL_PNL['depth_map_input_est'] = False
        # holds betas and root the same as the prior estimate
        self.CTRL_PNL['adjust_ang_from_est'] = self.CTRL_PNL['depth_map_input_est']
        self.CTRL_PNL['clip_sobel'] = True
        self.CTRL_PNL['clip_betas'] = True
        self.CTRL_PNL['mesh_bottom_dist'] = True
        self.CTRL_PNL['full_body_rot'] = True  #False
        self.CTRL_PNL['normalize_input'] = True  #False
        self.CTRL_PNL['all_tanh_activ'] = True  #False
        self.CTRL_PNL['L2_contact'] = True  #False
        self.CTRL_PNL['pmat_mult'] = 5
        self.CTRL_PNL['cal_noise'] = False
        self.CTRL_PNL['output_only_prev_est'] = False
        self.CTRL_PNL['double_network_size'] = False
        self.CTRL_PNL['first_pass'] = True

        if self.CTRL_PNL['cal_noise']:
            # with calibration noise we need to recompute the contact input every batch
            self.CTRL_PNL['incl_pmat_cntct_input'] = False
            self.CTRL_PNL['clip_sobel'] = False

        if self.CTRL_PNL['incl_pmat_cntct_input']:
            self.CTRL_PNL['num_input_channels'] += 1
        if self.CTRL_PNL['depth_map_input_est']:  # for a two-part regression
            self.CTRL_PNL['num_input_channels'] += 3
        self.CTRL_PNL['num_input_channels_batch0'] = np.copy(
            self.CTRL_PNL['num_input_channels'])
        if self.CTRL_PNL['incl_ht_wt_channels']:
            self.CTRL_PNL['num_input_channels'] += 2
        if self.CTRL_PNL['cal_noise']:
            self.CTRL_PNL['num_input_channels'] += 1

        pmat_std_from_mult = [
            'N/A', 11.70153502792190, 19.90905848383454, 23.07018866032369,
            0.0, 25.50538629767412
        ]
        if self.CTRL_PNL['cal_noise'] == False:
            sobel_std_from_mult = [
                'N/A', 29.80360490415032, 33.33532963163579, 34.14427844692501,
                0.0, 34.86393494050921
            ]
        else:
            sobel_std_from_mult = [
                'N/A', 45.61635847182483, 77.74920396659292, 88.89398421073700,
                0.0, 97.90075708182506
            ]

        self.CTRL_PNL['norm_std_coeffs'] = [
            1. / 41.80684362163343,  # contact
            1. / 16.69545796387731,  # pos est depth
            1. / 45.08513083167194,  # neg est depth
            1. / 43.55800622930469,  # cm est
            1. / pmat_std_from_mult[self.CTRL_PNL['pmat_mult']],  # pmat x5
            1. / sobel_std_from_mult[self.CTRL_PNL['pmat_mult']],  # pmat sobel
            1. / 1.0,  # bed height mat
            1. / 1.0,  # OUTPUT DO NOTHING
            1. / 1.0,  # OUTPUT DO NOTHING
            1. / 30.216647403350,  # weight
            1. / 14.629298141231,  # height
        ]

        self.CTRL_PNL['filepath_prefix'] = '/home/henry/'

        # we need all the vertices if we're going to regress the depth maps
        if self.CTRL_PNL['depth_map_output']:
            self.verts_list = "all"

        self.TPL = TensorPrepLib()

        self.count = 0

        self.CTRL_PNL['aws'] = False
        self.CTRL_PNL['lock_root'] = False

        self.bridge = CvBridge()
        self.color, self.depth_r, self.pressure = 0, 0, 0

        self.kinect_im_size = (960, 540)
        self.pressure_im_size = (64, 27)
        self.pressure_im_size_required = (64, 27)

        # initialization of kinect and thermal cam calibrations from YAML files
        dist_model = 'rational_polynomial'
        self.kcam = Camera('kinect', self.kinect_im_size, dist_model)
        self.kcam.init_from_yaml(
            osp.expanduser(
                '~/catkin_ws/src/multimodal_pose/calibrations/kinect.yaml'))

        # we are at qhd not hd so need to cut the focal lengths and centers in half
        self.kcam.K[0:2, 0:3] = self.kcam.K[0:2, 0:3] / 2

        # print(self.kcam.K)

        self.new_K_kin, roi = cv2.getOptimalNewCameraMatrix(
            self.kcam.K, self.kcam.D, self.kinect_im_size, 1,
            self.kinect_im_size)

        # print(self.new_K_kin)

        self.drawing = False  # true if mouse is pressed
        self.mode = True  # if True, draw rectangle. Press 'm' to toggle to curve
        self.ix, self.iy = -1, -1
        self.label_index = 0
        self.coords_from_top_left = [0, 0]
        self.overall_image_scale_amount = 0.85
        self.depthcam_midpixel = [0, 0]
        self.depthcam_midpixel2 = [0, 0]
        self.select_new_calib_corners = {}
        self.select_new_calib_corners["lay"] = True
        self.select_new_calib_corners["sit"] = True
        self.calib_corners = {}
        self.calib_corners["lay"] = 8 * [[0, 0]]
        self.calib_corners["sit"] = 8 * [[0, 0]]

        self.final_dataset = {}

        self.filler_taxels = []
        for i in range(28):
            for j in range(65):
                self.filler_taxels.append([i - 1, j - 1, 20000])
        self.filler_taxels = np.array(self.filler_taxels).astype(int)
Example #6
black = 0, 0, 0

pygame.init()

planets1 = simulation.generate_star_system_config("Sol", (0, 0, 0), 5)
# planets2 = simulation.generate_star_system_config("Sol", (-15000, 1000, 0), 2)
# planets3 = simulation.generate_star_system_config("Sol", (8000, -10000, 4000), 1)
# planets4 = simulation.generate_star_system_config("Sol", (-8000, 1000, 8000), 3)
# planets5 = simulation.generate_star_system_config("Sol", (4000, -4000, 4000), 1)
# planets = planets1 + planets2 + planets3 + planets4 + planets5
planets = planets1

planets2 = simulation.generate_star_system_config("Sol", (10, 10, 0), 5)

sim = game.GravitySimulationSystem(planets, config)
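# camera placed at (0, -5000, 300); its second argument matches the window dimensions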
cam = Camera(np.array([0, -5000, 300]), config["dimensions"])
screen = pygame.display.set_mode(config["dimensions"])
background = pygame.Surface(config["dimensions"])

background.fill(black)
sim.draw_background(background, cam)

pygame.mixer.init()
music = pygame.mixer.Sound("./sound/spheric_lounge_books_of_mantra.ogg")
music.play()

clock = pygame.time.Clock()
clock.tick()
clock.get_time()

pygame.key.set_repeat(10, 10)
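# held keys now generate repeated KEYDOWN events: 10 ms delay, then every 10 ms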
Example #7
import pygame
import numpy as np
from settings import *
from environment import Environment
from camera import Camera

pygame.init()

map_image: pygame.Surface = pygame.image.load('images/map.bmp')
screen_size = DEFAULT_RESOLUTION
screen = pygame.display.set_mode(screen_size, flags=pygame.RESIZABLE)

environment = Environment(map_image)
environment.add_random_creature()

camera = Camera(environment)
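# the camera consumes all remaining input events via process_event() in the loop below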

clock = pygame.time.Clock()
running = True
while running:
    events = pygame.event.get()
    for event in events:
        if event.type == pygame.QUIT:
            running = False
        elif event.type == pygame.VIDEORESIZE:
            screen = pygame.display.set_mode(event.size,
                                             flags=pygame.RESIZABLE)
        else:
            camera.process_event(event)

    screen.blit(map_image, (0, 0))
Example #8
def main():
	# Parse command line
	if len(sys.argv) < 2:
		print("python main.py mode <query_image_path> [ <train_video_path> ]")
		return 1

	# Extract features and descriptors from query image
	query_image_path = sys.argv[1]
	print("Query Image Path: {}".format(query_image_path))
	marker = Marker()  # marker detector instance
	ret = marker.query_marker(query_image_path)  # load the query features; the query is a scan of the card
	if ret == -1:
		print("Unable to get query image...terminating...")
		return

	cap = None
	if len(sys.argv) == 3:
		train_image_path = sys.argv[2] #live scene
		print("Using video file...Training Image Path: {}".format(train_image_path))
		cap = cv2.VideoCapture(train_image_path)
	else:
		print("Using web cam")
		cap = cv2.VideoCapture(0)

	# For loading and saving videos
	id_ = 3
	start_frame = 0
	# Get metadata from input movie
	cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
	fps = cap.get(cv2.CAP_PROP_FPS)
	frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
	frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
	fourcc = cv2.VideoWriter_fourcc(*'MJPG')
	# Create video writer
	video_writer = cv2.VideoWriter('videos/output_{}.avi'.format(id_), fourcc, fps,
									(frame_width, frame_height))

	i = 0
	while True:
		ret, frame = cap.read()
		if not ret:
			break
		i += 1
		# Skip every 5th frame
		if i % 5 == 0:
			continue
		ret, outlined_frame, matches_image = marker.find_marker(frame)  # outline drawn on the webcam frame (blue)
		if ret != -1:  # if no error
			frame = outlined_frame
			marker.estimate_pose(outlined_frame, Camera())  # get pose from the drawn image
			cv2.imshow('Matches', matches_image)

		cv2.imshow('AR Frame',frame)
		key = cv2.waitKey(1)
		print(key)
		if key & 0xFF == ord('q'):
			break
		video_writer.write(frame)

	cap.release()
	video_writer.release()
	cv2.destroyAllWindows()
Example #9
    def setUp(self):
        self.camera = Camera(800, 800, 10, 10)
Example #10
                        '--logfile',
                        action='store',
                        help="save stats to file")
    parser.add_argument('-w',
                        '--wristband',
                        action='store_true',
                        help="enable bluetooth wristband")
    args = parser.parse_args()

    wrist = None
    if args.wristband:
        wrist = Wristband()
        wrist.start()

    if args.camera:
        neural = Neural(parameters)
        camera = Camera(parameters, neural)
        camera.start()
        print("* Camera loop started")
        _socketHandler = (r"/websocket", SocketHandler, {'camera': camera})

    _statusHandler = (r"/status", StatusHandler, {
        'io': hardware,
        'wristband': wrist
    })

    _staticHandler = (r"/(.*)", tornado.web.StaticFileHandler, {
        'path': os.path.dirname(os.path.realpath(__file__)) + '/web/',
        'default_filename': 'index.html'
    })

    if args.stream and args.camera:
Example #11
    ns = 100  # samples per pixel

    lower_left = Vec3(-2, -1, -1)
    horizontal = Vec3(4, 0, 0)
    vertical = Vec3(0, 2, 0)
    origin = Vec3(0, 0, 0)
    world = HitableList()
    world.append(Sphere(Vec3(0, 0, -1), 0.5, Lambertian(Vec3(0.8, 0.3, 0.3))))
    world.append(
        Sphere(Vec3(0, -100.5, -1), 100, Lambertian(Vec3(0.8, 0.8, 0.0))))
    world.append(
        Sphere(Vec3(1, 0, -1), 0.5, Metal(Vec3(0.8, 0.6, 0.2), fuzz=0.3)))
    world.append(
        Sphere(Vec3(-1, 0, -1), 0.5, Metal(Vec3(0.8, 0.8, 0.8), fuzz=1.0)))

    cam = Camera()
    pbar = ProgressBar(
        widgets=['Percentage ',
                 Percentage(), ' ',
                 ETA(), ' ',
                 Bar()],
        maxval=nx * ny).start()

    with open('image.ppm', 'w') as f:
        f.write('P3\n{} {}\n255\n'.format(nx, ny))
        for y, j in enumerate(range(ny - 1, -1, -1)):
            for i in range(nx):
                col = Vec3(0, 0, 0)
                for _ in range(ns):
                    u = float(i + random()) / nx
                    v = float(j + random()) / ny
Example #12
    Texture_Check(6, Colour(0, 0, 0), Colour(0.5, 0.5, 0.5)))

scene = Scene([
    Sphere(Point3(0.35, 0.6, 0.5), 0.25, SHINY_BLUE),
    #Difference([
    Intersection([  # Cube
        #Plane(Point3(0.2,0.0,0.5), Vector3(0,-1,0), CHECK_FLOOR),
        Plane(Point3(0.1, 0.175, 0.8), Vector3(0.5, 1, 0.1), SHINY_BLUE),
        #Plane(Point3(0.1,0.1,0.5), Vector3(-1,0,0), CHECK_FLOOR),
        #Plane(Point3(0.4,0.1,0.5), Vector3( 1,0,0), CHECK_FLOOR),
        #Plane(Point3(0.5,0.1,0.8), Vector3(0,0, 1), CHECK_FLOOR),
        #Plane(Point3(0.5,0.1,0.5), Vector3(0,0,-1), CHECK_FLOOR),
        Sphere(Point3(0.1, 0.175, 0.8), 0.175, SHINY_BLUE),
    ]),
    #Sphere(Point3(0.1,0.175,0.8), 0.165, SHINY_BLUE)]),
    #Sphere(Point3(0.75,0.15,.2), 0.15, SHINY_RED),
    Plane(Point3(0, 0, 0), Vector3(0, 1, 0), CHECK_FLOOR)
])

scene.lights = [
    #Light(scene, unit(Vector3(2,5,3)), Colour(0.6, 0.6, 0.6)),
    #Light(scene, unit(Vector3(-4,3,0)), Colour(0.7, 0.7, 0.7)),
    PointLight(scene, Point3(.5, 1.1, 1.2), Colour(0.9, 0.9, 0.9)),
]
scene.background = Colour(0, 0, 0)
scene.ambient = Colour(0.4, 0.4, 0.4)

camera = Camera(scene, Point3(0.5, 0.2, 1.6), WIN_SIZE)
camera.lookAt(Point3(0.5, 0.2, 0.3))
#camera.lookAt(Point3(0.1,0.1, 0.9))
Example #13
from camera import Camera, CameraModels

import cv2
import numpy as np
import os

from tqdm import tqdm

camera_model = CameraModels.FISHEYE
retval, K, d, rvecs, tvecs, obj_points_temp, img_points_temp, not_used = np.load(
    'calibration_file.npy', allow_pickle=True)

cam = Camera(camera_matrix=K,
             distortion_coefficients=d,
             camera_model=camera_model)

image_size = (640, 480)
image_fov = (150, 120)
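# intended size and field of view for the undistorted output images (usage not shown in this excerpt)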

show_image = False

path = 'images'

path_in = os.path.join(path, 'in/')
path_out = os.path.join(path, 'out/')

files = os.listdir(path_in)

#print(f'Searching: {}')
Example #14

if __name__ == '__main__':

    # class that creates the targets
    tempo = time.time()
    alvo = drawObj.Alvo()
    # counts frames until a new target is drawn
    alvoQuadrosCount = 0
    # optical-flow detection class
    fluxo = fluxo.Fluxo()
    # current application state
    estado = 0
    salvo = 'user.png'
    user1 = cv2.imread(salvo, 0)
    cam = Camera(1)
    faceTracker = roi.Detect()
    recon = FaceRecognition()
    contador = 0
    while True:
        # print('estado = ', estado)
        # capture a frame
        frame = cam.preprocessa()
        # detect faces
        frame = faceTracker.detectar(**frame)
        #if 'gray' in frame.keys():
        #    cv2.imshow('gray', frame['gray'])
        #if 'roi_face' in frame.keys():
        #    cv2.imshow('roi_face', frame['roi_face'])
        if 'roi_eyes' in frame.keys():
            #cv2.imshow('roi_eyes', frame['roi_eyes'])
Example #15
    value_serializer=lambda v: json.dumps(v).encode('utf-8'),
    bootstrap_servers=statusKafka)
statusProducer.send(statusTopic, {
    "status": "starting",
    "client": "imagesfromopencv",
    "language": "python"
})

imagesService = env.get_service(name='raw-images-topic')
if imagesService is None:
    imagesKafka = "localhost:9092"
    imagesTopic = "opencv-kafka-demo-raw-images"
else:
    imagesKafka = imagesService.credentials.get("hostname")
    imagesTopic = imagesService.credentials.get("topicName")

imagesProducer = KafkaProducer(bootstrap_servers=imagesKafka)

# import camera driver
if os.environ.get('CAMERA'):
    Camera = import_module('camera_' + os.environ['CAMERA']).Camera
else:
    from camera import Camera

camerastream = Camera()
while True:
    frame = camerastream.get_frame()
    # print("frame")
    imagesProducer.send(imagesTopic, frame)
    # statusProducer.send(statusTopic, {"status":"image-sent", "client": "imagesfromopencv", "camera": camerastream.__class__.__name__})
Example #16
    df_solar = pd.read_csv("data/solar.csv", delimiter=";", skiprows=1)
    S = interp1d(df_solar["Wavelength (nm)"],
                 df_solar["Extraterrestrial W*m-2*nm-1"],
                 fill_value="extrapolate")
    # W per meter squared per nanometer
    return S


if __name__ == "__main__":
    M = get_mirror()
    Q = get_detector()
    S = get_solar()

    print(snr(27000))

    CoCa = Camera()

    def integrand(w, alpha=0):
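        # signal integrand: solar spectrum S weighted by mirror response M,
        # detector response Q, and the rock reflectance model at phase angle alpha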
        return w * M(w) * Q(w) * ref_rock(w, alpha).T * S(w)

    phase_angle = np.arange(1, 90, 10)

    snr_vals = []
    snr_vals_80 = []
    signals = []
    signals_80 = []
    centers = range(450, 1000, 50)
    t_exp = 0.005
    t = []
    t_sat = []
    df = pd.read_csv("data/texp.csv")
Example #17
from sys import stdout
from makeup_artist import Makeup_artist
import logging
from flask import Flask, render_template, Response
from flask_socketio import SocketIO
from camera import Camera
from utils import base64_to_pil_image, pil_image_to_base64

app = Flask(__name__)
app.logger.addHandler(logging.StreamHandler(stdout))
app.config['SECRET_KEY'] = 'secret!'
app.config['DEBUG'] = True
socketio = SocketIO(app)
camera = Camera(Makeup_artist())


@socketio.on('input image', namespace='/test')
def test_message(input):
    input = input.split(",")[1]
    camera.enqueue_input(input)
    #camera.enqueue_input(base64_to_pil_image(input))


@socketio.on('connect', namespace='/test')
def test_connect():
    app.logger.info("client connected")


@app.route('/')
def index():
    """Video streaming home page."""
Example #18
import cv2
import pickle
import os
import numpy as np

# All this to include camera.py
from os.path import dirname, abspath
import sys
sys.path.insert(0, dirname(dirname(abspath(__file__))))
from camera import Camera

path = "./calibration/chessboard_imgs/"
imgs = os.listdir(path)
cam = Camera(disable_video=True)
for fname in imgs:
    img = cv2.imread(path + fname)
    if img is not None:
        img = cam.undistort(img)
        small = cv2.resize(img, (0, 0), fx=0.6, fy=0.6)
        cv2.imshow(fname, small)
        cv2.waitKey(0)
Example #19
def get_game_variables(constants):  # player stat declarations
    fighter_component = Fighter(hp=4000,
                                sp=100,
                                dt=0,
                                ac=10,
                                dice='1d4t',
                                power='',
                                acc='1d20t',
                                STR=1,
                                DEX=3,
                                CON=2,
                                INT=1,
                                fort=0,
                                refl=0,
                                will=0,
                                melee=1,
                                gun=1,
                                light=2,
                                dark=2,
                                tec=0,
                                lore=1,
                                xp=0,
                                lpower='',
                                dpower='',
                                luck=5)
    inventory_component = Inventory(26)
    # player character and component declarations
    level_component = Level()
    equipment_component = Equipment()
    player = Entity(0,
                    0,
                    '@',
                    libtcod.white,
                    'Player',
                    blocks=True,
                    render_order=RenderOrder.ACTOR,
                    fighter=fighter_component,
                    inventory=inventory_component,
                    level=level_component,
                    equipment=equipment_component)
    entities = [player]
    # starting equipment
    equippable_component = Equippable(EquipmentSlots.MAIN_HAND,
                                      base_dice='1d6t',
                                      power_bonus='0',
                                      DEX_weapon=True,
                                      luck_bonus=5,
                                      ranged_weapon=True)
    dagger = Entity(0,
                    0,
                    '-',
                    libtcod.sky,
                    '경쇠뇌',
                    equippable=equippable_component)
    player.inventory.add_item(dagger)
    player.equipment.toggle_equip(dagger)

    game_map = GameMap(constants['map_width'], constants['map_height'])
    game_map.make_map(constants['max_rooms'], constants['room_min_size'],
                      constants['room_max_size'], constants['map_width'],
                      constants['map_height'], player, entities)

    #camera
    camera = Camera(
        x=0,
        y=0,
        width=constants['screen_width'],
        height=constants['screen_height'],
        map_width=constants['map_width'],
        map_height=constants['map_height'],
    )
    camera.update(player)

    message_log = MessageLog(constants['message_x'],
                             constants['message_width'],
                             constants['message_height'])

    game_state = GameStates.PLAYERS_TURN

    return player, entities, game_map, message_log, game_state
Example #20
    dict_list = []
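    # row 0 of the sheet holds the column headers; they become the dict keys for each data row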
    keys = [sheet.cell(0, col_index).value for col_index in range(sheet.ncols)]
    for row_index in range(1, sheet.nrows):
        d = {
            keys[col_index]: sheet.cell(row_index, col_index).value
            for col_index in range(sheet.ncols)
        }
        dict_list.append(d)

    camerasArray = []

    for entry in dict_list:
        camerasArray.append(
            Camera(ip=entry['ip'],
                   port=int(entry['port']),
                   login=entry['login'],
                   password=entry['password']))

    #result = open(config_ini['DEFAULT']['path_to_save_file'], "w", encoding='utf8')

    log = open(config_ini['DEFAULT']['log_file'], 'w')

    for camer in camerasArray:
        print(camer)

        status = changeLogTries(camer.ip, camer.port, camer.login,
                                camer.password,
                                int(config_ini['DEFAULT']['mode']))
        log.write("{}:{}\t{}\n".format(camer.ip, camer.port, status))
Example #21
    def freeze_background(self):
        self.update_frame()
        self.set_camera(Camera(self.get_frame()))
        self.clear()
Example #22
def generate_camera():
    """
    Generates camera
    """
    config.CAMERA = Camera(config.MAP_INFO)
Example #23
        result = xmlrpclib.Binary(self.im.tostring())
        self.lock.release()

        return result


def capture_frames(camera, controller):
    print "Starting the frame capture loop..."
    while True:
        yield camera.stream()
        controller.capture(camera.image())


def serve_requests(controller):
    print("Initializing the XML-RPC server...")
    server = SimpleXMLRPCServer(("0.0.0.0", 8002))
    server.register_instance(controller)
    print("Waiting for incoming requests...")
    server.serve_forever()


if __name__ == '__main__':
    controller = ColiController()

    t = threading.Thread(target=serve_requests, args=(controller, ))
    t.daemon = True
    t.start()

    camera = Camera(3)
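    # capture_sequence() drives the capture_frames generator above, alternating streaming and controller captures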
    camera.capture_sequence(capture_frames(camera, controller))
Example #24
def main() -> int:
    parser = argparse.ArgumentParser()
    parser.add_argument('-u', '--unit-config', type=str, required=True)
    parser.add_argument('-a', '--app-config', type=str, required=True)
    args = parser.parse_args()

    with open(args.app_config, 'r') as f:
        sim_config = json.load(f)["simulation"]
    with open(args.unit_config, 'r') as f:
        unit_config = json.load(f)

    world_size = np.array([
        sim_config["world"]["length_m"], sim_config["world"]["width_m"]
    ])

    # robots 0-7 start at the origin with headings spaced pi/4 apart;
    # robots 8 and 9 start at (-1, 1) with headings 0 and pi/4
    poses = [[[0], [0], [i * pi / 4]] for i in range(8)]
    poses += [[[-1], [1], [0]], [[-1], [1], [pi / 4]]]
    robots = [
        Robot(i, pose=np.array(p, dtype=np.float64), world_size=world_size)
        for i, p in enumerate(poses)
    ]

    for robot in robots:
        # create, initialize, and attach camera to robot
        camera = Camera(intrinsics=unit_config["camera"]["0"]["intrinsics"],
                        extrinsics=unit_config["camera"]["0"]["extrinsics"])
        robot.attach_camera(camera)

    # generate 10 fiducials and place them
    fiducials = []
    '''
    for i in range(0,10):
        fiducial = Fiducial(np.array([[random.randrange(0, world_length),
                                       random.randrange(0, world_width),
                                       random.randrange(0, world_height)]],
                                     dtype=np.float64), i)
        fiducials.append(fiducial)
    '''
    fiducials.append(
        Fiducial(
            np.array([[1, 1, 1, 1], [0, 0.1, 0.1, 0], [1, 1, 0.9, 0.9]],
                     dtype=np.float64), 0))
    fiducials.append(Fiducial(np.array([[1], [1], [1]], dtype=np.float64), 1))
    fiducials.append(Fiducial(np.array([[0], [1], [1]], dtype=np.float64), 2))
    fiducials.append(Fiducial(np.array([[-1], [1], [1]], dtype=np.float64), 3))
    fiducials.append(Fiducial(np.array([[-1], [0], [1]], dtype=np.float64), 4))
    fiducials.append(Fiducial(np.array([[-1], [-1], [1]], dtype=np.float64), 5))
    fiducials.append(Fiducial(np.array([[0], [-1], [1]], dtype=np.float64), 6))
    fiducials.append(Fiducial(np.array([[1], [-1], [1]], dtype=np.float64), 7))

    seen = Simulator(robots, fiducials,
                     world_size=world_size).run()  # run the simulation
Example #25
    def get_image(self, camera=None):
        if camera is None:
            from camera import Camera
            camera = Camera()
        camera.capture_mobject(self)
        return Image.fromarray(camera.get_image())
Example #26
from PyQt5 import QtGui, QtCore, QtWidgets
import numpy as np
from ctypes import c_float, c_uint, sizeof
import time

from camera import CameraMovement, Camera

GLfloat = c_float
GLuint = c_uint

WIDTH = 800
HEIGHT = 600

# camera
camera = Camera(position=QtGui.QVector3D(0., 0., 3.),
                up=QtGui.QVector3D(0., 1., 0.))

firstMouse = True
lastX = WIDTH / 2.0
lastY = HEIGHT / 2.0

# timing
deltaTime = 0.  # time between current frame and last frame
lastFrame = 0.

lightPos = QtGui.QVector3D(1.2, 1., 2.)


class Window(QtGui.QOpenGLWindow):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
Example #27
import serial
import time
from datetime import datetime
import mysql.connector
from camera import Camera

# connect to the database and create a Camera object from the camera library
# configure the serial port so we can receive data from the Nucleo
cnx = mysql.connector.connect(user='******', password='******', host='localhost', database='testidb')
cur = cnx.cursor()
cm = Camera()

time.sleep(1)

ser = serial.Serial("/dev/ttyACM0", 9600)

state = 0

# while loop that reads data from the serial port and writes updates to the database
while True:

    data_raw = ser.readline().decode().strip()

    data = str(data_raw)

    # if the light turns on, take a picture
    if data == "on":
        if state == 1:
            #nothing
            print("a")
Example #28
    def __init__(self):
        # composition stuff

        #: list of (int, child-reference) where int is the z-order, sorted by ascending z (back to front order)
        self.children = []

        #: dictionary that maps children names with children references
        self.children_names = {}

        self._parent = None

        # drawing stuff

        #: x-position of the object relative to its parent's children_anchor_x value.
        #: Default: 0
        self._x = 0

        #: y-position of the object relative to its parent's children_anchor_y value.
        #: Default: 0
        self._y = 0

        #: a float, alters the scale of this node and its children.
        #: Default: 1.0
        self._scale = 1.0

        #: a float, in degrees, alters the rotation of this node and its children.
        #: Default: 0.0
        self._rotation = 0.0

        #: eye, center and up vector for the `Camera`.
        #: gluLookAt() is used with these values.
        #: Default: FOV 60, center of the screen.
        #: IMPORTANT: The camera can perform exactly the same
        #: transformation as ``scale``, ``rotation`` and the
        #: ``x``, ``y`` attributes (with the exception that the
        #: camera can modify also the z-coordinate)
        #: In fact, they all transform the same matrix, so
        #: use either the camera or the other attributes, but not both
        #: since the camera will be overridden by the transformations done
        #: by the other attributes.
        #: You can change the camera manually or by using the `Camera3DAction`
        #: action.
        self.camera = Camera()


        #: offset from (x,0) from where rotation and scale will be applied.
        #: Default: 0
        self.transform_anchor_x = 0

        #: offset from (0,y) from where rotation and scale will be applied.
        #: Default: 0
        self.transform_anchor_y = 0

        #: whether or not the object and its children are visible.
        #: Default: True
        self.visible = True

        #: the grid object for the grid actions.
        #: This can be a `Grid3D` or a `TiledGrid3D` object depending
        #: on the action.
        self.grid = None

        # actions stuff
        #: list of `Action` objects that are running
        self.actions = []

        #: list of `Action` objects to be removed
        self.to_remove = []

        #: whether or not the next frame will be skipped
        self.skip_frame = False

        # schedule stuff
        self.scheduled = False          # deprecated, soon to be removed
        self.scheduled_calls = []       #: list of scheduled callbacks
        self.scheduled_interval_calls = []  #: list of scheduled interval callbacks
        self.is_running = False         #: whether or not the object is running

        # matrix stuff
        self.is_transform_dirty = False
        self.transform_matrix = euclid.Matrix3().identity()
        self.is_inverse_transform_dirty = False
        self.inverse_transform_matrix = euclid.Matrix3().identity()
Example #29
def main():
    if len(argv) != 2 or argv[1] == '--help':
        print("""Usage: run.py MODEL
Use MODEL to classify camera frames and play sounds when class 0 is recognised."""
              )
        exit(1)

    model_file = argv[1]

    # We use the same MobileNet as during recording to turn images into features
    print('Loading feature extractor')
    extractor = PiNet()

    # Here we load our trained classifier that turns features into categories
    print('Loading classifier')
    classifier = keras.models.load_model(model_file)

    # Initialize the camera and sound systems
    camera = Camera(training_mode=False)
    random_sound = RandomSound()

    # Create a preview window so we can see if we are in frame or not
    if SHOW_UI:
        pygame.display.init()
        pygame.display.set_caption('Loading')
        screen = pygame.display.set_mode((512, 512))

    # Smooth the predictions to avoid interruptions to audio
    smoothed = np.ones(classifier.output_shape[1:])
    smoothed /= len(smoothed)

    print('Now running!')
    while True:
        raw_frame = camera.next_frame()

        # Use MobileNet to get the features for this frame
        z = extractor.features(raw_frame)

        # With these features we can predict a 'normal' / 'yeah' class (0 or 1)
        # Keras expects an array of inputs and produces an array of outputs
        classes = classifier.predict(np.array([z]))[0]

        # smooth the outputs - this adds latency but reduces interruptions
        smoothed = smoothed * SMOOTH_FACTOR + classes * (1.0 - SMOOTH_FACTOR)
        selected = np.argmax(smoothed)  # the selected class is the one with the highest probability

        # Show the class probabilities and selected class
        summary = 'Class %d [%s]' % (selected, ' '.join('%02.0f%%' % (99 * p)
                                                        for p in smoothed))
        stderr.write('\r' + summary)

        # Perform actions for selected class. In this case, play a sound from the sounds/ dir
        if selected == 0:
            random_sound.play_from('sounds/')
        else:
            random_sound.stop()

        # Show the image in a preview window so you can tell if you are in frame
        if SHOW_UI:
            pygame.display.set_caption(summary)
            surface = pygame.surfarray.make_surface(raw_frame)
            screen.blit(pygame.transform.smoothscale(surface, (512, 512)),
                        (0, 0))
            pygame.display.flip()

            for evt in pygame.event.get():
                if evt.type == pygame.QUIT:
                    pygame.quit()
                    break
Example #30
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.camera = Camera(fps=25)