Exemple #1
0
    def generateNeighbour(self, camera_move_method):
        """Return a new State differing from this one by a single random
        transformation (insert / remove / move of one camera).

        :param camera_move_method: 'local' to nudge a camera near its
            current position, 'random' to jump it to a random free point.
        :return: a new State built on the transformed camera list.
        :raises RuntimeError: on an unknown move method or transformation.
        """
        # Copy every camera so the transformation below cannot mutate
        # this state's own camera list.
        cameras = [copy.copy(c) for c in self.cameras]

        transformation = self.randomlyChooseTransformationMethod(cameras)

        # perform transformation
        if transformation == 'insert':
            cameras.append(
                Camera(self.problem, self.getRandomFreePointFromRoom()))
        elif transformation == 'remove':
            # BUG FIX: pick the victim from the copied list, not from
            # self.cameras -- removing an ORIGINAL object from `cameras`
            # only works if Camera defines __eq__, and choosing among the
            # copies keeps this state untouched in every case.
            cameras.remove(random.choice(cameras))
        elif transformation == 'move':
            # BUG FIX: move one of the COPIED cameras; the original code
            # mutated a camera owned by self, silently changing this
            # state as well as the neighbour.
            to_modify = random.choice(cameras)
            if camera_move_method == 'local':
                to_modify.move()
            elif camera_move_method == 'random':
                to_modify.move(self.getRandomFreePointFromRoom())
            else:
                raise RuntimeError("Wrong move camera method!")
        else:
            raise RuntimeError("Wrong transformation method!")

        return State(self.problem, cameras)
Exemple #2
0
    def generateCameras(self):
        """Create the problem's minimum required number of cameras, each
        placed at a random free point of the room."""
        return [
            Camera(self.problem, self.getRandomFreePointFromRoom())
            for _ in range(self.problem.min_number_of_cams)
        ]
Exemple #3
0
    def __init__(self, argv):
        """Build the main window, wire up the Ethernet device, and start
        the background data-update thread.

        :param argv: command-line arguments (not used in the visible body).
        """
        QtGui.QMainWindow.__init__(self)
        # NOTE(review): Ui_MainWindow is stored as the CLASS, not an
        # instance (no parentheses), and the explicit __init__ call below
        # passes self.window, which is never assigned here. This looks like
        # it should be `Ui_MainWindow()` -- confirm against the UI module.
        self.MainWindow = Ui_MainWindow
        self.ui = Ui_MainWindow
        self.MainWindow.__init__(self.window)
        self.setupUi(self)
        self.show()

        # Log panel sink and the Ethernet-connected device it reports on.
        self.logger = Logger(self.logPanel)
        self.device = EthernetDevice(self.logger)

        # Pixmaps for the connection-state indicator. The paths carry no
        # file extension -- presumably resolved by Qt; verify the resources
        # exist relative to the working directory.
        self.device.disconState = QtGui.QPixmap(
            _fromUtf8('../res/disconnected'))
        self.device.conState = QtGui.QPixmap(_fromUtf8('../res/connected'))
        self.device.currentState = 0  # 0 = disconnected, presumably -- confirm

        # Initial motor command state sent to the device.
        self.device.power = 0
        self.device.direction = 0

        self.addWidgets()

        # Background thread whose `trigger` signal drives updateState on
        # the GUI side.
        self.updater = DataUpdateThread(self)  # create a thread
        self.updater.trigger.connect(
            self.updateState)  # connect to its signal
        self.updater.start()

        self.cam = Camera(self)
Exemple #4
0
def start_game(game_data):
    """Build a Camera from saved game data, load its starting map, and
    return it.

    :param game_data: dict with 'CHAR' (roster), 'LOC' (map name and
        position) and 'FLAGS' entries.
    """
    roster = game_data["CHAR"]
    location = game_data["LOC"]
    camera = Camera(screen_size=screen_size,
                    roster=roster,
                    flags=game_data["FLAGS"])
    camera.load_map(map_name=location["MAP"], goto=location["POSITION"])
    return camera
Exemple #5
0
def ch7():
    """Chapter 7 scene: a floor and two walls (flattened spheres) plus
    three shaded spheres, rendered to 'ch7.ppm'.

    Fix: the output filename was 'ch8.ppm', inconsistent with this
    function's name and the chN.ppm convention used by ch9()/ch14().
    """
    # Step 1: flattened sphere used as the floor.
    floor = Sphere()
    floor.transform = Matrix.scaling(10, 0.01, 10)
    floor.material = Material()
    floor.material.color = Color(1, 0.9, 0.9)
    floor.material.specular = 0

    # Step 2: left wall. NOTE both walls ALIAS floor.material (the same
    # Material object), so editing one edits all three surfaces.
    left_wall = Sphere()
    left_wall.transform = Matrix.translation(0, 0, 5) * Matrix.rotation_y(-45) * \
                          Matrix.rotation_x(90) * Matrix.scaling(10, 0.01, 10)
    left_wall.material = floor.material

    # Step 3: right wall, mirrored rotation.
    right_wall = Sphere()
    right_wall.transform = Matrix.translation(0, 0, 5) * Matrix.rotation_y(45) * \
                           Matrix.rotation_x(90) * Matrix.scaling(10, 0.01, 10)
    right_wall.material = floor.material

    # Step 4: middle (largest) sphere.
    middle = Sphere()
    middle.transform = Matrix.translation(-0.5, 1, 0.5)
    middle.material = Material()
    middle.material.color = Color(0.1, 1, 0.5)
    middle.material.diffuse = 0.7
    middle.material.specular = 0.3

    # Step 5: smaller right sphere.
    right = Sphere()
    right.transform = Matrix.translation(1.5, 0.5, -0.5) * Matrix.scaling(
        0.5, 0.5, 0.5)
    right.material = Material()
    right.material.color = Color(0.5, 1, 0.1)
    right.material.diffuse = 0.7
    right.material.specular = 0.3

    # Step 6: smallest sphere, on the left.
    left = Sphere()
    left.transform = Matrix.translation(-1.5, 0.33, -0.75) * Matrix.scaling(
        0.33, 0.33, 0.33)
    left.material = Material()
    left.material.color = Color(1, 0.8, 0.1)
    left.material.diffuse = 0.7
    left.material.specular = 0.3

    world = World()
    world.light = PointLight(point(-10, 10, -10), Color(1, 1, 1))
    world.objects.extend([floor, left_wall, right_wall, middle, right, left])

    camera = Camera(100, 50, 60)
    # camera = Camera(500, 250, 60)  # higher-resolution variant
    camera.transform = view_transform(point(0, 1.5, -5), point(0, 1, 0),
                                      vector(0, 1, 0))

    canvas = camera.render(world)

    # BUG FIX: was 'ch8.ppm'.
    with open('ch7.ppm', 'w') as fp:
        fp.write(canvas.to_ppm())
Exemple #6
0
 def parse(self):
     """Parse sys.argv for the supported flags.

     -h : print help.
     -t <path> : calibrate a Camera from the checkerboard images at
         <path>, then save the calibration.
     -i <path> : load a saved calibration and project the image at
         <path> from camera to world coordinates.

     :raises Exception: when a flag that needs a path has none.
     """
     args = sys.argv
     for i in range(len(args)):
         if args[i] == "-h":
             self.help()
         elif args[i] == "-t":
             # BUG FIX: args[i + 1] was read without checking it exists,
             # raising IndexError when '-t' is the last argument.
             if i + 1 >= len(args):
                 raise Exception(
                     "need path after -t type -h for more information")
             camera = Camera()
             checkboard = (6, 9)  # inner corners of the calibration board
             winSize = (5, 5)     # cornerSubPix search window
             print(args)
             camera.Calibration(args[i + 1], checkboard, winSize)
             camera.save()
         elif args[i] == "-i":
             # BUG FIX: the old `if not args[i + 1]` itself raised
             # IndexError when the path was missing; bounds-check instead.
             if i + 1 >= len(args):
                 raise Exception(
                     "need path after -i type -h for more information")
             camera = Camera()
             camera.load()
             img = cv2.imread(args[i + 1])
             camera.imgCamToWorld(img)
Exemple #7
0
def ch14():
    """Chapter 14 scene: a single hexagon group lit from the upper left,
    rendered to 'ch14.ppm'."""
    world = World()
    world.light = PointLight(point(-10, 10, -10), Color(1, 1, 1))

    # Renamed from `hex` so the builtin hex() is not shadowed.
    hex_group = hexagon()
    world.objects.append(hex_group)

    camera = Camera(100, 50, 60)
    camera.transform = view_transform(point(0, 2, -1), point(0, 0, 0),
                                      vector(0, 1, 0))
    canvas = camera.render(world)

    with open('ch14.ppm', 'w') as fp:
        fp.write(canvas.to_ppm())
Exemple #8
0
def ch9():
    """Chapter 9 scene: a striped floor plane plus three shaded spheres,
    rendered to 'ch9.ppm'."""

    def _shaded_sphere(transform, color):
        # Shared sphere recipe: fresh material, diffuse 0.7 / specular 0.3.
        ball = Sphere()
        ball.transform = transform
        ball.material = Material()
        ball.material.color = color
        ball.material.diffuse = 0.7
        ball.material.specular = 0.3
        return ball

    # Striped floor plane.
    floor = Plane()
    floor.transform = Matrix.scaling(10, 0.01, 10)
    floor.material = Material()
    floor.material.color = Color(1, 0.9, 0.9)
    floor.material.specular = 0
    floor.material.pattern = StripePattern(Color(1, 0, 0), Color(0, 0, 1))

    # Middle biggest sphere, ring-patterned.
    middle = _shaded_sphere(Matrix.translation(-0.5, 1, 0.5),
                            Color(0.1, 1, 0.5))
    middle.material.pattern = RingPattern(Color(1, 0, 1), Color(1, 1, 1))
    middle.material.pattern.transform = Matrix.scaling(0.25, 0.5, 0.25)

    # Smaller right sphere, fully reflective.
    right = _shaded_sphere(
        Matrix.translation(1.5, 0.5, -0.5) * Matrix.scaling(0.5, 0.5, 0.5),
        Color(0.5, 1, 0.1))
    right.material.reflective = 1.0

    # Left yellow sphere.
    left = _shaded_sphere(
        Matrix.translation(-1.5, 0.33, -0.75) * Matrix.scaling(0.33, 0.33, 0.33),
        Color(1, 0.8, 0.1))

    world = World()
    world.light = PointLight(point(-10, 10, -10), Color(1, 1, 1))
    world.objects.extend([floor, middle, right, left])

    camera = Camera(100, 50, 60)
    # camera = Camera(500, 250, 60)
    camera.transform = view_transform(point(0, 1.5, -5),
                                      point(0, 1, 0),
                                      vector(0, 1, 0))

    canvas = camera.render(world)

    with open('ch9.ppm', 'w') as fp:
        fp.write(canvas.to_ppm())
Exemple #9
0
    def __init__(self):
        """Initialise the inter-thread queues, the GPIO pins, and the
        camera / com-device components of the spaceship controller."""
        logging.info("Starting Spaceship")
        # Queues passed to the worker components below: camera mode
        # commands, and com-device inbound/outbound traffic.
        self.q_mode_camera = Queue.Queue()
        self.q_com_device_in = Queue.Queue()
        self.q_com_device_out = Queue.Queue()

        # Physical (board) pin numbering, not BCM channel numbers.
        GPIO.setmode(GPIO.BOARD)
        # init pin 7 for camera PWM
        GPIO.setup(7, GPIO.OUT)
        # Init pin 11 for diode
        GPIO.setup(11, GPIO.OUT)
        # Init pin 13 for relay for power to servo
        GPIO.setup(13, GPIO.OUT)

        self.mode = ""  # current operating mode; empty until one is selected
        self.online = False  # presumably flipped once a connection is up -- confirm
        self.camera = Camera(self.q_mode_camera)
        self.com_device = ComDevice(self.q_com_device_in, self.q_com_device_out)
        self.running = True  # main-loop keep-alive flag
Exemple #10
0
def step_impl(context):
    """Store on the context a Camera built from its size and field of view."""
    hsize, vsize, fov = context.hsize, context.vsize, context.field_of_view
    context.c = Camera(hsize, vsize, fov)
Exemple #11
0
def step_impl(context, width, height, degrees):
    """Attach to the context a Camera with the given dimensions and
    field of view (in degrees)."""
    camera = Camera(width, height, degrees)
    context.c = camera
Exemple #12
0
def detection_pipeline(log, args=None):
    """
    Detection main function: configure the system, build one processing
    chain per camera (motion detection + parking-spot logic), then loop
    round-robin over the cameras analysing frames until capture stops.

    :param log: logger wrapper exposing `.logger` (used for info/error output)
    :param args: argv-style list; args[1] selects the execution mode --
        'use_onboard_camera', 'use_local_rtsp' or 'use_captured_video'
    :return: None (exits the process on an unrecognised mode)
    """
    config = Config(log)

    # Ensure the image archive directory exists before anything writes to it.
    img_archive_dir = config.img_archive_dir
    if not os.path.exists(img_archive_dir):
        os.makedirs(img_archive_dir)

    # Project root (two levels above this file); camera_info.txt lives there.
    father_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))

    '''would need a starting time for video'''
    config.st = datetime.datetime.now()
    if config.socket_enable:
        # Periodic status reports plus (optionally) re-upload of cached data.
        config.start_status_report_timer()
        if config.reupload_enbale:  # NOTE(review): attribute spelled 'enbale' -- must match Config; do not "fix" here alone
            config.re_upload_cache()
        config.start_log_upload_timer()

    # Initialize for each camera
    if args[1] in ['use_onboard_camera', 'use_local_rtsp']:
        camera = Camera(log, args[1], local=False, cfg_path=father_dir + '/camera_info.txt')
    elif args[1] == 'use_captured_video':
        camera = Camera(log, args[1], local=True, cfg_path=father_dir + '/camera_info.txt')
    else:
        log.logger.error('Unrecognized execution mode argument {}'.format(args[1]))
        # NOTE(review): bare exit() kills the process; sys.exit() / return
        # would be cleaner -- confirm callers before changing.
        exit()

    dnn_model = DnnModel(log, config)
    # Build the per-camera processing objects from camera_info config.
    for cam_id in camera.info:
        motion_roi = camera.info[cam_id]['coord']['bounding_rect']
        cov_roi, spot_roi1, spot_roi2 = camera.info[cam_id]['coord']['roi_rects']
        parking_ids = camera.info[cam_id]['parking_ids']
        transformation_matrix = camera.info[cam_id]['transformation_matrix']

        # convert to (left, top, right, bottom), may change in yaml file later.
        # Incoming rects are (x, y, width, height).
        motion_roi = [motion_roi[0], motion_roi[1], motion_roi[0] + motion_roi[2], motion_roi[1] + motion_roi[3]]
        cov_roi = [cov_roi[0], cov_roi[1], cov_roi[0] + cov_roi[2], cov_roi[1] + cov_roi[3]]
        spot_roi1 = [spot_roi1[0], spot_roi1[1], spot_roi1[0] + spot_roi1[2], spot_roi1[1] + spot_roi1[3]]
        spot_roi2 = [spot_roi2[0], spot_roi2[1], spot_roi2[0] + spot_roi2[2], spot_roi2[1] + spot_roi2[3]]
        # trace_roi are used for backtrace
        # (mirror of cov_roi, shifted one cov-width to the left).
        trace_roi = [cov_roi[0] * 2 - cov_roi[2], cov_roi[1], cov_roi[0], cov_roi[3]]

        # set parameters for different cameras
        # NOTE(review): cameras 2, 3 and the default all use the same
        # thresholds -- per-camera tuning placeholders, presumably.
        if int(cam_id) == 1:
            distance_thres = [-0.2, 0.9]
        elif int(cam_id) == 2:
            distance_thres = [-0.3, 0.9]
        elif int(cam_id) == 3:
            distance_thres = [-0.3, 0.9]
        else:
            distance_thres = [-0.3, 0.9]
        parking_spot = ParkingSpot(log, config, cov_roi, spot_roi1, spot_roi2, parking_ids, trace_roi, distance_thres,
                                   camera.info[cam_id]['initial_state'])
        logic = B_logic(log, 8)
        motion = Motion(log, motion_roi, dnn_model, parking_spot, logic, transformation_matrix)
        # NOTE(review): the Motion object is stored under the key
        # 'ParkingSpot' -- the name is misleading but read back the same
        # way in the loop below.
        camera.info[cam_id]['ParkingSpot'] = motion

    # Live sources capture on background threads; file sources are read inline.
    if args[1] == 'use_onboard_camera' or args[1] == 'use_local_rtsp':
        camera.capture_pic_multithread()

    while config.cap_on is True:
        # camera robin-round
        for cam_id in camera.info:
            motion = camera.info[cam_id]['ParkingSpot']
            parking_draw = camera.info[cam_id]['parking_draw']
            # Read frame-by-frame
            if args[1] == 'use_onboard_camera' or args[1] == 'use_local_rtsp':
                ret, video_cur_frame, video_cur_time, video_cur_cnt = camera.get_frame_from_queue(cam_id)
            else:
                cap = camera.info[cam_id]['video_cap']
                if cap is None:
                    continue
                # Current position of the video file in seconds, indicate current time
                video_cur_time = cap.get(cv2.CAP_PROP_POS_MSEC) / 1000.0
                # Index of the frame to be decoded/captured next
                video_cur_cnt = cap.get(cv2.CAP_PROP_POS_FRAMES)
                ret, video_cur_frame = cap.read()

            # ret: False ends analysis, None means "no frame yet, retry".
            if ret is False:
                log.logger.info('Video Analysis Finished!')
                config.cap_on = False
                break
            elif ret is None:
                continue

            '''run each ParkingSpot'''
            frame_info = {'cur_frame': video_cur_frame, 'cur_time': video_cur_time, 'cur_cnt': video_cur_cnt}
            # Only analyse every read_step-th frame (frame skipping).
            if video_cur_cnt % motion.read_step == 0:
                # frame_for_display is returned for debug, need to change later
                frame_for_display = detection_flow(log, motion, dnn_model, frame_info)

                # the rest is added only for debug
                show_camera(motion, frame_for_display, parking_draw)

    # Cleanup: release captures and stop the background reporting timers.
    camera.release_video_cap()
    config.stop_status_report_timer()
    config.ftp_upload_enable = False