Example #1
0
    def __init__(self):
        """Set up detection counters, deferred device flags, rolling
        buffers and the S3 bucket used for snapshot uploads."""
        # Motion-detection state starts uncalibrated with empty counters.
        self.mode = 'uncalibrated'
        self.frame_count = 0
        self.motion_frames = 0

        # Deferred device requests, applied later by the body callback.
        self._set_led = freenect.LED_BLINK_YELLOW
        self._set_video = None

        # Snapshot / settings-refresh bookkeeping (timestamps and flags).
        self._last_img = 0
        self._snapshot = False
        self._last_setting_check = 0
        self.settings_delay = 10
        self.snapshot_secs = 5

        self.debug = True
        self.nightvision = None

        # Rolling statistics over recent depth frames.
        self.dsum_buffer = RingBuffer(100)
        self.running_avg = AvgMatrix(15)

        self.load_settings()

        # Bucket that snapshots get published to.
        conn = boto.connect_s3(settings.AWS_KEY, settings.AWS_SECRET)
        self.s3bucket = conn.create_bucket(settings.AWS_BUCKET)
Example #2
0
    def run_lightning(config):
        """Build train/test replay buffers and a LightningTemplate, then
        either evaluate a previously trained agent or fit a new one.

        Buffer type is chosen by config['use_RCP_buffer']; a saved
        checkpoint is loaded when args.reload or args.eval_agent is set.
        """
        if config['use_RCP_buffer']:
            buffer_cls = RingBuffer
        else:
            # SortedBuffer capacity is counted in transitions, so scale
            # the episode-count budget by the average episode length.
            config['max_buffer_size'] *= env_params['avg_episode_length']
            buffer_cls = SortedBuffer

        shared_kwargs = dict(
            obs_dim=env_params['STORED_STATE_SIZE'],
            act_dim=env_params['STORED_ACTION_SIZE'],
            use_td_lambda_buf=config['desire_advantage'])
        train_buffer = buffer_cls(size=config['max_buffer_size'],
                                  **shared_kwargs)
        test_buffer = buffer_cls(size=config['batch_size'] * 10,
                                 **shared_kwargs)

        model = LightningTemplate(game_dir, config, train_buffer, test_buffer)

        if args.reload or args.eval_agent:
            # Checkpoint path comes from whichever flag was provided.
            load_name = args.reload if args.reload else args.eval_agent
            state_dict = torch.load(load_name)['state_dict']
            prefix = 'model.'
            if args.implementation == 'RCP-A':
                # The checkpoint also carries a separate advantage model;
                # split its weights out by their 'advantage_model.' prefix
                # while stripping the leading 'model.' from the main keys.
                model_state_dict = {}
                for key, weight in state_dict.items():
                    if 'model' in key[:len(prefix)]:
                        model_state_dict[key[len(prefix):]] = weight
                adv_prefix = 'advantage_model.'
                adv_state_dict = {
                    key[len(adv_prefix):]: weight
                    for key, weight in state_dict.items() if adv_prefix in key
                }
                model.advantage_model.load_state_dict(adv_state_dict)
            else:
                # Every key belongs to the main model; just drop the prefix.
                model_state_dict = {
                    key[len(prefix):]: weight
                    for key, weight in state_dict.items()
                }
            model.model.load_state_dict(model_state_dict)
            print("Loaded in Model!")

        if args.eval_agent:
            print(
                'Ensure the desires for your agent (approx line 76 of lightning_trainer.py) \
                correspond to those your agent learned.')
            # eval_agent is implemented on the Lightning module itself.
            model.eval_agent()
        else:
            trainer = Trainer(deterministic=True,
                              logger=logger,
                              default_root_dir=game_dir,
                              max_epochs=epochs,
                              profiler=False,
                              checkpoint_callback=every_checkpoint_callback,
                              callbacks=callback_list,
                              gradient_clip_val=config['grad_clip_val'],
                              progress_bar_refresh_rate=0)
            trainer.fit(model)
Example #3
0
class Watcher(object):
    """Kinect-based motion monitor (Python 2 codebase — note the print
    statement in load_settings).

    Watches the depth stream for motion, flips the Kinect LED between
    green (idle) and red (motion), creates an Alert row when motion
    starts, and uploads periodic or alert-triggered snapshots from the
    video stream to S3.
    """

    def __init__(self):
        # Detection state; stays 'uncalibrated' until the delta ring
        # buffer has been filled (see depth_callback).
        self.frame_count = 0
        self.motion_frames = 0
        self.mode = 'uncalibrated'
        # Deferred device commands; applied in body_callback because
        # calling the freenect setters from the update loop hangs
        # (see comment there).
        self._set_led = freenect.LED_BLINK_YELLOW
        self._set_video = None
        self._last_img = 0
        self._snapshot = False
        self._last_setting_check = 0

        # Seconds between settings reloads / periodic snapshots.
        self.settings_delay = 10
        self.snapshot_secs = 5

        self.debug = True
        self.nightvision = None
        self.dsum_buffer = RingBuffer(100)
        self.running_avg = AvgMatrix(15)
        self.load_settings()

        self.s3bucket = boto.connect_s3(settings.AWS_KEY,
                                        settings.AWS_SECRET).create_bucket(
                                            settings.AWS_BUCKET)

    def load_settings(self):
        """Reload motion thresholds and nightvision mode from the user
        profile stored in the database."""
        profile = UserProfile.objects.get()

        if self.debug:
            print 'load_settings', profile.motion_sensitivity, profile.nightvision_on

        # low sensitivity: require a bigger change over more frames
        if profile.motion_sensitivity < 10:
            self.change_threshold = 1.5
            self.min_report_event = 30
        # high
        elif profile.motion_sensitivity > 10:
            self.change_threshold = 0.7
            self.min_report_event = 15
        # normal
        else:
            self.change_threshold = 1.0
            self.min_report_event = 20

        # Only request a video-mode switch when the setting changed;
        # the switch itself happens later in body_callback.
        if profile.nightvision_on != self.nightvision:
            self.nightvision = profile.nightvision_on
            self._set_video = (freenect.VIDEO_IR_8BIT if self.nightvision
                               else freenect.VIDEO_RGB)


    def set_mode(self, mode):
        """Switch between 'motion' and 'nomotion', queueing the matching
        LED color; raises ValueError for any other mode name."""
        if mode == 'nomotion':
            self._set_led = freenect.LED_GREEN
        elif mode == 'motion':
            self._set_led = freenect.LED_RED
        else:
            raise ValueError('unknown mode')
        self.mode = mode


    def depth_callback(self, dev, data, timestamp):
        """Per depth frame: update the running average and decide whether
        motion has started or stopped."""
        # add the data to our running average
        self.running_avg.add(data)

        if self.mode == 'uncalibrated':
            # Calibrate by counting frames until the ring buffer size is
            # reached, then switch to normal operation.
            self.frame_count += 1
            if self.frame_count == self.dsum_buffer.size():
                self.set_mode('nomotion')
        else:
            mean_array = self.running_avg.mean()

            # difference from sum of buffer
            dsum = mean_array.sum()
            self.dsum_buffer.add(dsum)
            delta = self.dsum_buffer.std_delta(dsum)

            # frame will count as a motion frame
            if delta > self.change_threshold:
                self.motion_frames += 1
                # Debounce: only report once min_report_event motion
                # frames have accumulated while we were idle.
                if (self.motion_frames == self.min_report_event and
                    self.mode == 'nomotion'):
                    self.set_mode('motion')
                    alert = Alert.objects.create(event_type='motion')
                    # Ask video_callback to upload a snapshot for this alert.
                    self._snapshot = alert.id
            else:
                # don't let motion_frames drop below 0
                self.motion_frames = max(self.motion_frames-1, 0)
                if self.motion_frames == 0 and self.mode == 'motion':
                    self.set_mode('nomotion')
                    # could log how long the event was and its intensity here

            if self.debug:
                cv.ShowImage('Depth', simplify_cv(mean_array.astype(numpy.uint16)))
                cv.WaitKey(1)


    def body_callback(self, dev, ctx):
        """Runs between frames; applies deferred device commands and
        periodically reloads settings."""
        # _set_led hackery is required because for some reason calling set_led
        # from update loop hangs the process
        if self._set_led:
            freenect.set_led(dev, self._set_led)
            self._set_led = None

        if self._set_video is not None:
            # Changing the video format requires a stop/start cycle.
            freenect.stop_video(dev)
            freenect.set_video_mode(dev, freenect.RESOLUTION_MEDIUM,
                                    self._set_video)
            freenect.start_video(dev)
            self._set_video = None

        if self._last_setting_check + self.settings_delay < time.time():
            self.load_settings()
            self._last_setting_check = time.time()


    def video_callback(self, dev, data, timestamp):
        """Per video frame: show the debug view and upload snapshots."""
        if self.nightvision:
            cv_data = simplify_cv(data)
        else:
            cv_data = video_cv(data)

        if self.debug:
            cv.ShowImage('Video', cv_data)

        # Upload either the periodic "current" image or an alert snapshot
        # requested by depth_callback via self._snapshot.
        if (self._last_img + self.snapshot_secs < time.time() or
            self._snapshot):
            cv.SaveImage('babby-current.jpg', cv_data)
            k = boto.s3.key.Key(self.s3bucket)
            if self._snapshot:
                # _snapshot holds the Alert id that triggered this upload.
                k.key = '/babby/snapshot-%s.jpg' % self._snapshot
                self._snapshot = False
            else:
                k.key = '/babby/current.jpg'
            k.set_contents_from_filename('babby-current.jpg')
            k.set_acl('public-read')
            self._last_img = time.time()