def main_button_click(self):
    self.play_beep()
    self.tick_timer()
    Recorder.start_recording()
    self.full_file_path = "test.wav"
    self.l_selected_file_name_var.set("[Selected file name]: " + self.full_file_path)
    self.b_spectrogram['state'] = 'active'
Example #2
def main():
    parser = OptionParser()
    parser.add_option('-v', '--verbose',
                      dest='verbose', action='store_true', default=False,
                      help='verbose output')
    (options, args) = parser.parse_args()
    verbose = options.verbose
    if verbose:
        print 'verbose output mode.'

    if len(args) == 0:
        print 'no input files.'
        return  # the bare `exit` expression here did not actually stop the program

    filename = args[0]
    if verbose:
        print 'filename: ' + filename
    p = Player(filename)

    r = Recorder()
    r.start()

    pl = plotter(p, r)
    th = Thread(target=pl)
    th.daemon = True
    th.start()

    p.run()
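
The example above targets Python 2 and the long-deprecated optparse module. For comparison, a sketch of the same flag handling in modern Python using argparse (Player, Recorder, and plotter are project classes not shown here, so only the CLI parsing is reproduced):

import sys
from argparse import ArgumentParser

def parse_cli():
    # Mirrors the -v/--verbose flag and the positional input file above.
    parser = ArgumentParser()
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='verbose output')
    parser.add_argument('files', nargs='*')
    args = parser.parse_args()
    if not args.files:
        print('no input files.')
        sys.exit(1)
    if args.verbose:
        print('filename: ' + args.files[0])
    return args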
Example #3
def test_SocketPair1(self):
    rec = Recorder()
    rec.createSocketPair(1, 2)
    socDetails = rec._getSocketDetails(1)
    self.assertEqual(socDetails["socketPair"], 2)
    self.assertEqual(socDetails["isForwardChannel"], False)
    socDetails = rec._getSocketDetails(2)
    self.assertEqual(socDetails["socketPair"], 1)
    self.assertEqual(socDetails["isForwardChannel"], True)
Example #4
    def test_record_when_title_and_command_given(self):
        recorder = Recorder(self.pty_recorder)

        asciicast = recorder.record('ls -l', 'the title')

        assert_equal('the title', asciicast.title)
        assert_equal('ls -l', asciicast.command)
        assert_equal(('ls -l',), self.pty_recorder.record_call_args())
        assert_equal(123.45, asciicast.duration)
        assert_equal(self.pty_recorder.stdout, asciicast.stdout)
Example #5
def start_recording():
    try:
        config = read_config()
        r = Recorder(config.get('directories', 'sessions'))
        r._run()
    except KeyboardInterrupt:
        logger.info("KeyboardInterrupt")
    except Exception:
        logger.error(sys.exc_info()[1])
    logger.info("Finished recording.")
Example #6
    def test_record_when_no_title_nor_command_given(self):
        env = { 'SHELL': '/bin/blush' }
        recorder = Recorder(self.pty_recorder, env)

        asciicast = recorder.record(None, None)

        assert_equal(None, asciicast.title)
        assert_equal(None, asciicast.command)
        assert_equal(('/bin/blush',), self.pty_recorder.record_call_args())
        assert_equal(123.45, asciicast.duration)
        assert_equal(self.pty_recorder.stdout, asciicast.stdout)
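
Examples #4 and #6 only pass against a test double for pty_recorder that returns a fixed 123.45 duration and canned stdout. A minimal sketch of such a fake, assuming the Recorder under test invokes a record_command() method (an assumption; the project's real fixture is not shown):

class FakePtyRecorder(object):
    # Hypothetical test double; the actual fixture may differ in
    # method names and return shape.
    stdout = 'fake recorded output'

    def __init__(self):
        self.duration = 123.45
        self._call_args = None

    def record_command(self, *args):
        # Remember the call so the tests can assert on it via
        # record_call_args(), and hand back canned duration/output.
        self._call_args = args
        return self.duration, self.stdout

    def record_call_args(self):
        return self._call_args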
Example #7
File: main.py Project: NISH1001/humT
def main():
    # start recording -> 5 sec default
    recorder = Recorder()
    frames = recorder.start(5)
    recorder.write_wav(frames, "input.wav")

    # load the hum and process
    hum = track_pitch("input.wav")
    timeseries(hum)
    process(hum)

    """
Example #8
def submit(self, table_name, form_obj, id):
    db = self.db
    r = Recorder(db, table_name)
    updated = True
    if id:
        result_msg = r.update(id, form_obj)
        form_obj['id'] = id
    else:
        result_msg = r.input(form_obj)
        updated = False
        form_obj['id'] = r.get_last_id()
    self.submitted.emit(updated, table_name, form_obj)
    return result_msg
Example #9
File: app.py Project: petri/mp3recorder
class MyApp( wxApp ):
   def OnInit( self ):
      self.recorder=Recorder()
      self.res   = wxXmlResource( GUI_FILENAME )
      self.frame = self.res.LoadFrame( None, GUI_MAINFRAME_NAME )
      self.frame.Show()
        
      EVT_BUTTON(self.frame, XRCID("button_quit"), self.onClick)
      EVT_BUTTON(self.frame, XRCID("button_start"), self.onClick)
      EVT_BUTTON(self.frame, XRCID("button_stop"), self.onClick)

      in_src = XRCCTRL(self.frame, "choice_inputsource")
      for n in self.recorder.getDeviceNames():
         in_src.Append(n)
      in_src.SetSelection(0)
      
      self.frame.Bind(EVT_IDLE, self.onIdle)
      
      return 1

   def onIdle(self, evt):
      if self.recorder.status == RECORDING:
         self.recorder.process()
         XRCCTRL(self.frame, "main_frame_statusbar").SetStatusText("bytes written: " +str(self.recorder.bytesWritten()),0)
      evt.Skip()
        
   def onClick(self, evt):
      btn = evt.GetEventObject()
      name = evt.GetEventObject().GetName()
       
      if name == "button_start":
         self.startRecording()
         btn.Disable()
         XRCCTRL(self.frame, "button_stop").Enable()
          
      elif name == "button_stop":
         self.stopRecording()
         btn.Disable()
         XRCCTRL(self.frame, "button_start").Enable()

      else:
         self.quitApplication()
         
   def quitApplication(self):
      self.frame.Close(True)

   def startRecording(self):
      self.recorder.setDevice(XRCCTRL(self.frame, "choice_inputsource").GetStringSelection())

      path = wxFileSelector("Select file to save mp3 audio to")
      self.recorder.start(open(path, "wb"))
       
   def stopRecording(self):
      self.recorder.stop()
Example #10
class ButtonRecorder(object): 
    def __init__(self, filename): 
        self.filename = filename 
        gpio.setup(23, gpio.IN, pull_up_down=gpio.PUD_UP) 
        self.rec = Recorder(channels=2) 

    def start(self): 
        gpio.add_event_detect(23, gpio.FALLING, callback=self.falling, bouncetime=10) 

    def rising(self, channel): 
        gpio.remove_event_detect(23) 
        print 'Button up' 
        gpio.add_event_detect(23, gpio.FALLING, callback=self.falling, bouncetime=10) 
        #TODO: dim red LED
        self.recfile.stop_recording() 
        self.recfile.close() 
    
    def falling(self, channel):
        ts = time.time()
        gpio.remove_event_detect(23)
        print 'Button down'
        gpio.add_event_detect(23, gpio.RISING, callback=self.rising, bouncetime=10)
        #TODO: light green LED
        play = PlayFile(random.choice(vloeken.keys()))
        play.start()
        #TODO: dim green, light red LED
        vloeken[self.filename + str(ts) + '.wav'] = {'inputfor': []}
        with open('vloeken.log', 'wb') as handle:
            pickle.dump(vloeken, handle)
        self.recfile = self.rec.open(self.filename + str(ts) + '.wav', 'wb')
        self.recfile.start_recording()
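
For context, a hypothetical harness for the class above (the 'clip_' prefix and BCM pin numbering are assumptions, not taken from the original project): it arms the falling-edge callback and then idles, since the GPIO library fires callbacks on its own thread.

import time
import RPi.GPIO as gpio

gpio.setmode(gpio.BCM)  # pin 23 used above is treated as a BCM channel number
recorder = ButtonRecorder('clip_')
recorder.start()  # arm the falling-edge detection
try:
    while True:
        time.sleep(1)  # nothing to do here; callbacks run on the GPIO thread
except KeyboardInterrupt:
    gpio.cleanup()  # release the pin on exit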
Example #11
File: sts-moses.py Project: katzoo/amu
    def __init__(self, config_path):
        # read config file
        config = ConfigParser.RawConfigParser()
        config.read(config_path)

        # general
        self.tmp_file = config.get('general', 'tmp_file')

        # gsr api
        self.gsr_api_lang = config.get('gsr_api', 'language')

        # moses
        self.e = config.get('moses', 'e')
        self.f = config.get('moses', 'f')
        self.moses_config_orig = config.get('moses', 'config_orig')
        self.moses_config_fix = config.get('moses', 'config_fix')
        self.moses_dir = config.get('moses', 'moses_dir')
        self.model_dir = config.get('moses', 'model_dir')
        self.model_dir_jenkins = config.get('moses', 'model_dir_jenkins')
        self.threads = config.getint('moses', 'threads')

        # pyttsx
        self.tts_voice_id = config.get('pyttsx', 'voice')
        self.tts_voice_rate = config.getint('pyttsx', 'rate')

        # create recorder
        self.recorder = Recorder(language=self.gsr_api_lang)
Example #12
def __init__(self, url):
    self.url = url          # the URL to analyze
    self.block_li = []      # list of text blocks contained in the page
    self.title = ''
    # reset the record
    self.recorder = Recorder()
    self.recorder.reset()
Example #13
File: lqr.py Project: Chunting/control
    def __init__(self, solve_continuous=False, **kwargs): 

        super(Control, self).__init__(**kwargs)

        self.DOF = 2 # task space dimensionality 
        self.u = None
        self.solve_continuous = solve_continuous

        if self.write_to_file is True:
            from recorder import Recorder
            # set up recorders
            self.u_recorder = Recorder('control signal', self.task, 'lqr')
            self.xy_recorder = Recorder('end-effector position', self.task, 'lqr')
            self.dist_recorder = Recorder('distance from target', self.task, 'lqr')
            self.recorders = [self.u_recorder, 
                            self.xy_recorder, 
                            self.dist_recorder]
Example #14
    def sendValue():
        rec = Recorder()
        data = rec.read()
        vol = rec.rms(data) * 50
        bam = rec.prepare_fft(data, CHUNK)

        result_dict = {"volume": vol, "frequency": bam}
        for client in clients:
            # client.write_message(vol.__str__())
            # client.write_message(bam.__str__())
            client.write_message(tornado.escape.json_encode(result_dict))
Example #15
def __init__(self, pos=(0, 0)):
    filename = datetime.datetime.fromtimestamp(time.time())\
               .strftime('%Y-%m-%d %H.%M.%S')
    from recorder import Recorder
    self.path = Recorder.traceFileToPath(filename)
    widget_pos = snapshot.snapWidgetFromPoint(pos, self.path)
    # SmartClick does not support Orient
    self.pos = (pos[0] - widget_pos[0], pos[1] - widget_pos[1])
Example #16
File: osc.py Project: Chunting/control
    def __init__(self, null_control=True, **kwargs): 
        """
        null_control boolean: apply second controller in null space or not
        """

        super(Control, self).__init__(**kwargs)

        self.DOF = 2 # task space dimensionality 
        self.null_control = null_control

        if self.write_to_file is True:
            from recorder import Recorder
            # set up recorders
            self.u_recorder = Recorder('control signal', self.task, 'osc')
            self.xy_recorder = Recorder('end-effector position', self.task, 'osc')
            self.dist_recorder = Recorder('distance from target', self.task, 'osc')
            self.recorders = [self.u_recorder, 
                            self.xy_recorder, 
                            self.dist_recorder]
Example #17
	def __init__(self):
		"""Initializes cameras"""
		spyral.scene.Scene.__init__(self)
		self.rootCamera = spyral.director.get_camera()
		self.screenCamera = self.rootCamera.make_child()
		self.theaterCamera = self.rootCamera.make_child(virtual_size=geom['theater'].size,
														real_size=geom['theater'].size,
														offset=geom['theater'].topleft,
														layers=['upstage', 'stage', 'faces', 'downstage'])
		self.recorder = Recorder()
		self.preventEscapeLock = False
Example #18
def start_recording(self):
    """ Start recording data. """
    if self._recorder is None:
        logger.info("Starting recorder...")
        self._restarts = 0
        self._recorder = Recorder(self._path)
        self._recorder.start()
        self.menu.current_item = self._root_item
        self._toggle_item.header = "Stop\nRecording"
        self._root_item.header = "DreamCatcher\nRecording..."
        self.menu.lcd.backlight(self.menu.lcd.RED)
Example #19
File: gui.py Project: ImageEngine/xcorder
    def startRecorder(self):

        strftime = time.strftime('%Y%m%d%H%M%S')
        self.workdir = '/tmp/xcorder/{s}/'.format(s=strftime)

        if not os.path.isdir(self.workdir):
            os.makedirs(self.workdir)

        self.recorder = Recorder(
                self.video.screen, self.audio.device, self.workdir)

        self.recorder.start()
Example #20
class CustomSimulation(Simulation): #Custom
    def __init__(self, code=None):
        self.code = code
        self.record_every_stock = False
        
    def setting(self, params, fname=None):
        """Simulates multiple stocks; to restrict the simulation to a
        subset of stocks, pass a file name via fname.
        """
        if 'from' in params:
            self.set_date_from(params['from'])
        if 'to' in params:
            self.set_date_to(params['to'])
        print 'from:', self.date_from
        print 'to:', self.date_to
        if self.code is not None:
            self.simulate_a_stock(self.code)  # a private method, so call the parent class's method directly
        else:
            self.simulate_all_stocks(fname)
    
    def set_trading_system(self, params):
        self.trading_system = TradingSystem(params)
        
    def set_date_from(self, date):
        self.date_from = date
        
    def set_date_to(self, date):
        self.date_to = date
        
    def set_data_loader(self, data_loader):
        Simulation.data_loader = data_loader
        
    def set_record_dir(self, record_dir, setting_file_name, system_name, version):
        self.recorder = \
            Recorder(os.path.join(record_dir,system_name,version))
        self.recorder.create_record_folder()
        self.recorder.record_setting(setting_file_name)
        
    def set_record_every_stock(self, true_or_false):
        self.record_every_stock = true_or_false
Example #21
File: __main__.py Project: mre/timelapse
def startStopRecording_(self, notification):
    if self.recording:
        self.recorder.join()
        # Create timelapse after recording?
        if encode:
            self.encoder = Encoder(
                self.image_dir, self.encoder_output_basedir)
            self.encoder.start()
    else:
        self.recorder = Recorder(self.image_dir, screenshot_interval)
        self.recorder.start()
    self.recording = not self.recording
    self.setStatus()
Example #22
File: ahf.py Project: himlen1990/control
    def __init__(self, **kwargs): 

        super(Control, self).__init__(**kwargs)

        self.old_target = [None, None]

        # load up our network
        import glob

        # this code goes into the weights folder, finds the most 
        # recent trial, and loads up the weights
        files = sorted(glob.glob('controllers/weights/rnn*'))
        print 'loading weights from %s' % files[-1]
        W = np.load(files[-1])['arr_0']
        num_states = 4
        self.rnn = RNNet(shape=[num_states * 2, 32, 32, num_states, num_states], 
                     layers=[Linear(), Tanh(), Tanh(), Linear(), Linear()],
                     rec_layers=[1,2],
                     conns={0:[1, 2], 1:[2], 2:[3], 3:[4]},
                     load_weights=W,
                     use_GPU=False)

        offset, W_end, b_end = self.rnn.offsets[(3,4)]
        self.rnn.mask = np.zeros(self.rnn.W.shape, dtype=bool)
        self.rnn.mask[offset:b_end] = True
        self.rnn.W[offset:W_end] = np.eye(4).flatten()

        self.joint_targets = None
        self.act = None

        # set up recorders
        if self.write_to_file is True:
            from recorder import Recorder
            self.u_recorder = Recorder('control signal', self.task, 'hf')
            self.xy_recorder = Recorder('end-effector position', self.task, 'hf')
            self.dist_recorder = Recorder('distance from target', self.task, 'hf')
            self.recorders = [self.u_recorder, 
                            self.xy_recorder, 
                            self.dist_recorder]
Example #23
    def insertPolygon(self, geometry):
        
        # Convert geometry
        wktGeom = geometry.exportToWkt()
        # To force 2D geometry
        if len(wktGeom.split('Z')) > 1:
            wktGeom = wktGeom.split('Z')[0] + wktGeom.split('Z')[1]
            wktGeom = wktGeom.replace(" 0,", ",")
            wktGeom = wktGeom.replace(" 0)", ")")
        geom = "GeomFromText('"
        geom += wktGeom
        geom += "', 2154)"

        geomObj = {}
        geomObj['the_geom'] = geom
        
        db = DbManager(CarhabLayerRegistry.instance().getCurrentCarhabLayer().dbPath)
        
        r = Recorder(db, 'polygon')
        r.input(geomObj)
        
        db.commit()
        db.close()
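
To make the string surgery above concrete, a small sketch with a hypothetical 3D polygon (the coordinates are invented for illustration):

wkt = 'POLYGON Z ((0 0 0, 1 0 0, 1 1 0, 0 0 0))'
parts = wkt.split('Z')
wkt = parts[0] + parts[1]      # 'POLYGON  ((0 0 0, 1 0 0, 1 1 0, 0 0 0))'
wkt = wkt.replace(" 0,", ",")  # strip the z=0 before each comma
wkt = wkt.replace(" 0)", ")")  # strip the z=0 before the closing parens
print(wkt)                     # POLYGON  ((0 0, 1 0, 1 1, 0 0))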
Example #24
def __init__(self):
    logging.basicConfig(
        level=logging.INFO,
        filename=Dashcam.LOG_FILE,
        format="%(asctime)-15s %(name)-12s %(levelname)s %(message)s",
        datefmt="%y-%m-%d %H:%M:%S",
    )
    self.logger = logging.getLogger(self.__class__.__name__)
    current_time = time.localtime()
    if current_time < Dashcam.JAN_1ST_2016:
        # we probably don't have an RTC; do not raise an error for now
        self.logger.error("No RTC? Current time is " + str(datetime.datetime.now()))
        # raise ValueError("There doesn't seem to be an RTC")
    self.recorder = Recorder()
Example #25
    def play(self):
        from recorder import Recorder
        if len(self.filenames) == 1:
            img = Recorder.imageFileToPath(self.filenames[0])
            if not os.path.exists(img):
                snapshot.snapWindow(self.title, img)
                return "message: " + "check point image file " + img + \
                       " doesn't exist and has been created"

            tempfile = 'snapshots/checkpoint.png'
            snapshot.snapWindow(self.title, tempfile)

            if not snapshot.compareSnapshots(img, tempfile):
                return "error: " + "checkpoint fails: " + tempfile + \
                       " is inconsistent with " + img

            # delete temp file
            os.remove(os.path.realpath(tempfile))
            
        else: # this is a temporary behavior modification
            match = False
            tempfile = 'snapshots/checkpoint.png'
            snapshot.snapWindow(self.title, tempfile)
            
            for i in self.filenames:
                img = Recorder.imageFileToPath(i)
                if not os.path.exists(img):
                    return "message: check point image file " + img + \
                           " doesn't exist"

                if snapshot.compareSnapshots(img, tempfile):
                    match = True
                    break
                
            if not match:
                return "error: checkpoint fails: " + tempfile + \
                       " matches none of the image files"
Example #26
    def __init__(self, **kwargs): 

        super(Control, self).__init__(**kwargs)

        self.DOF = 2 # task space dimensionality 
        self.u = None

        # specify which gradient approximation method to use
        self.gradient_approximation = self.spsa

        if self.write_to_file is True:
            from recorder import Recorder
            # set up recorders
            self.xy_recorder = Recorder('end-effector position', self.task, 'gradient_spsa')
            self.recorders = [self.xy_recorder]
Example #28
File: run.py Project: somaproject/recorder
    def __init__(self):
        gladefile = "main.glade"
        self.windowname = "main"
        
        self.wTree = gtk.glade.XML(gladefile, self.windowname)
        self.wTree.signal_autoconnect(self)

        # load popup
        self.popupEpochTree = gtk.glade.XML(gladefile, "menuPopupEpochs")
        self.popupEpoch = self.popupEpochTree.get_widget("menuPopupEpochs")
        self.popupEpochTree.signal_autoconnect(self)
        self.recorder = Recorder()
        self.setupEpochs()

        # now connect the new-experiment and new-epoch widgets
        self.recorder.connect('experiment-create', self.experimentCreate)

        self.propertyPanes = {}
Example #29
    def start(self):
        # Init the amp
        print 'Initializing the amp...'
        recorder = Recorder('lslamp', params.FREQ_S, params.LEN_REC_BUF_SEC, params.NUM_CHANNELS)
        thread_rec = threading.Thread(target=recorder.record)
        thread_rec.start()

        # Start plotting
        plt.show()
        #thread_plot = threading.Thread(target=self.plot_live)
        #thread_plot.start()

        # Get and display data from the amp
        counter = 0
        while not is_terminate_requested:

            data_last_chunk = recorder.get_new_data(params.LEN_DATA_CHUNK_READ, params.AMP_WAIT_SEC)
            recorder.acknowledge_new_data()
            print 'recorder.new_data_counter:', recorder.new_data_counter
            #print 'data_last_chunk.shape: ', data_last_chunk.shape

            # Position the older data to the beginning of the buffer
            self.X_buf[0:(self.len_buf - params.LEN_DATA_CHUNK_READ)]\
                    = self.X_buf[params.LEN_DATA_CHUNK_READ:]

            # Insert the new data into the buffer
            self.X_buf[(self.len_buf - params.LEN_DATA_CHUNK_READ):]\
                    = data_last_chunk

            #i_row_from = int((counter*params.LEN_DATA_CHUNK_READ) % self.len_buf)
            #i_row_to = int(((counter+1)*params.LEN_DATA_CHUNK_READ) % self.len_buf)
            #if i_row_to == 0:
            #    i_row_to = self.len_buf
            #print 'i_row_from, i_row_to:', i_row_from, i_row_to
            #self.X_buf[i_row_from: i_row_to] = data_last_chunk
            #print 'X_buf[i_row_from: i_row_to]:\n', self.X_buf[i_row_from: i_row_to]

            if counter % 2 == 0:
                self.plot_live()

            counter += 1

        # End of while

        # Stop the amp
        recorder.stop_recording()
Example #30
    def sync(self):
        """ Send all data that is in the source db but not in the
        destination db to the destination db.
        """
        from recorder import Recorder

        for fname in self.files:
            srcpath = os.path.join(self.srcdir, fname)
            dstpath = os.path.join(self.dstdir, fname)
            srcRec = Recorder(srcpath)
            srcdb = srcRec.opendb()
            dstRec = Recorder(dstpath)
            dstdb = dstRec.opendb()
            keys = [k for k in srcdb if k not in dstdb]
            count = 0
            for key in keys:
                dstdb[key] = srcdb[key]
                print("transferring %s" % key)
                count += 1
            dstRec.persist()
            print("done, %s records transferred to %s" % (count, dstpath))
Example #31
def __init__(self):
    """Init class with attributes."""
    self.website = Website()
    self.collector = Collector()
    self.cleaner = Cleaner()
    self.recorder = Recorder()
Example #32
class PpoAgent(object):
    envs = None

    def __init__(
        self,
        *,
        scope,
        ob_space,
        ac_space,
        stochpol_fn,
        nsteps,
        nepochs=4,
        nminibatches=1,
        gamma=0.99,
        gamma_ext=0.99,
        lam=0.95,
        ent_coef=0,
        cliprange=0.2,
        max_grad_norm=1.0,
        vf_coef=1.0,
        lr=30e-5,
        adam_hps=None,
        testing=False,
        comm=None,
        comm_train=None,
        use_news=False,
        update_ob_stats_every_step=True,
        int_coeff=None,
        ext_coeff=None,
        obs_save_flag=False,
    ):
        self.lr = lr
        self.ext_coeff = ext_coeff
        self.int_coeff = int_coeff
        self.use_news = use_news
        self.update_ob_stats_every_step = update_ob_stats_every_step
        self.abs_scope = (tf.get_variable_scope().name + '/' +
                          scope).lstrip('/')
        self.testing = testing
        self.comm_log = MPI.COMM_SELF
        if comm is not None and comm.Get_size() > 1:
            self.comm_log = comm
            assert not testing or comm.Get_rank() != 0, \
                "Worker number zero can't be testing"
        if comm_train is not None:
            self.comm_train, self.comm_train_size = comm_train, comm_train.Get_size()
        else:
            self.comm_train, self.comm_train_size = self.comm_log, self.comm_log.Get_size()
        self.is_log_leader = self.comm_log.Get_rank() == 0
        self.is_train_leader = self.comm_train.Get_rank() == 0
        self.obs_save_flag = obs_save_flag
        if self.is_log_leader:
            self.obs_rec = [{'acs': [], 'obs': []} for i in range(100)]

        with tf.variable_scope(scope):
            self.best_ret = -np.inf
            self.local_best_ret = -np.inf
            self.rooms = []
            self.local_rooms = []
            self.scores = []
            self.ob_space = ob_space
            self.ac_space = ac_space
            self.stochpol = stochpol_fn()
            self.nepochs = nepochs
            self.cliprange = cliprange
            self.nsteps = nsteps
            self.nminibatches = nminibatches
            self.gamma = gamma
            self.gamma_ext = gamma_ext
            self.lam = lam
            self.adam_hps = adam_hps or dict()
            self.ph_adv = tf.placeholder(tf.float32, [None, None])
            self.ph_ret_int = tf.placeholder(tf.float32, [None, None])
            self.ph_ret_ext = tf.placeholder(tf.float32, [None, None])
            self.ph_oldnlp = tf.placeholder(tf.float32, [None, None])
            self.ph_oldvpred = tf.placeholder(tf.float32, [None, None])
            self.ph_lr = tf.placeholder(tf.float32, [])
            self.ph_lr_pred = tf.placeholder(tf.float32, [])
            self.ph_cliprange = tf.placeholder(tf.float32, [])

            #Define loss.
            neglogpac = self.stochpol.pd_opt.neglogp(self.stochpol.ph_ac)
            entropy = tf.reduce_mean(self.stochpol.pd_opt.entropy())
            vf_loss_int = (0.5 * vf_coef) * tf.reduce_mean(
                tf.square(self.stochpol.vpred_int_opt - self.ph_ret_int))
            vf_loss_ext = (0.5 * vf_coef) * tf.reduce_mean(
                tf.square(self.stochpol.vpred_ext_opt - self.ph_ret_ext))
            vf_loss = vf_loss_int + vf_loss_ext
            ratio = tf.exp(self.ph_oldnlp - neglogpac)  # p_new / p_old
            negadv = -self.ph_adv
            pg_losses1 = negadv * ratio
            pg_losses2 = negadv * tf.clip_by_value(
                ratio, 1.0 - self.ph_cliprange, 1.0 + self.ph_cliprange)
            pg_loss = tf.reduce_mean(tf.maximum(pg_losses1, pg_losses2))
            ent_loss = (-ent_coef) * entropy
            approxkl = .5 * tf.reduce_mean(
                tf.square(neglogpac - self.ph_oldnlp))
            maxkl = .5 * tf.reduce_max(tf.square(neglogpac - self.ph_oldnlp))
            clipfrac = tf.reduce_mean(
                tf.to_float(tf.greater(tf.abs(ratio - 1.0),
                                       self.ph_cliprange)))
            loss = pg_loss + ent_loss + vf_loss + self.stochpol.aux_loss

            #Create optimizer.
            params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                       scope=self.abs_scope)
            logger.info("PPO: using MpiAdamOptimizer connected to %i peers" %
                        self.comm_train_size)
            trainer = MpiAdamOptimizer(self.comm_train,
                                       learning_rate=self.ph_lr,
                                       **self.adam_hps)
            grads_and_vars = trainer.compute_gradients(loss, params)
            grads, vars = zip(*grads_and_vars)
            if max_grad_norm:
                grads, _grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
            global_grad_norm = tf.global_norm(grads)
            grads_and_vars = list(zip(grads, vars))
            self._train = trainer.apply_gradients(grads_and_vars)

        #Quantities for reporting.
        self._losses = [
            loss, pg_loss, vf_loss, entropy, clipfrac, approxkl, maxkl,
            self.stochpol.aux_loss, self.stochpol.feat_var,
            self.stochpol.max_feat, global_grad_norm
        ]
        self.loss_names = [
            'tot', 'pg', 'vf', 'ent', 'clipfrac', 'approxkl', 'maxkl',
            "auxloss", "featvar", "maxfeat", "gradnorm"
        ]
        self.I = None
        self.disable_policy_update = None
        allvars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                    scope=self.abs_scope)
        if self.is_log_leader:
            tf_util.display_var_info(allvars)
        tf.get_default_session().run(tf.variables_initializer(allvars))
        sync_from_root(tf.get_default_session(),
                       allvars)  #Syncs initialization across mpi workers.
        self.t0 = time.time()
        self.global_tcount = 0

    def start_interaction(self, venvs, disable_policy_update=False):
        self.I = InteractionState(ob_space=self.ob_space,
                                  ac_space=self.ac_space,
                                  nsteps=self.nsteps,
                                  gamma=self.gamma,
                                  venvs=venvs,
                                  stochpol=self.stochpol,
                                  comm=self.comm_train)
        self.disable_policy_update = disable_policy_update
        self.recorder = Recorder(nenvs=self.I.nenvs,
                                 score_multiple=venvs[0].score_multiple)

    def collect_random_statistics(self, num_timesteps):
        #Initializes observation normalization with data from random agent.
        all_ob = []
        for lump in range(self.I.nlump):
            all_ob.append(self.I.venvs[lump].reset())
        for step in range(num_timesteps):
            for lump in range(self.I.nlump):
                acs = np.random.randint(low=0,
                                        high=self.ac_space.n,
                                        size=(self.I.lump_stride, ))
                self.I.venvs[lump].step_async(acs)
                ob, _, _, _ = self.I.venvs[lump].step_wait()
                all_ob.append(ob)
                if len(all_ob) % (128 * self.I.nlump) == 0:
                    ob_ = np.asarray(all_ob).astype(np.float32).reshape(
                        (-1, *self.ob_space.shape))
                    self.stochpol.ob_rms.update(ob_[:, :, :, -1:])
                    all_ob.clear()

    def stop_interaction(self):
        self.I.close()
        self.I = None

    @logger.profile("update")
    def update(self):

        #Some logic gathering best ret, rooms etc using MPI.
        temp = sum(MPI.COMM_WORLD.allgather(self.local_rooms), [])
        temp = sorted(list(set(temp)))
        self.rooms = temp

        temp = sum(MPI.COMM_WORLD.allgather(self.scores), [])
        temp = sorted(list(set(temp)))
        self.scores = temp

        temp = sum(MPI.COMM_WORLD.allgather([self.local_best_ret]), [])
        self.best_ret = max(temp)

        eprews = MPI.COMM_WORLD.allgather(
            np.mean(list(self.I.statlists["eprew"])))
        local_best_rets = MPI.COMM_WORLD.allgather(self.local_best_ret)
        n_rooms = sum(MPI.COMM_WORLD.allgather([len(self.local_rooms)]), [])

        if MPI.COMM_WORLD.Get_rank() == 0:
            logger.info(f"Rooms visited {self.rooms}")
            logger.info(f"Best return {self.best_ret}")
            logger.info(f"Best local return {sorted(local_best_rets)}")
            logger.info(f"eprews {sorted(eprews)}")
            logger.info(f"n_rooms {sorted(n_rooms)}")
            logger.info(f"Extrinsic coefficient {self.ext_coeff}")
            logger.info(f"Gamma {self.gamma}")
            logger.info(f"Gamma ext {self.gamma_ext}")
            logger.info(f"All scores {sorted(self.scores)}")

        #Normalize intrinsic rewards.
        rffs_int = np.array(
            [self.I.rff_int.update(rew) for rew in self.I.buf_rews_int.T])
        self.I.rff_rms_int.update(rffs_int.ravel())
        rews_int = self.I.buf_rews_int / np.sqrt(self.I.rff_rms_int.var)
        self.mean_int_rew = np.mean(rews_int)
        self.max_int_rew = np.max(rews_int)

        #Don't normalize extrinsic rewards.
        rews_ext = self.I.buf_rews_ext

        rewmean, rewstd, rewmax = (self.I.buf_rews_int.mean(),
                                   self.I.buf_rews_int.std(),
                                   np.max(self.I.buf_rews_int))

        #Calculate intrinsic returns and advantages.
        lastgaelam = 0
        for t in range(self.nsteps - 1, -1, -1):  # nsteps-1 ... 0
            if self.use_news:
                nextnew = self.I.buf_news[:, t + 1] if t + 1 < self.nsteps else self.I.buf_new_last
            else:
                nextnew = 0.0  #No dones for intrinsic reward.
            nextvals = self.I.buf_vpreds_int[:, t + 1] if t + 1 < self.nsteps else self.I.buf_vpred_int_last
            nextnotnew = 1 - nextnew
            delta = rews_int[:, t] + self.gamma * nextvals * nextnotnew - self.I.buf_vpreds_int[:, t]
            self.I.buf_advs_int[:, t] = lastgaelam = delta + self.gamma * self.lam * nextnotnew * lastgaelam
        rets_int = self.I.buf_advs_int + self.I.buf_vpreds_int

        #Calculate extrinsic returns and advantages.
        lastgaelam = 0
        for t in range(self.nsteps - 1, -1, -1):  # nsteps-1 ... 0
            nextnew = self.I.buf_news[:, t + 1] if t + 1 < self.nsteps else self.I.buf_new_last
            #Use dones for extrinsic reward.
            nextvals = self.I.buf_vpreds_ext[:, t + 1] if t + 1 < self.nsteps else self.I.buf_vpred_ext_last
            nextnotnew = 1 - nextnew
            delta = rews_ext[:, t] + self.gamma_ext * nextvals * nextnotnew - self.I.buf_vpreds_ext[:, t]
            self.I.buf_advs_ext[:, t] = lastgaelam = delta + self.gamma_ext * self.lam * nextnotnew * lastgaelam
        rets_ext = self.I.buf_advs_ext + self.I.buf_vpreds_ext

        #Combine the extrinsic and intrinsic advantages.
        self.I.buf_advs = self.int_coeff * self.I.buf_advs_int + self.ext_coeff * self.I.buf_advs_ext

        #Collects info for reporting.
        info = dict(
            advmean=self.I.buf_advs.mean(),
            advstd=self.I.buf_advs.std(),
            retintmean=rets_int.mean(),  # previously retmean
            retintstd=rets_int.std(),  # previously retstd
            retextmean=rets_ext.mean(),  # previously not there
            retextstd=rets_ext.std(),  # previously not there
            rewintmean_unnorm=rewmean,  # previously rewmean
            rewintmax_unnorm=rewmax,  # previously not there
            rewintmean_norm=self.mean_int_rew,  # previously rewintmean
            rewintmax_norm=self.max_int_rew,  # previously rewintmax
            rewintstd_unnorm=rewstd,  # previously rewstd
            vpredintmean=self.I.buf_vpreds_int.mean(),  # previously vpredmean
            vpredintstd=self.I.buf_vpreds_int.std(),  # previously vrpedstd
            vpredextmean=self.I.buf_vpreds_ext.mean(),  # previously not there
            vpredextstd=self.I.buf_vpreds_ext.std(),  # previously not there
            ev_int=np.clip(
                explained_variance(self.I.buf_vpreds_int.ravel(),
                                   rets_int.ravel()), -1, None),
            ev_ext=np.clip(
                explained_variance(self.I.buf_vpreds_ext.ravel(),
                                   rets_ext.ravel()), -1, None),
            rooms=SemicolonList(self.rooms),
            n_rooms=len(self.rooms),
            best_ret=self.best_ret,
            reset_counter=self.I.reset_counter)

        info['mem_available'] = psutil.virtual_memory().available

        to_record = {
            'acs': self.I.buf_acs,
            'rews_int': self.I.buf_rews_int,
            'rews_int_norm': rews_int,
            'rews_ext': self.I.buf_rews_ext,
            'vpred_int': self.I.buf_vpreds_int,
            'vpred_ext': self.I.buf_vpreds_ext,
            'adv_int': self.I.buf_advs_int,
            'adv_ext': self.I.buf_advs_ext,
            'ent': self.I.buf_ent,
            'ret_int': rets_int,
            'ret_ext': rets_ext,
        }
        if self.I.venvs[0].record_obs:
            to_record['obs'] = self.I.buf_obs[None]

        self.recorder.record(bufs=to_record, infos=self.I.buf_epinfos)

        #Create feeddict for optimization.
        envsperbatch = self.I.nenvs // self.nminibatches
        ph_buf = [
            (self.stochpol.ph_ac, self.I.buf_acs),
            (self.ph_ret_int, rets_int),
            (self.ph_ret_ext, rets_ext),
            (self.ph_oldnlp, self.I.buf_nlps),
            (self.ph_adv, self.I.buf_advs),
        ]
        if self.I.mem_state is not NO_STATES:
            ph_buf.extend([
                (self.stochpol.ph_istate, self.I.seg_init_mem_state),
                (self.stochpol.ph_new, self.I.buf_news),
            ])

        verbose = True
        if verbose and self.is_log_leader:
            samples = np.prod(self.I.buf_advs.shape)
            logger.info(
                "buffer shape %s, samples_per_mpi=%i, mini_per_mpi=%i, samples=%i, mini=%i "
                % (str(self.I.buf_advs.shape), samples, samples //
                   self.nminibatches, samples * self.comm_train_size,
                   samples * self.comm_train_size // self.nminibatches))
            logger.info(" " * 6 + fmt_row(13, self.loss_names))

        epoch = 0
        start = 0
        #Optimizes on current data for several epochs.
        while epoch < self.nepochs:
            end = start + envsperbatch
            mbenvinds = slice(start, end, None)

            fd = {ph: buf[mbenvinds] for (ph, buf) in ph_buf}
            fd.update({self.ph_lr: self.lr, self.ph_cliprange: self.cliprange})
            fd[self.stochpol.ph_ob[None]] = np.concatenate([
                self.I.buf_obs[None][mbenvinds],
                self.I.buf_ob_last[None][mbenvinds, None]
            ], 1)
            assert list(fd[self.stochpol.ph_ob[None]].shape) == [self.I.nenvs//self.nminibatches, self.nsteps + 1] + list(self.ob_space.shape), \
                [fd[self.stochpol.ph_ob[None]].shape, [self.I.nenvs//self.nminibatches, self.nsteps + 1] + list(self.ob_space.shape)]
            fd.update({
                self.stochpol.ph_mean: self.stochpol.ob_rms.mean,
                self.stochpol.ph_std: self.stochpol.ob_rms.var**0.5
            })

            ret = tf.get_default_session().run(self._losses + [self._train],
                                               feed_dict=fd)[:-1]
            if not self.testing:
                lossdict = dict(zip(self.loss_names, ret))
            else:
                lossdict = {}
            #Synchronize the lossdict across mpi processes, otherwise weights may be rolled back on one process but not another.
            _maxkl = lossdict.pop('maxkl')
            lossdict = dict_gather(self.comm_train, lossdict, op='mean')
            maxmaxkl = dict_gather(self.comm_train, {"maxkl": _maxkl},
                                   op='max')
            lossdict["maxkl"] = maxmaxkl["maxkl"]
            if verbose and self.is_log_leader:
                logger.info(
                    "%i:%03i %s" %
                    (epoch, start,
                     fmt_row(13, [lossdict[n] for n in self.loss_names])))
            start += envsperbatch
            if start == self.I.nenvs:
                epoch += 1
                start = 0

        if self.is_train_leader:
            self.I.stats["n_updates"] += 1
            info.update([('opt_' + n, lossdict[n]) for n in self.loss_names])
            tnow = time.time()
            info['tps'] = self.nsteps * self.I.nenvs / (tnow -
                                                        self.I.t_last_update)
            info['time_elapsed'] = time.time() - self.t0
            self.I.t_last_update = tnow
        self.stochpol.update_normalization(  # Necessary for continuous control tasks with odd obs ranges, only implemented in mlp policy,
            ob=self.I.buf_obs  # NOTE: not shared via MPI
        )
        return info

    def env_step(self, l, acs):
        self.I.venvs[l].step_async(acs)
        self.I.env_results[l] = None

    def env_get(self, l):
        """
        Get most recent (obs, rews, dones, infos) from vectorized environment
        Using step_wait if necessary
        """
        if self.I.step_count == 0:  # On the zeroth step with a new venv, we need to call reset on the environment
            ob = self.I.venvs[l].reset()
            out = self.I.env_results[l] = (ob, None,
                                           np.ones(self.I.lump_stride,
                                                   bool), {})
        else:
            if self.I.env_results[l] is None:
                out = self.I.env_results[l] = self.I.venvs[l].step_wait()
            else:
                out = self.I.env_results[l]
        return out

    @logger.profile("step")
    def step(self):
        #Does a rollout.
        t = self.I.step_count % self.nsteps
        epinfos = []
        for l in range(self.I.nlump):
            obs, prevrews, news, infos = self.env_get(l)
            if self.is_log_leader:
                if self.obs_save_flag:
                    while self.I.stats['epcount'] + 1 > len(self.obs_rec):
                        self.obs_rec.append({'obs': [], 'acs': []})
                    self.obs_rec[self.I.stats['epcount']]['obs'].append(obs)
            for env_pos_in_lump, info in enumerate(infos):
                if 'episode' in info:
                    #Information like rooms visited is added to info on end of episode.
                    epinfos.append(info['episode'])
                    info_with_places = info['episode']
                    try:
                        info_with_places['places'] = info['episode'][
                            'visited_rooms']
                    except:
                        import ipdb
                        ipdb.set_trace()
                    self.I.buf_epinfos[
                        env_pos_in_lump +
                        l * self.I.lump_stride][t] = info_with_places

            sli = slice(l * self.I.lump_stride, (l + 1) * self.I.lump_stride)
            memsli = slice(None) if self.I.mem_state is NO_STATES else sli
            dict_obs = self.stochpol.ensure_observation_is_dict(obs)

            with logger.profile_kv("policy_inference"):
                #Calls the policy and value function on current observation.
                acs, vpreds_int, vpreds_ext, nlps, self.I.mem_state[
                    memsli], ent = self.stochpol.call(
                        dict_obs,
                        news,
                        self.I.mem_state[memsli],
                        update_obs_stats=self.update_ob_stats_every_step)
            if self.is_log_leader:
                if self.obs_save_flag:
                    while self.I.stats['epcount'] + 1 > len(self.obs_rec):
                        self.obs_rec.append({'obs': [], 'acs': []})
                    self.obs_rec[self.I.stats['epcount']]['acs'].append(acs)
            self.env_step(l, acs)

            #Update buffer with transition.
            for k in self.stochpol.ph_ob_keys:
                self.I.buf_obs[k][sli, t] = dict_obs[k]
            self.I.buf_news[sli, t] = news
            self.I.buf_vpreds_int[sli, t] = vpreds_int
            self.I.buf_vpreds_ext[sli, t] = vpreds_ext
            self.I.buf_nlps[sli, t] = nlps
            self.I.buf_acs[sli, t] = acs
            self.I.buf_ent[sli, t] = ent

            if t > 0:
                self.I.buf_rews_ext[sli, t - 1] = prevrews

        self.I.step_count += 1
        if t == self.nsteps - 1 and not self.disable_policy_update:
            #We need to take one extra step so every transition has a reward.
            for l in range(self.I.nlump):
                sli = slice(l * self.I.lump_stride,
                            (l + 1) * self.I.lump_stride)
                memsli = slice(None) if self.I.mem_state is NO_STATES else sli
                nextobs, rews, nextnews, _ = self.env_get(l)
                dict_nextobs = self.stochpol.ensure_observation_is_dict(
                    nextobs)
                for k in self.stochpol.ph_ob_keys:
                    self.I.buf_ob_last[k][sli] = dict_nextobs[k]
                self.I.buf_new_last[sli] = nextnews
                with logger.profile_kv("policy_inference"):
                    _, self.I.buf_vpred_int_last[
                        sli], self.I.buf_vpred_ext_last[
                            sli], _, _, _ = self.stochpol.call(
                                dict_nextobs,
                                nextnews,
                                self.I.mem_state[memsli],
                                update_obs_stats=False)
                self.I.buf_rews_ext[sli, t] = rews

            #Calculate the intrinsic rewards for the rollout.
            fd = {}
            fd[self.stochpol.ph_ob[None]] = np.concatenate(
                [self.I.buf_obs[None], self.I.buf_ob_last[None][:, None]], 1)
            fd.update({
                self.stochpol.ph_mean: self.stochpol.ob_rms.mean,
                self.stochpol.ph_std: self.stochpol.ob_rms.var**0.5
            })
            fd[self.stochpol.ph_ac] = self.I.buf_acs
            self.I.buf_rews_int[:] = tf.get_default_session().run(
                self.stochpol.int_rew, fd)

            if not self.update_ob_stats_every_step:
                #Update observation normalization parameters after the rollout is completed.
                obs_ = self.I.buf_obs[None].astype(np.float32)
                self.stochpol.ob_rms.update(
                    obs_.reshape((-1, *obs_.shape[2:]))[:, :, :, -1:])
            if not self.testing:
                update_info = self.update()
            else:
                update_info = {}
            self.I.seg_init_mem_state = copy(self.I.mem_state)
            global_i_stats = dict_gather(self.comm_log, self.I.stats, op='sum')
            global_deque_mean = dict_gather(
                self.comm_log,
                {n: np.mean(dvs)
                 for n, dvs in self.I.statlists.items()},
                op='mean')
            update_info.update(global_i_stats)
            update_info.update(global_deque_mean)
            self.global_tcount = global_i_stats['tcount']
            for infos_ in self.I.buf_epinfos:
                infos_.clear()
        else:
            update_info = {}

        #Some reporting logic.
        for epinfo in epinfos:
            if self.testing:
                self.I.statlists['eprew_test'].append(epinfo['r'])
                self.I.statlists['eplen_test'].append(epinfo['l'])
            else:
                if "visited_rooms" in epinfo:
                    self.local_rooms += list(epinfo["visited_rooms"])
                    self.local_rooms = sorted(list(set(self.local_rooms)))
                    score_multiple = self.I.venvs[0].score_multiple
                    if score_multiple is None:
                        score_multiple = 1000
                    rounded_score = int(
                        epinfo["r"] / score_multiple) * score_multiple
                    self.scores.append(rounded_score)
                    self.scores = sorted(list(set(self.scores)))
                    self.I.statlists['eprooms'].append(
                        len(epinfo["visited_rooms"]))

                self.I.statlists['eprew'].append(epinfo['r'])
                if self.local_best_ret is None:
                    self.local_best_ret = epinfo["r"]
                elif epinfo["r"] > self.local_best_ret:
                    self.local_best_ret = epinfo["r"]

                self.I.statlists['eplen'].append(epinfo['l'])
                self.I.stats['epcount'] += 1
                self.I.stats['tcount'] += epinfo['l']
                self.I.stats['rewtotal'] += epinfo['r']
                # self.I.stats["best_ext_ret"] = self.best_ret

        return {'update': update_info}
Example #33
    def __init__(self):

        logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                            level=logging.DEBUG)
        logging.info('Start Robotic Arm Control')

        self.robotic_arm = RoboticArm()
        self.robotic_arm.recorder = Recorder(usb_arm=self.robotic_arm.usb_arm)
        self.file_path = None

        Gtk.Window.__init__(self, title='Robotic Arm Control')
        self.connect('delete_event', self.delete_event)
        self.connect('destroy', self.destroy)
        self.set_border_width(10)

        self.base_label = Gtk.Label('Base: ')
        self.base_label.set_alignment(0, 0)
        self.base_label.show()

        self.base_clockwise = Gtk.Button('Clockwise')
        self.base_clockwise.connect('clicked',
                                    self._action_base_clockwise, None)
        self.base_clockwise.show()

        self.base_anticlockwise = Gtk.Button('AntiClockwise')
        self.base_anticlockwise.connect('clicked',
                                        self._action_base_anticlockwise, None)
        self.base_anticlockwise.show()

        self.base_stop = Gtk.Button('Stop')
        self.base_stop.connect('clicked', self._action_base_stop, None)
        self.base_stop.show()

        self.arm_label = Gtk.Label('Shoulder: ')
        self.arm_label.set_alignment(0, 0)
        self.arm_label.show()

        self.arm_up = Gtk.Button('Up')
        self.arm_up.connect('clicked', self._action_arm_up, None)
        self.arm_up.show()

        self.arm_down = Gtk.Button('Down')
        self.arm_down.connect('clicked', self._action_arm_down, None)
        self.arm_down.show()

        self.arm_stop = Gtk.Button('Stop')
        self.arm_stop.connect('clicked', self._action_arm_stop, None)
        self.arm_stop.show()

        self.elbow_label = Gtk.Label('Elbow: ')
        self.elbow_label.set_alignment(0, 0)
        self.elbow_label.show()

        self.elbow_up = Gtk.Button('Up')
        self.elbow_up.connect('clicked', self._action_elbow_up, None)
        self.elbow_up.show()

        self.elbow_down = Gtk.Button('Down')
        self.elbow_down.connect('clicked', self._action_elbow_down, None)
        self.elbow_down.show()

        self.elbow_stop = Gtk.Button('Stop')
        self.elbow_stop.connect('clicked', self._action_elbow_stop, None)
        self.elbow_stop.show()

        self.wrist_label = Gtk.Label('Wrist: ')
        self.wrist_label.set_alignment(0, 0)
        self.wrist_label.show()

        self.wrist_up = Gtk.Button('Up')
        self.wrist_up.connect('clicked', self._action_wrist_up, None)
        self.wrist_up.show()

        self.wrist_down = Gtk.Button('Down')
        self.wrist_down.connect('clicked', self._action_wrist_down, None)
        self.wrist_down.show()

        self.wrist_stop = Gtk.Button('Stop')
        self.wrist_stop.connect('clicked', self._action_wrist_stop, None)
        self.wrist_stop.show()

        self.gripper_label = Gtk.Label('Gripper: ')
        self.gripper_label.set_alignment(0, 0)
        self.gripper_label.show()

        self.gripper_open = Gtk.Button('Open')
        self.gripper_open.connect('clicked', self._action_gripper_open, None)
        self.gripper_open.show()

        self.gripper_close = Gtk.Button('Close')
        self.gripper_close.connect('clicked', self._action_gripper_close, None)
        self.gripper_close.show()

        self.gripper_stop = Gtk.Button('Stop')
        self.gripper_stop.connect('clicked', self._action_gripper_stop, None)
        self.gripper_stop.show()

        self.led_label = Gtk.Label('Light: ')
        self.led_label.set_alignment(0, 0)
        self.led_label.show()

        self.led = Gtk.Button('Off')
        self.led.connect('clicked', self._action_led, None)
        self.led.show()

        self.stop_all = Gtk.Button('Stop All')
        self.stop_all.connect('clicked', self._action_stop_all, None)
        self.stop_all.show()

        self.image = Gtk.Image()
        self.image.set_from_file('arm.jpg')
        self.image.show()

        self.space_label = Gtk.Label(' ')
        self.space_label.set_alignment(0, 0)
        self.space_label.show()

        self.rec_label = Gtk.Label('Recorded Program:')
        self.rec_label.set_alignment(0, 0)
        self.rec_label.show()

        self.record_switch = Gtk.Button('Record')
        self.record_switch.connect('clicked', self._action_record_switch, None)
        self.record_switch.show()

        self.record_play = Gtk.Button('Play')
        self.record_play.connect('clicked', self._action_record_play, None)
        self.record_play.show()

        self.record_play_reverse = Gtk.Button('Play Reversed')
        self.record_play_reverse.connect('clicked',
                                         self._action_record_play_reverse,
                                         None)
        self.record_play_reverse.show()

        self.record_clear = Gtk.Button('Clear Program')
        self.record_clear.connect('clicked', self._action_record_clear, None)
        self.record_clear.show()

        self.file_label = Gtk.Label(' - unsaved - ')
        self.file_label.set_alignment(0, 0)
        self.file_label.show()

        self.nsteps_label = Gtk.Label('# Steps: ')
        self.nsteps_label.set_alignment(0, 0)
        self.nsteps_label.show()

        self.runtime_label = Gtk.Label('Runtime: ')
        self.runtime_label.set_alignment(0, 0)
        self.runtime_label.show()

        self.nsteps_value_label = Gtk.Label('0')
        self.nsteps_value_label.set_alignment(0, 0)
        self.nsteps_value_label.show()

        self.runtime_value_label = Gtk.Label('00:00')
        self.runtime_value_label.set_alignment(0, 0)
        self.runtime_value_label.show()

        self.save = Gtk.Button('Save')
        self.save.connect('clicked', self._action_save, None)
        self.save.show()

        self.open = Gtk.Button('Open')
        self.open.connect('clicked', self._action_open, None)
        self.open.show()

        adj = Gtk.Adjustment(2.0, 0.0, 100.0, 0.1)
        self.run_4_entry = Gtk.SpinButton()
        self.run_4_entry.configure(adj, 0.1, 2)
        self.run_4_entry.show()

        self.run_4_toggle = Gtk.CheckButton('Run for sec:')
        self.run_4_toggle.set_active(False)
        self.run_4_toggle.connect('toggled', self._action_run_4_toggle, None)
        self.run_4_toggle.show()

        self.controls = Gtk.Grid()
        self.controls.attach(self.base_label, 0, 0, 1, 1)
        self.controls.attach(self.base_clockwise, 1, 0, 1, 1)
        self.controls.attach(self.base_stop, 2, 0, 1, 1)
        self.controls.attach(self.base_anticlockwise, 3, 0, 1, 1)
        self.controls.attach(self.arm_label, 0, 1, 1, 1)
        self.controls.attach(self.arm_up, 1, 1, 1, 1)
        self.controls.attach(self.arm_stop, 2, 1, 1, 1)
        self.controls.attach(self.arm_down, 3, 1, 1, 1)
        self.controls.attach(self.elbow_label, 0, 2, 1, 1)
        self.controls.attach(self.elbow_up, 1, 2, 1, 1)
        self.controls.attach(self.elbow_stop, 2, 2, 1, 1)
        self.controls.attach(self.elbow_down, 3, 2, 1, 1)
        self.controls.attach(self.wrist_label, 0, 3, 1, 1)
        self.controls.attach(self.wrist_up, 1, 3, 1, 1)
        self.controls.attach(self.wrist_stop, 2, 3, 1, 1)
        self.controls.attach(self.wrist_down, 3, 3, 1, 1)
        self.controls.attach(self.gripper_label, 0, 4, 1, 1)
        self.controls.attach(self.gripper_open, 1, 4, 1, 1)
        self.controls.attach(self.gripper_stop, 2, 4, 1, 1)
        self.controls.attach(self.gripper_close, 3, 4, 1, 1)
        self.controls.attach(self.led_label, 0, 5, 1, 1)
        self.controls.attach(self.led, 1, 5, 3, 1)
        self.controls.attach(self.stop_all, 1, 6, 3, 1)
        self.controls.attach(self.image, 4, 0, 1, 7)
        self.controls.attach(self.run_4_toggle, 0, 7, 2, 1)
        self.controls.attach(self.run_4_entry, 2, 7, 2, 1)
        self.controls.attach(self.space_label, 0, 8, 5, 1)
        self.controls.attach(self.rec_label, 0, 9, 5, 1)
        self.controls.attach(self.file_label, 0, 10, 5, 1)
        self.controls.attach(self.record_switch, 4, 10, 1, 1)
        self.controls.attach(self.record_play, 4, 11, 1, 1)
        self.controls.attach(self.record_play_reverse, 4, 12, 1, 1)
        self.controls.attach(self.record_clear, 4, 13, 1, 1)
        self.controls.attach(self.nsteps_label, 0, 12, 2, 1)
        self.controls.attach(self.runtime_label, 0, 13, 2, 1)
        self.controls.attach(self.nsteps_value_label, 2, 12, 2, 1)
        self.controls.attach(self.runtime_value_label, 2, 13, 2, 1)
        self.controls.attach(self.save, 0, 14, 4, 1)
        self.controls.attach(self.open, 4, 14, 1, 1)
        self.controls.show()

        self.add(self.controls)
        self.show()
        logging.info('Started Robotic Arm Control')
Example #34
0
File: pitcher.py Project: 4g/pitcher
def pitch2(bank, dependency):
    odir = setup_dir(dependency)
    section_title = "--------{title}---------"
    question = "Q.{n}: {q}"

    total_time = 0
    pitch_start = time()
    q_times = {}
    next = 1
    q_done = set()

    while next != 0:
        q_index = next
        q_done.add(q_index)
        q = bank[q_index]
        print(question.format(n=q_index, q=q))
        q_start = time()

        q_fname = string_to_filename(q)
        rec_path = f"{odir}/{q_fname}.wav"
        rec = Recorder(rec_path)

        rec.start()

        while next in q_done:
            print(set(bank) - set(q_done))
            inp = input("Next question:")
            try:
                inp = int(inp)
            except ValueError:
                pass
            if inp in q_done:
                print(f"repeated q {q_done}")
            elif inp not in bank:
                print("invalid q no.")
            else:
                next = inp

        rec.stop()
        rec.join()

        q_end = time()
        q_time = int(q_end - q_start)
        # print (f"Duration:{q_time}s TotalTime:{total_time}s")
        total_time += q_time
        q_times[q_index] = q_time

    wall_time = int(time() - pitch_start)

    print("++++++++++++PITCH Finished++++++{wall_time} secs+++++++")

    for section in dependency:
        total_time = sum(q_times[q] for q in dependency[section])
        num_questions = len(dependency[section])
        print(
            f"---------{section}---duration {total_time} seconds --- #{num_questions} questions ----"
        )

    for q_index in sorted(q_times, reverse=True):
        print(f"{q_times[q_index]} secs: {bank[q_index]}")

    shutil.copytree(odir, "latest")
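
A minimal driver sketch for pitch2, assuming a question bank keyed by integer and a dependency map from section name to question indices (all names and questions below are illustrative, not from the source):

# Hypothetical inputs: bank maps question number -> question text,
# dependency maps section name -> list of question numbers.
bank = {
    1: "What problem are you solving?",
    2: "How big is the market?",
    3: "What is your business model?",
}
dependency = {
    "problem": [1],
    "business": [2, 3],
}

pitch2(bank, dependency)  # records one .wav per answered question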
Example #35
0
File: scene.py Project: tstg/mtslidar2
	def __init__(self):
		self.sampler = GridSampler()
		self.recorder = Recorder()
		self.lidar = Lidar()
		self.tracer = Tracer(self.lidar, self.recorder)
Example #36
0
def getFlightPrice(driver, url, id, worker_num):
    """
    This function send url to remote server and get the result.
    Save the result into file.
    Input parameter: 
        dirver: The web driver.
        url: type[string] . The url address.
        id: type[int] The flight id.
        worker_num: type[int] the worker number.
    """
    #     flight_module_class_name='flight-module.segment.offer-listing'

    flight_id = str(id)

    re = Recorder(flight_id, recorder.RecorderMode.binary)

    if runDriver(driver, url, id):
        # Write the flight id.
        flight_id = "<flight_id>" + flight_id
        re.writeN(flight_id)

        #Write the url
        re.write("<url>")
        re.writeN(url)

        #Write the search date
        t = datetime.datetime.now().strftime("%Y-%m-%d %H %M %S")
        search_date = "<search_date>" + t
        re.writeN(search_date)

        #Write the worker number
        re.write("<worker_num>")
        re.writeN(str(worker_num))

        time.sleep(1)
        body_element = driver.find_element_by_tag_name('body')
        re.writeN(body_element.text)

        time.sleep(1)
        re.finish()
    else:
        print("worker[%d] failed to handle flight_id[%d]" % (worker_num, id))
Example #37
0
    def __init__(self, config, pnhandler, inlet_amp, stimulator = None):
        self.config = config
        self.kalman_time = 0
        
        # init pnhandler, inlet_amp, stimulator and decoder/filter
        self.pnhandler = pnhandler
        self.inlet_amp = inlet_amp
        self.stimulator = stimulator
        self.recorder = Recorder(self.config, prediction = True)
        self.decoder = None
        self.filter = None
    
        
        # params of amp data stream
        self.numCh = self.config['amp_config'].getint('n_channels_amp')
        # if feedback, last channel is stimulation data
        if self.config['amp_config'].getboolean('feedback'):
            self.numCh -= 1
        self.offCh = self.numCh
        self.srate = self.config['amp_config'].getint('fs_amp')

        # time of realtime experiment (inf if set to -1)
        experiment_time = self.config['realtime'].getint('experiment_time_realtime')
        self.experiment_time = float('inf') if experiment_time < 0 else experiment_time * self.srate



        avatar_freq = self.config['avatar'].getint('avatar_freq')
        self.avatar_period = 1 / avatar_freq
        self.avatar_buffer_size = self.config['avatar'].getint('avatar_buffer_size')
        
        # buffer for plotting
        self.coordbuff = []
        
        # PN channels for finger joints
        self.pn_fingers_range = np.asarray(list(map(int, self.config['avatar']['pn_fingers_coords'].split())))
        
        # avatar channels for finger joints
        self.avatar_fingers_range = np.asarray(list(map(int, self.config['avatar']['avatar_fingers_coords'].split())))
        
        # buffer
        self.avatar_buffer = np.zeros(self.avatar_buffer_size)
        
        # initializing Avatar connection
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.server_address = ('127.0.0.1', 12001)
        self.client_address = ('127.0.0.1', 9001)
        self.sock.bind(self.server_address)
        self._printm('UDP from {} {} established'.format(*self.server_address))
        
        # get parameters of stimulation and set up the refractory period
        self.stimulator.configurate(self.config['stimulation'].getint('electrode_start'),
                                    self.config['stimulation'].getint('electrode_stop'))
        self.stimulation_time = self.config['stimulation'].getint('stimulation_time')
        self.refractory_time = self.config['stimulation'].getint('refractory_time')
        self.refractory_start = 0
        self.avatar_bias_thumb = self.config['avatar'].getint('avatar_bias_thumb')
        self.avatar_scalar_thumb = self.config['avatar'].getfloat('avatar_scalar_thumb')
        self.avatar_bias_index = self.config['avatar'].getint('avatar_bias_index')
        self.avatar_scalar_index = self.config['avatar'].getfloat('avatar_scalar_index')

        self.avatar_parameters = refresh_avatar_parameters()
        
        # control from keyboard
        self.queue_keyboard = Queue()
        self.avatar_data_type = False
        self.realtime_close = False
        on_press = lambda key: keyboard_on_press(self.queue_keyboard, key)
        self.listener = Listener(on_press=on_press, on_release=keyboard_on_release)
        self.listener.start()
Example #38
0
class Control(control.Control):
    """
    A controller that implements operational space control.
    Controls the (x,y) position of a robotic arm end-effector.
    """
    def __init__(self, solve_continuous=False, **kwargs): 

        super(Control, self).__init__(**kwargs)

        self.DOF = 2 # task space dimensionality 
        self.u = None
        self.solve_continuous = solve_continuous

        if self.write_to_file is True:
            from recorder import Recorder
            # set up recorders
            self.u_recorder = Recorder('control signal', self.task, 'lqr')
            self.xy_recorder = Recorder('end-effector position', self.task, 'lqr')
            self.dist_recorder = Recorder('distance from target', self.task, 'lqr')
            self.recorders = [self.u_recorder, 
                            self.xy_recorder, 
                            self.dist_recorder]

    def calc_derivs(self, x, u):
        eps = 0.00001  # finite difference epsilon
        #----------- compute xdot_x and xdot_u using finite differences --------
        # NOTE: here each different run is in its own column
        x1 = np.tile(x, (self.arm.DOF*2,1)).T + np.eye(self.arm.DOF*2) * eps
        x2 = np.tile(x, (self.arm.DOF*2,1)).T - np.eye(self.arm.DOF*2) * eps
        uu = np.tile(u, (self.arm.DOF*2,1))
        f1 = self.plant_dynamics(x1, uu)
        f2 = self.plant_dynamics(x2, uu)
        xdot_x = (f1 - f2) / 2 / eps
   
        xx = np.tile(x, (self.arm.DOF,1)).T 
        u1 = np.tile(u, (self.arm.DOF,1)) + np.eye(self.arm.DOF) * eps
        u2 = np.tile(u, (self.arm.DOF,1)) - np.eye(self.arm.DOF) * eps
        f1 = self.plant_dynamics(xx, u1)
        f2 = self.plant_dynamics(xx, u2)
        xdot_u = (f1 - f2) / 2 / eps

        return xdot_x, xdot_u

    def control(self, arm, x_des=None):
        """Generates a control signal to move the 
        arm to the specified target.
            
        arm Arm: the arm model being controlled
        des list: the desired system position
        x_des np.array: desired task-space force, 
                        system goes to self.target if None
        """
        if self.u is None:
            self.u = np.zeros(arm.DOF)

        self.Q = np.zeros((arm.DOF*2, arm.DOF*2))
        self.Q[:arm.DOF, :arm.DOF] = np.eye(arm.DOF) * 1000.0 
        self.R = np.eye(arm.DOF) * 0.001 

        # calculate desired end-effector acceleration
        if x_des is None:
            self.x = arm.x 
            x_des = self.x - self.target 

        self.arm, state = self.copy_arm(arm)
        A, B = self.calc_derivs(state, self.u)

        if self.solve_continuous is True:
            X = sp_linalg.solve_continuous_are(A, B, self.Q, self.R)
            K = np.dot(np.linalg.pinv(self.R), np.dot(B.T, X))
        else: 
            X = sp_linalg.solve_discrete_are(A, B, self.Q, self.R)
            K = np.dot(np.linalg.pinv(self.R + np.dot(B.T, np.dot(X, B))), np.dot(B.T, np.dot(X, A)))

        # transform the command from end-effector space to joint space
        J = self.arm.gen_jacEE()

        u = np.hstack([np.dot(J.T, x_des), arm.dq])

        self.u = -np.dot(K, u)

        if self.write_to_file is True:
            # feed recorders their signals
            self.u_recorder.record(0.0, self.u)
            self.xy_recorder.record(0.0, self.x)
            self.dist_recorder.record(0.0, self.target - self.x)

        # add in any additional signals 
        for addition in self.additions:
            self.u += addition.generate(self.u, arm)

        return self.u
 
    def copy_arm(self, real_arm):
        """ Make a copy of the arm for local simulation. """
        arm = real_arm.__class__()
        arm.dt = real_arm.dt

        # reset arm position to x_0
        arm.reset(q = real_arm.q, dq = real_arm.dq)

        return arm, np.hstack([real_arm.q, real_arm.dq])

    def plant_dynamics(self, x, u):
        """ Simulate the arm dynamics locally. """
        if x.ndim == 1:
            x = x[:,None]
            u = u[None,:]

        xnext = np.zeros((x.shape))
        for ii in range(x.shape[1]):
            # set the arm position to x
            self.arm.reset(q=x[:self.arm.DOF, ii], 
                          dq=x[self.arm.DOF:self.arm.DOF*2, ii])

            # apply the control signal
            # TODO: should we be using a constant timestep here instead of arm.dt?
            # to even things out when running at different dts? 
            self.arm.apply_torque(u[ii], self.arm.dt)
            # get the system state from the arm
            xnext[:,ii] = np.hstack([np.copy(self.arm.q), 
                                   np.copy(self.arm.dq)])

        if self.solve_continuous is True:
            xdot = ((np.asarray(xnext) - np.asarray(x)) / self.arm.dt).squeeze()
            return xdot
        return xnext

    def gen_target(self, arm):
        gain = np.sum(arm.L) * .75
        bias = -np.sum(arm.L) * 0
        
        self.target = np.random.random(size=(2,)) * gain + bias

        return self.target.tolist()
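
The central-difference scheme in calc_derivs can be sanity-checked on a toy linear system whose Jacobians are known exactly; a minimal sketch (toy dynamics, not the arm model):

import numpy as np

# Toy dynamics xdot = A x + B u, so the finite-difference estimates of
# xdot_x and xdot_u can be compared against the true A and B.
A_true = np.array([[0.0, 1.0], [-2.0, -0.5]])
B_true = np.array([[0.0], [1.0]])
f = lambda x, u: A_true @ x + B_true @ u

eps = 1e-5
x0, u0 = np.array([0.3, -0.1]), np.array([0.2])

A_est = np.column_stack([(f(x0 + eps * e, u0) - f(x0 - eps * e, u0)) / (2 * eps)
                         for e in np.eye(2)])
B_est = np.column_stack([(f(x0, u0 + eps * e) - f(x0, u0 - eps * e)) / (2 * eps)
                         for e in np.eye(1)])
assert np.allclose(A_est, A_true) and np.allclose(B_est, B_true)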
Example #39
0
class Control(control.Control):
    """
    A controller that implements operational space control.
    Controls the (x,y) position of a robotic arm end-effector.
    """
    def __init__(self, null_control=True, **kwargs):
        """
        null_control boolean: apply second controller in null space or not
        """

        super(Control, self).__init__(**kwargs)

        self.DOF = 2  # task space dimensionality
        self.null_control = null_control

        if self.write_to_file is True:
            from recorder import Recorder
            # set up recorders
            self.u_recorder = Recorder('control signal', self.task, 'osc')
            self.xy_recorder = Recorder('end-effector position', self.task,
                                        'osc')
            self.dist_recorder = Recorder('distance from target', self.task,
                                          'osc')
            self.recorders = [
                self.u_recorder, self.xy_recorder, self.dist_recorder
            ]

    def control(self, arm, x_des=None):
        """Generates a control signal to move the 
        arm to the specified target.
            
        arm Arm: the arm model being controlled
        des list: the desired system position
        x_des np.array: desired task-space force, 
                        system goes to self.target if None
        """

        # calculate desired end-effector acceleration
        if x_des is None:
            self.x = arm.x
            x_des = self.kp * (self.target - self.x)

        # generate the mass matrix in end-effector space
        Mq = arm.gen_Mq()
        Mx = arm.gen_Mx()

        # calculate force
        Fx = np.dot(Mx, x_des)

        # calculate the Jacobian
        JEE = arm.gen_jacEE()
        # tau = J^T * Fx + tau_grav, but gravity = 0
        # add in velocity compensation in GC space for stability
        self.u = np.dot(JEE.T, Fx).reshape(-1, ) - np.dot(Mq, self.kv * arm.dq)

        # if null_control is selected and the task space has
        # fewer DOFs than the arm, add a control signal in the
        # null space to try to move the arm to its resting state
        if self.null_control and self.DOF < len(arm.L):

            # calculate our secondary control signal
            # calculated desired joint angle acceleration
            prop_val = (
                (arm.rest_angles - arm.q) + np.pi) % (np.pi * 2) - np.pi
            q_des = (self.kp * prop_val + \
                     self.kv * -arm.dq).reshape(-1,)

            Mq = arm.gen_Mq()
            u_null = np.dot(Mq, q_des)

            # calculate the null space filter
            Jdyn_inv = np.dot(Mx, np.dot(JEE, np.linalg.inv(Mq)))
            null_filter = np.eye(len(arm.L)) - np.dot(JEE.T, Jdyn_inv)

            null_signal = np.dot(null_filter, u_null).reshape(-1, )

            self.u += null_signal

        if self.write_to_file is True:
            # feed recorders their signals
            self.u_recorder.record(0.0, self.u)
            self.xy_recorder.record(0.0, self.x)
            self.dist_recorder.record(0.0, self.target - self.x)

        # add in any additional signals
        for addition in self.additions:
            self.u += addition.generate(self.u, arm)

        return self.u

    def gen_target(self, arm):
        """Generate a random target"""
        gain = np.sum(arm.L) * .75
        bias = -np.sum(arm.L) * 0

        self.target = np.random.random(size=(2, )) * gain + bias

        return self.target.tolist()
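
The null-space filter above is the dynamically consistent projector: any torque passed through it produces zero end-effector acceleration. A standalone check with toy 3-DOF numbers (not the real arm model):

import numpy as np

rng = np.random.default_rng(0)
JEE = rng.standard_normal((2, 3))              # 2D task, 3 joints
Mq = np.eye(3) + 0.1 * np.ones((3, 3))         # SPD joint-space mass matrix
Mx = np.linalg.inv(JEE @ np.linalg.inv(Mq) @ JEE.T)  # task-space mass matrix

Jdyn_inv = Mx @ JEE @ np.linalg.inv(Mq)
null_filter = np.eye(3) - JEE.T @ Jdyn_inv

u_null = rng.standard_normal(3)
# end-effector acceleration caused by the filtered torque is (numerically) zero
xddot = JEE @ np.linalg.inv(Mq) @ (null_filter @ u_null)
assert np.allclose(xddot, 0.0)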
Example #40
0
from player import Player
from recorder import Recorder  # assumed module path, matching the other examples
from sys import argv
import mido

### Call python test_recorder.py
##
## Options:
##      -i      Select input device
##      -o      Select output device

### Options
verbose = "-v" in argv

### Create instance of Piano, Recorder and Player
p = Piano()
rec = Recorder()
player = Player()
player.start()  # Start thread

### Connecting player with piano
player.setPiano(p)

### Select input and output devices
[ins, outs] = p.listDevices()
if "-i" in argv:
    print "[+] List of input devices"
    for i in range(len(ins)):
        print "%d: %s" % (i + 1, ins[i])
    inDev = ins[int(raw_input("Select input device: ")) - 1]
else:
    inDev = mido.get_input_names()[0]
Example #41
0
    print("Usage:")
    print("python test_track.py -f filename")
    exit()
except IndexError:
    print("Missing arguments")
    exit()

### Quantization
if "-q" in argv:
    quant = int(argv[argv.index("-q") + 1])
else:
    quant = 16

### Create instance of Piano, Recorder, Player and Track
p = Piano()
rec = Recorder()
track = Track()
player = Player()
player.start()  # Start thread

### Connecting player with piano
player.setPiano(p)

### Select input and output devices
[ins, outs] = p.listDevices()
if "-i" in argv:
    print("[+] List of input devices")
    for i in range(len(ins)):
        print("%d: %s" % (i + 1, ins[i]))
    inDev = ins[int(input("Select input device: ")) - 1]
else:
Example #42
0
import pprint

envs = ENVIORNMENTS()

parents = POPULATION(c.popSize)
parents.Initialize()
parents.Evaluate(envs, pb=True, pp=False)
parents.Print()
startTimes = []
copyTimes = []
evalTimes = []
generations = []
fillTimes = []
entire_fill = []

recorder = Recorder()

children = None
for g in range(1, c.numGens):
    start = time.time()
    # generations.append(g)
    if g > 0:
        del children
    children = POPULATION(c.popSize)
    entire_fill_temp = children.Fill_From(parents)
    fill_end = time.time()
    fillTime = fill_end - start  # duration of Fill_From
    # pp.pformat(children.pop[0])
    children.Evaluate(envs, pb=True, pp=False)
    evaluateTime = time.time() - fill_end  # duration of Evaluate
    parents = copy.deepcopy(children)
Example #43
0
from server import Api
from recorder import Recorder
import settings

if __name__ == '__main__':
    recorder = Recorder()
    api = Api(recorder=recorder)
    api.demonize(port=settings.PORT)

    while True:
        recorder.update_frame_rate()
        recorder.handle_frame()
Example #44
0
class ExperimentRealtime():
    def __init__(self, config, pnhandler, inlet_amp, stimulator = None):
        self.config = config
        self.kalman_time = 0
        
        # init pnhandler, inlet_amp, stimulator and decoder/filter
        self.pnhandler = pnhandler
        self.inlet_amp = inlet_amp
        self.stimulator = stimulator
        self.recorder = Recorder(self.config, prediction = True)
        self.decoder = None
        self.filter = None
    
        
        # params of amp data stream
        self.numCh = self.config['amp_config'].getint('n_channels_amp')
        # if feedback, last channel is stimulation data
        if self.config['amp_config'].getboolean('feedback'):
            self.numCh -= 1
        self.offCh = self.numCh
        self.srate = self.config['amp_config'].getint('fs_amp')

        # time of realtime experiment (inf if set to -1)
        experiment_time = self.config['realtime'].getint('experiment_time_realtime')
        self.experiment_time = float('inf') if experiment_time < 0 else experiment_time * self.srate



        avatar_freq = self.config['avatar'].getint('avatar_freq')
        self.avatar_period = 1 / avatar_freq
        self.avatar_buffer_size = self.config['avatar'].getint('avatar_buffer_size')
        
        # buffer for plotting
        self.coordbuff = []
        
        # PN channels for finger joints
        self.pn_fingers_range = np.asarray(list(map(int, self.config['avatar']['pn_fingers_coords'].split())))
        
        # avatar channels for finger joints
        self.avatar_fingers_range = np.asarray(list(map(int, self.config['avatar']['avatar_fingers_coords'].split())))
        
        # buffer
        self.avatar_buffer = np.zeros(self.avatar_buffer_size)
        
        # initializing Avatar connection
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.server_address = ('127.0.0.1', 12001)
        self.client_address = ('127.0.0.1', 9001)
        self.sock.bind(self.server_address)
        self._printm('UDP from {} {} established'.format(*self.server_address))
        
        # get parameters of stimulation and set up the refractory period
        self.stimulator.configurate(self.config['stimulation'].getint('electrode_start'),
                                    self.config['stimulation'].getint('electrode_stop'))
        self.stimulation_time = self.config['stimulation'].getint('stimulation_time')
        self.refractory_time = self.config['stimulation'].getint('refractory_time')
        self.refractory_start = 0
        self.avatar_bias_thumb = self.config['avatar'].getint('avatar_bias_thumb')
        self.avatar_scalar_thumb = self.config['avatar'].getfloat('avatar_scalar_thumb')
        self.avatar_bias_index = self.config['avatar'].getint('avatar_bias_index')
        self.avatar_scalar_index = self.config['avatar'].getfloat('avatar_scalar_index')

        self.avatar_parameters = refresh_avatar_parameters()
        
        # control from keyboard
        self.queue_keyboard = Queue()
        self.avatar_data_type = False
        self.realtime_close = False
        on_press = lambda key: keyboard_on_press(self.queue_keyboard, key)
        self.listener = Listener(on_press=on_press, on_release=keyboard_on_release)
        self.listener.start()

        
        
    # fit data from file
    def fit(self):
        self._printm('fitting data from:\n{}'.format(self.config['paths']['train_data_to_fit_path']))
        # fit the model from file and initialize filter
        self.decoder = EMGDecoder()
        self.decoder.fit(X = None, Y = None,
                       file=self.config['paths']['train_data_to_fit_path'], 
                       numCh = self.numCh, offCh = self.offCh, 
                       pn_channels = self.pn_fingers_range, 
                       lag = 2, forward = 0, downsample = 0)
        self.filter = envelopeFilter()
        
    
    # decode realtime data
    def decode(self):
        self._printm('decoding...')
        
        # initialize pn_buffer and KalmanCoords
        pn_buffer = np.zeros(len(self.pn_fingers_range))
        KalmanCoords = np.zeros((1, len(self.pn_fingers_range)))
        
        # get chunks, decode and sent to avatar
        start_time = time.time()
        counter = 0
        counter_messages = 0
        while (time.time() - start_time < self.experiment_time):
            # handle keyboard events
            self._handle_keyboard()
            # if you press esc - save data and close
            if self.realtime_close:
                self.recorder.save_data()
                return
            
            cycle_start_time = time.time()
        
            # get chunks of data from inlets
            chunk_pn, timestamp_pn = self.pnhandler.get_next_chunk_pn()
            ampchunk, amptimestemp = self.inlet_amp.pull_chunk(max_samples=20)
            chunk_amp, timestamp_amp = np.asarray(ampchunk), np.asarray(amptimestemp)
        
            # process chunks, if no chunks - previous data will be used
            if chunk_pn is not None:
                pn_buffer = self._process_chunk_pn(chunk_pn, pn_buffer)
            else:
                self._printm('empty pn chunk encountered')
                
            if chunk_amp.shape[0] > 3:
                counter += chunk_amp.shape[0]
                WienerCoords, KalmanCoords = self._process_chunk_amp(chunk_amp)
            else:
                self._printm('empty amp chunk encountered')
            
            # get predictions and factual result and send them to avatar
            prediction = KalmanCoords[-1,:len(self.pn_fingers_range)]
            fact = np.copy(pn_buffer)
            self.coordbuff.append((prediction, fact))
            
            if self.avatar_data_type :
                thumb_coord, index_coord = prediction[0], prediction[1]
            else:
                thumb_coord, index_coord = fact[0], fact[1]
            
            thumb_angle = - self.avatar_scalar_thumb*thumb_coord - self.avatar_bias_thumb - 55
            index_angle = - self.avatar_scalar_index*index_coord + self.avatar_bias_index
            #print(thumb_angle, index_angle)
            if self.stimulator is not None and (thumb_angle < 0) and (index_angle > self.avatar_parameters['MaxIndexAngleMale']):
                self._stimulate()
                
            
            self._send_data_to_avatar(prediction, fact)
            self.recorder.record_data(chunk_amp, timestamp_amp, chunk_pn, timestamp_pn, prediction)
            
            # message to show progress
            if counter // 10000 > counter_messages:
                counter_messages += 1
                self._printm('sent {} samples to avatar'.format(counter))
        
            # control time of each cycle of the loop 
            difference_time = self.avatar_period - (time.time() - cycle_start_time)
            if difference_time > 0:
                time.sleep(difference_time)
            else:
                self._printm('not enough time to process chunks {} and {}, latency is {} s'.format(chunk_pn.shape,
                                                                                                   chunk_amp.shape,
                                                                                                   difference_time))
                self._printm('Kalman decoding time: {}'.format(self.kalman_time))
        
        # save realtime data
        self.recorder.save_data()
        
    
    def get_coordbuff(self):
        return self.coordbuff
    
    
    
    def close(self):
        self.listener.stop()
    
    
    def _process_chunk_pn(self, chunk_pn, pn_buffer):
        chunk = chunk_pn[:, self.pn_fingers_range]
        medians = np.nanmedian(chunk, axis=0)
        pn_buffer[~np.isnan(medians)] = medians[~np.isnan(medians)]
        return pn_buffer
    
    def _process_chunk_amp(self, chunk_amp):
        chunk = chunk_amp[:, :self.numCh]
        chunk = self.filter.filterEMG(chunk)
        t4 = time.time()
        WienerCoords, KalmanCoords = self.decoder.transform(chunk)
        self.kalman_time = time.time() - t4
        return WienerCoords, KalmanCoords
        
    def _send_data_to_avatar(self, prediction, fact):
        self.avatar_parameters = refresh_avatar_parameters()
        fact[0] = fact[0] *self.avatar_scalar_thumb + self.avatar_bias_thumb
        fact[1] = fact[1] *self.avatar_scalar_index - self.avatar_bias_index
        
        prediction[0] = prediction[0] *self.avatar_scalar_thumb + self.avatar_bias_thumb
        prediction[1] = prediction[1] *self.avatar_scalar_index - self.avatar_bias_index
    
    
        if self.avatar_data_type:
            self.avatar_buffer[self.avatar_fingers_range] = prediction
        else:
            self.avatar_buffer[self.avatar_fingers_range] = fact
        
        self.avatar_buffer[self.avatar_fingers_range + self.avatar_buffer_size//2] = prediction
        #print(self.avatar_buffer)
        data = struct.pack('%df' % len(self.avatar_buffer), *list(map(float, self.avatar_buffer)))
        self.sock.sendto(data, self.client_address)
        
    def _stimulate(self):
        if (time.time() - self.refractory_start)*1000 >= self.refractory_time:
            self.stimulator.stimulate(self.stimulation_time, 1)
            self.refractory_start = time.time()
    
    # helper to parse all commands from keyboard
    def _handle_keyboard(self):
        while not self.queue_keyboard.empty():
            k, _ = self.queue_keyboard.get()
            if k == 'realtime_close':
                self.realtime_close = not self.realtime_close
            elif k == 'avatar_data_type':
                self.avatar_data_type = not self.avatar_data_type
        
        
    def _printm(self, message):
        print('{} {}: '.format(time.strftime('%H:%M:%S'), type(self).__name__) + message)
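
The cycle timing in decode() is the standard fixed-rate loop: do the work, then sleep for whatever remains of the period, and log an overrun otherwise. The pattern in isolation (the 60 Hz rate is illustrative):

import time

period = 1 / 60.0           # e.g. avatar update rate (illustrative)
for _ in range(10):
    cycle_start = time.time()
    # ... pull chunks, decode, send to avatar ...
    remaining = period - (time.time() - cycle_start)
    if remaining > 0:
        time.sleep(remaining)                    # hold the loop at the target rate
    else:
        print("overrun by %.4f s" % -remaining)  # processing took too long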
Example #45
0
File: scene.py Project: tstg/mtslidar2
class Scene:

	def __init__(self):
		self.sampler = GridSampler()
		self.recorder = Recorder()
		self.lidar = Lidar()
		self.tracer = Tracer(self.lidar, self.recorder)

	def render(self):

		samples = [sample for sample in self.sampler.getSample()]
		# self.saveSample(samples)
		weights = self.getWeights(samples)
		num = len(samples)
		k = 0
		for sample, weight in zip(samples, weights):

			if k % 10000 == 0:
				print('%d / %d' % (k, num))
			k += 1

			ray = self.lidar.generateRay(sample)
			# initialize the energy of the ray
			ray.energy = self.lidar.pulseEnergy * weight
			self.tracer.trace(ray, 0, 0)

		# records = np.array(self.tracer.records)
		# np.savetxt('records.txt', records)
		self.recorder.save()

	def getWeights(self, samples):
		sampleRadii = np.array([np.linalg.norm(sample) for sample in samples])
		weights = np.exp(-sampleRadii * sampleRadii * 0.5 / self.lidar.sigmaBeam2)
		weights = weights / np.sum(weights)
		return weights

	def saveSample(self, sample):
		npsample = np.array(sample)
		np.savetxt('imPulseFile.txt', npsample)

	def renderUsingSimulatedPhoton(self):
		""" According to DART manual.

		:return:
		"""
		minPerSubcenter = 5

		samples = self.sampler.getSample()
		weights = self.getWeights(samples)
		minWeight = min(weights)

		# get photon number per subcenter
		subcenterPhotonNumbers = []
		for weight in weights:
			subcenterPhotonNumber = int(weight / minWeight * minPerSubcenter)  # TODO: rounding may distort very small weights

			subcenterPhotonNumbers.append(subcenterPhotonNumber)

		# be sure that subcenter with smallest weight has minimun photon
		# and keep gaussian shape pulse

		actualPhotonNumber = sum(subcenterPhotonNumbers)
		#
		energy = self.lidar.pulseEnergy / actualPhotonNumber

		for sample, subcenterPhotonNumber in zip(samples, subcenterPhotonNumbers):
			for i in range(subcenterPhotonNumber):
				ray = self.lidar.generateRay(sample)
				ray.energy = energy
				self.tracer.trace(ray, 0, 0)

	def renderUsingMultiLaunch(self):
		n = 5

		samples = [sample for sample in self.sampler.getSample()]
		# self.saveSample(samples)
		weights = self.getWeights(samples)

		num = len(samples)

		k = 0
		for sample, weight in zip(samples, weights):

			if k % 10000 == 0:
				print('%d / %d' % (k, num))
			k += 1

			for i in range(n):
				ray = self.lidar.generateRay(sample)
				# initialize the energy of the ray
				ray.energy = self.lidar.pulseEnergy * weight / n
				self.tracer.trace(ray, 0, 0)

		# records = np.array(self.tracer.records)
		# np.savetxt('records.txt', records)
		self.recorder.save()
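
getWeights gives each subray a Gaussian weight in its radial distance from the beam axis and normalizes so the pulse energy is conserved; a small standalone check (the sigmaBeam2 value is illustrative):

import numpy as np

sigmaBeam2 = 0.25  # beam variance (illustrative)
samples = [np.array([x, y]) for x in (-1, 0, 1) for y in (-1, 0, 1)]

radii = np.array([np.linalg.norm(s) for s in samples])
weights = np.exp(-radii * radii * 0.5 / sigmaBeam2)
weights = weights / np.sum(weights)

assert np.isclose(np.sum(weights), 1.0)  # total pulse energy preserved
assert weights[4] == weights.max()       # central subray (0, 0) weighted highest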
Example #46
0
class App:
    def __init__(self):
        self.window = Tk()
        self.window.title("Welcome To Long's App")
        self.window.geometry('350x200')
        self.recorder = Recorder()
        file = open("text.txt", mode="r", encoding="utf8")
        self.article = file.read()
        self.count = 1
        file.close()

    def initUI(self):
        self.scrolledText = scrolledtext.ScrolledText(self.window,
                                                      width=50,
                                                      height=30)
        self.scrolledText.grid(column=0, row=0)

        panelRight = PanedWindow(self.window)
        panelRight.grid(column=1, row=0)

        labelFolder = Label(panelRight, text="Folder Name: ")
        labelFolder.grid(column=0, row=0)

        self.folderNameInput = Entry(panelRight)
        self.folderNameInput.grid(column=0, row=1)

        labelFileName = Label(panelRight, text="File Name: ")
        labelFileName.grid(column=0, row=2)

        self.fileNameInput = Entry(panelRight)
        self.fileNameInput.grid(column=0, row=3)

        labelSentence = Label(panelRight, text="Sentence: ")
        labelSentence.grid(column=0, row=4)

        self.sentenceInput = Entry(panelRight)
        self.sentenceInput.grid(column=0, row=5)

        self.startButton = Button(
            panelRight,
            text="Start",
        )
        self.startButton.grid(column=0, row=6)

        self.stopButton = Button(panelRight, text="Stop")
        self.stopButton.grid(column=0, row=7)

        self.resetCountButton = Button(panelRight, text="Reset")
        self.resetCountButton.grid(column=0, row=8)

    def initLogic(self):
        def startClickHandler():
            self.recorder.startRecord()

        def stopClickHandler():
            frames = self.recorder.stopRecord()
            textFile = open("data/" + self.folderNameInput.get() +
                            "/index.txt",
                            mode="a",
                            encoding="utf8")

            textFile.write(self.fileNameInput.get() + ".wav" + '\n')
            textFile.write(self.sentenceInput.get() + '\n')

            self.recorder.writeFile(
                frames, "data/" + self.folderNameInput.get() + "/" +
                self.fileNameInput.get() + ".wav")

            self.count = self.count + 1
            self.fileNameInput.delete(0, END)
            self.fileNameInput.insert(
                0,
                self.folderNameInput.get() + "-" + str(self.count))

        def resetHandler():
            self.count = 1

        self.startButton.configure(command=startClickHandler)
        self.stopButton.configure(command=stopClickHandler)
        self.resetCountButton.configure(command=resetHandler)

        self.scrolledText.insert(INSERT, self.article)

    def mainLoop(self):
        self.window.mainloop()
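
A sketch of how this App is presumably driven; the excerpt does not show the entry point:

if __name__ == "__main__":
    app = App()        # builds the window and reads text.txt
    app.initUI()       # lay out the widgets
    app.initLogic()    # wire up the button handlers
    app.mainLoop()     # enter the Tk event loop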
Example #47
0
 def __init__(self):
     self.recorder = Recorder()
class Rollout(object):
    def __init__(self, ob_space, ac_space, nenvs, nsteps_per_seg, nsegs_per_env, nlumps, envs, policy,
                 int_rew_coeff, ext_rew_coeff, record_rollouts, dynamics):
        self.nenvs = nenvs
        self.nsteps_per_seg = nsteps_per_seg
        self.nsegs_per_env = nsegs_per_env
        self.nsteps = self.nsteps_per_seg * self.nsegs_per_env
        self.ob_space = ob_space
        self.ac_space = ac_space
        self.nlumps = nlumps
        self.lump_stride = nenvs // self.nlumps
        self.envs = envs
        self.policy = policy
        self.dynamics = dynamics

        self.reward_fun = lambda ext_rew, int_rew: ext_rew_coeff * np.clip(ext_rew, -1., 1.) + int_rew_coeff * int_rew

        self.buf_vpreds = np.empty((nenvs, self.nsteps), np.float32)
        self.buf_nlps = np.empty((nenvs, self.nsteps), np.float32)
        self.buf_rews = np.empty((nenvs, self.nsteps), np.float32)
        self.buf_ext_rews = np.empty((nenvs, self.nsteps), np.float32)
        self.buf_acs = np.empty((nenvs, self.nsteps, *self.ac_space.shape), self.ac_space.dtype)
        self.buf_obs = np.empty((nenvs, self.nsteps, *self.ob_space.shape), self.ob_space.dtype)
        self.buf_obs_last = np.empty((nenvs, self.nsegs_per_env, *self.ob_space.shape), np.float32)

        self.buf_news = np.zeros((nenvs, self.nsteps), np.float32)
        self.buf_new_last = self.buf_news[:, 0, ...].copy()
        self.buf_vpred_last = self.buf_vpreds[:, 0, ...].copy()

        self.env_results = [None] * self.nlumps
        # self.prev_feat = [None for _ in range(self.nlumps)]
        # self.prev_acs = [None for _ in range(self.nlumps)]
        self.int_rew = np.zeros((nenvs,), np.float32)

        self.recorder = Recorder(nenvs=self.nenvs, nlumps=self.nlumps) if record_rollouts else None
        self.statlists = defaultdict(lambda: deque([], maxlen=100))
        self.stats = defaultdict(float)
        self.best_ext_ret = None
        self.all_visited_rooms = []
        self.all_scores = []
        self.all_ep_r = []

        self.step_count = 0

    def collect_rollout(self):
        self.ep_infos_new = []
        for t in range(self.nsteps):
            self.rollout_step()
        self.calculate_reward()
        self.update_info()

    def calculate_reward(self):
        int_rew = self.dynamics.calculate_loss(ob=self.buf_obs,
                                               last_ob=self.buf_obs_last,
                                               acs=self.buf_acs)
        self.buf_rews[:] = self.reward_fun(int_rew=int_rew, ext_rew=self.buf_ext_rews)

    def rollout_step(self):
        t = self.step_count % self.nsteps
        s = t % self.nsteps_per_seg
        for l in range(self.nlumps):
            obs, prevrews, news, infos = self.env_get(l)
            # if t > 0:
            #     prev_feat = self.prev_feat[l]
            #     prev_acs = self.prev_acs[l]
            for info in infos:
                epinfo = info.get('episode', {})
                mzepinfo = info.get('mz_episode', {})
                retroepinfo = info.get('retro_episode', {})
                epinfo.update(mzepinfo)
                epinfo.update(retroepinfo)
                if epinfo:
                    if "n_states_visited" in info:
                        epinfo["n_states_visited"] = info["n_states_visited"]
                        epinfo["states_visited"] = info["states_visited"]
                    self.ep_infos_new.append((self.step_count, epinfo))

            sli = slice(l * self.lump_stride, (l + 1) * self.lump_stride)

            acs, vpreds, nlps = self.policy.get_ac_value_nlp(obs)
            self.env_step(l, acs)

            # self.prev_feat[l] = dyn_feat
            # self.prev_acs[l] = acs
            self.buf_obs[sli, t] = obs
            self.buf_news[sli, t] = news
            self.buf_vpreds[sli, t] = vpreds
            self.buf_nlps[sli, t] = nlps
            self.buf_acs[sli, t] = acs
            if t > 0:
                self.buf_ext_rews[sli, t - 1] = prevrews
            # if t > 0:
            #     dyn_logp = self.policy.call_reward(prev_feat, pol_feat, prev_acs)
            #
            #     int_rew = dyn_logp.reshape(-1, )
            #
            #     self.int_rew[sli] = int_rew
            #     self.buf_rews[sli, t - 1] = self.reward_fun(ext_rew=prevrews, int_rew=int_rew)
            if self.recorder is not None:
                self.recorder.record(timestep=self.step_count, lump=l, acs=acs, infos=infos, int_rew=self.int_rew[sli],
                                     ext_rew=prevrews, news=news)
        self.step_count += 1
        if s == self.nsteps_per_seg - 1:
            for l in range(self.nlumps):
                sli = slice(l * self.lump_stride, (l + 1) * self.lump_stride)
                nextobs, ext_rews, nextnews, _ = self.env_get(l)
                self.buf_obs_last[sli, t // self.nsteps_per_seg] = nextobs
                if t == self.nsteps - 1:
                    self.buf_new_last[sli] = nextnews
                    self.buf_ext_rews[sli, t] = ext_rews
                    _, self.buf_vpred_last[sli], _ = self.policy.get_ac_value_nlp(nextobs)
                    # dyn_logp = self.policy.call_reward(self.prev_feat[l], last_pol_feat, prev_acs)
                    # dyn_logp = dyn_logp.reshape(-1, )
                    # int_rew = dyn_logp
                    #
                    # self.int_rew[sli] = int_rew
                    # self.buf_rews[sli, t] = self.reward_fun(ext_rew=ext_rews, int_rew=int_rew)

    def update_info(self):
        all_ep_infos = MPI.COMM_WORLD.allgather(self.ep_infos_new)
        all_ep_infos = sorted(sum(all_ep_infos, []), key=lambda x: x[0])
        if all_ep_infos:
            all_ep_infos = [i_[1] for i_ in all_ep_infos]  # remove the step_count
            keys_ = all_ep_infos[0].keys()
            all_ep_infos = {k: [i[k] for i in all_ep_infos] for k in keys_}

            self.statlists['eprew'].extend(all_ep_infos['r'])

            self.all_ep_r.append(np.mean(all_ep_infos['r']))
            
            if len(self.all_ep_r) % 100 == 0 and len(self.all_ep_r) > 0:
                np.save('all_ep_r_{}_pacman_basline'.format(len(self.all_ep_r) // 100), self.all_ep_r)


            self.stats['eprew_recent'] = np.mean(all_ep_infos['r'])
            self.statlists['eplen'].extend(all_ep_infos['l'])
            self.stats['epcount'] += len(all_ep_infos['l'])
            self.stats['tcount'] += sum(all_ep_infos['l'])
            if 'visited_rooms' in keys_:
                # Montezuma specific logging.
                self.stats['visited_rooms'] = sorted(list(set.union(*all_ep_infos['visited_rooms'])))
                self.stats['pos_count'] = np.mean(all_ep_infos['pos_count'])
                self.all_visited_rooms.extend(self.stats['visited_rooms'])
                self.all_scores.extend(all_ep_infos["r"])
                self.all_scores = sorted(list(set(self.all_scores)))
                self.all_visited_rooms = sorted(list(set(self.all_visited_rooms)))
                if MPI.COMM_WORLD.Get_rank() == 0:
                    print("All visited rooms")
                    print(self.all_visited_rooms)
                    print("All scores")
                    print(self.all_scores)
            if 'levels' in keys_:
                # Retro logging
                temp = sorted(list(set.union(*all_ep_infos['levels'])))
                self.all_visited_rooms.extend(temp)
                self.all_visited_rooms = sorted(list(set(self.all_visited_rooms)))
                if MPI.COMM_WORLD.Get_rank() == 0:
                    print("All visited levels")
                    print(self.all_visited_rooms)

            current_max = np.max(all_ep_infos['r'])
        else:
            current_max = None
        self.ep_infos_new = []

        if current_max is not None:
            if (self.best_ext_ret is None) or (current_max > self.best_ext_ret):
                self.best_ext_ret = current_max
        self.current_max = current_max

    def env_step(self, l, acs):
        self.envs[l].step_async(acs)
        self.env_results[l] = None

    def env_get(self, l):
        if self.step_count == 0:
            ob = self.envs[l].reset()
            out = self.env_results[l] = (ob, None, np.ones(self.lump_stride, bool), {})
        else:
            if self.env_results[l] is None:
                out = self.env_results[l] = self.envs[l].step_wait()
            else:
                out = self.env_results[l]
        return out
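
reward_fun clips the extrinsic (game) reward to [-1, 1] before mixing in the unclipped intrinsic bonus; a quick standalone illustration with made-up coefficients:

import numpy as np

ext_rew_coeff, int_rew_coeff = 1.0, 0.5   # illustrative coefficients
reward_fun = lambda ext_rew, int_rew: (
    ext_rew_coeff * np.clip(ext_rew, -1., 1.) + int_rew_coeff * int_rew)

ext = np.array([0.0, 10.0, -3.0])   # raw scores, unbounded
intr = np.array([0.2, 0.2, 0.2])    # curiosity bonus
print(reward_fun(ext, intr))        # -> [ 0.1  1.1 -0.9]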
Example #49
0
from pymouse import PyMouseEvent
from recorder import Recorder
import time, sys
import cmd, shlex


class ClickEventListener(PyMouseEvent):
    def click(self, x, y, button, press):
        if press:
            recorder.OnLeftDown((x, y))


recorder = Recorder()  # create the recorder before the listener starts, so click events never hit an undefined name

mouseListener = ClickEventListener()
mouseListener.start()


class CLI(cmd.Cmd):
    running = False

    def do_clear(self, line):
        """clear
      Clear the operation list."""
        recorder.clear()

    def do_list(self, line):
        """list
      Print operation list."""
        recorder.printOpList()
Example #50
0
 def __init__(self):
     self.recorder = Recorder()
     self.text = ""
Example #51
0
    def startButtonClicked(self):
        self.stopButton.setEnabled(True)
        self.startButton.setEnabled(False)

        self.recorder = Recorder()
        self.recorder.start()
Example #52
0
app = Flask(__name__)
try:
    with open("/etc/machine-id", "r") as fh:
        app.config['SECRET_KEY'] = fh.read()
except OSError:
    app.config['SECRET_KEY'] = 'secret!'
sockets = Sockets(app)

# Build pipeline
grabber = PanoramaGrabber()  # config read from ~/.robovision/grabber.conf
image_recognizer = ImageRecognizer(grabber)
gameplay = Gameplay(image_recognizer)
rf = RemoteRF(gameplay, "/dev/ttyACM0")
visualizer = Visualizer(image_recognizer, framedrop=1)
recorder = Recorder(grabber)


def generator():
    visualizer.enable()
    queue = visualizer.get_queue()
    while True:
        try:
            buf, resized, frame, r = queue.get_nowait()
        except Empty:
            sleep(0.001)  # Fix this stupid thingie
            continue
        else:
            yield b'--frame\r\nContent-Type: image/jpeg\r\n\r\n'
            yield buf
            yield b'\r\n\r\n'
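
generator() yields multipart JPEG chunks; serving them needs a route that wraps it in a multipart/x-mixed-replace response whose boundary matches the b'--frame' marker. A sketch of that wiring (the route name is an assumption):

from flask import Response

@app.route('/video')  # hypothetical route name
def video_feed():
    return Response(generator(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')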
Example #53
0
File: pircam.py Project: camarel/modecam
class Pircam:
    image_width = 1280
    image_height = 720

    def __init__(self, bot, pir_pin, camera_port, audio_index):
        self.bot = bot
        self.pin = int(pir_pin)
        self.camera_port = int(camera_port)
        self.audio_index = int(audio_index)

        # init pir sensor
        GPIO.setwarnings(False)
        GPIO.setmode(GPIO.BOARD)
        GPIO.setup(self.pin, GPIO.IN)

        self.recorder = Recorder(self.bot, self.audio_index)

    def takePictures(self):
        logger.info('PIR movement detected: taking pictures')
        frames = 0
        loop = 0

        self.recorder.start()

        camera = cv2.VideoCapture(self.camera_port)
        camera.set(cv2.CAP_PROP_FRAME_WIDTH, self.image_width)
        camera.set(cv2.CAP_PROP_FRAME_HEIGHT, self.image_height)

        camera.set(cv2.CAP_PROP_SATURATION, 50.0)

        while 1 == GPIO.input(self.pin):
            return_value, image = camera.read()

            if frames == 0:
                encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 70]
                is_success, imbuffer = cv2.imencode('.jpg', image,
                                                    encode_param)
                stream = BytesIO(imbuffer)

                self.bot.sendPicture(stream)
                stream.close()

                if (loop < 2):
                    loop += 1
                    frames = 10
                else:
                    frames = 80

            else:
                frames -= 1

        camera.release()
        self.recorder.stop()

    def start(self):
        thread = Thread(target=self.observe)
        thread.start()

    def observe(self):
        logger.info('start PIR observing')
        self.observing = True

        while self.observing:
            if 0 == GPIO.input(self.pin):
                time.sleep(1)
            else:
                self.takePictures()

        logger.info('stop PIR observing')

    def stop(self):
        self.observing = False
Example #54
0
class Rollout(object):
    def __init__(self, ob_space, ac_space, nenvs, nsteps_per_seg,
                 nsegs_per_env, nlumps, envs, policy, int_rew_coeff,
                 ext_rew_coeff, record_rollouts, dynamics, patience):
        self.nenvs = nenvs
        self.nsteps_per_seg = nsteps_per_seg
        self.nsegs_per_env = nsegs_per_env
        self.nsteps = self.nsteps_per_seg * self.nsegs_per_env
        self.ob_space = ob_space
        self.ac_space = ac_space
        self.nlumps = nlumps
        self.lump_stride = nenvs // self.nlumps
        self.envs = envs
        self.policy = policy
        self.dynamics = dynamics
        self.patience = patience
        self.reward_fun = lambda ext_rew, int_rew: ext_rew_coeff * np.clip(
            ext_rew, -1., 1.) + int_rew_coeff * int_rew

        self.buf_vpreds = np.empty((nenvs, self.nsteps), np.float32)
        self.buf_nlps = np.empty((nenvs, self.nsteps), np.float32)
        self.buf_rews = np.empty((nenvs, self.nsteps), np.float32)
        self.buf_ext_rews = np.empty((nenvs, self.nsteps), np.float32)
        self.buf_acs = np.empty((nenvs, self.nsteps, *self.ac_space.shape),
                                self.ac_space.dtype)
        self.buf_obs = np.empty((nenvs, self.nsteps, *self.ob_space.shape),
                                self.ob_space.dtype)
        self.buf_obs_last = np.empty(
            (nenvs, self.nsegs_per_env, *self.ob_space.shape), np.float32)

        self.buf_news = np.zeros((nenvs, self.nsteps), np.float32)
        self.buf_new_last = self.buf_news[:, 0, ...].copy()
        self.buf_vpred_last = self.buf_vpreds[:, 0, ...].copy()

        self.env_results = [None] * self.nlumps
        # self.prev_feat = [None for _ in range(self.nlumps)]
        # self.prev_acs = [None for _ in range(self.nlumps)]
        self.int_rew = np.zeros((nenvs, ), np.float32)

        self.recorder = Recorder(
            nenvs=self.nenvs, nlumps=self.nlumps) if record_rollouts else None
        self.statlists = defaultdict(lambda: deque([], maxlen=100))
        self.stats = defaultdict(float)
        self.best_ext_ret = None
        self.all_visited_rooms = []
        self.all_scores = []

        self.step_count = 0

        #########################################################
        self.ps_buf = np.zeros((nenvs, ), np.float32)
        self.ps_buf_nold = np.zeros((nenvs, ), np.float32)
        self.ac_buf = np.zeros((nenvs, ), np.float32)
        self.buf_acs_nold = np.empty(
            (nenvs, self.nsteps, *self.ac_space.shape), self.ac_space.dtype)
        self.ac_buf_nold = np.empty((nenvs, self.nsteps, *self.ac_space.shape),
                                    self.ac_space.dtype)
        self.nthe = 2
        self.ac_list = []
        self.ps_list = []
        self.rew_list = []
        self.obs_list = []
        self.feat_list = []
        self.patience_pred = 0
        self.pat = 0
        self.mean = np.zeros((nenvs, ), np.float32)
        self.var = np.ones((nenvs, ), np.float32)
        #########################################################
    def collect_rollout(self):
        self.ep_infos_new = []
        for t in range(self.nsteps):
            self.rollout_step()
        self.calculate_reward()
        self.update_info()

    def calculate_reward(self):
        # fix this part as well
        ##############################################################################
        int_rew, ps, ac, feat = self.dynamics.calculate_loss(
            ob=self.buf_obs,
            last_ob=self.buf_obs_last,
            acs=self.buf_acs,
            feat_input=[],
            pat=0)
        print(int_rew.shape)
        self.ac_list.append(ac)
        self.ps_list.append(ps)
        self.rew_list.append(int_rew)
        self.obs_list.append(self.buf_obs)
        self.feat_list.append(feat)
        #self.feat_list = tf.stack([self.feat_list, feat])
        #print(self.feat_list[0])
        gpu_devices = tf.config.experimental.list_physical_devices('GPU')
        """
        for device in gpu_devices:
            tf.config.experimental.set_memory_growth(device, True)
        """

        if len(self.ac_list) > (self.nthe):
            self.ac_list = self.ac_list[1:]
            self.ps_list = self.ps_list[1:]
            self.rew_list = self.rew_list[1:]
            self.obs_list = self.obs_list[1:]

        if self.step_count / self.nsteps > self.nthe:
            #ac_spec = tf.one_hot(self.ac_list[0], 4, axis=2)
            #ac_spec = flatten_two_dims(ac_spec)
            #print("ac_speccccccccccccccccccc:",ac_spec.shape)
            int_rew_now, ps_now, ac_now, feat_now = self.dynamics.calculate_loss(
                ob=self.obs_list[0],
                last_ob=self.buf_obs_last,
                acs=self.ac_list[0],
                feat_input=self.feat_list[0],
                pat=1)  #ps_buf_nold S_1**

            print(
                "Success///////////////////////////////////////////////////////////////"
            )
            sess = tf.Session()

            coc = self.rew_list[0] - int_rew_now
            print("coc", coc)
            norm_coc = np.multiply((coc - self.mean),
                                   np.reciprocal(np.sqrt(self.var)))

            self.mean = 0.9 * self.mean + 0.1 * coc
            self.var = 0.9 * self.var + 0.1 * coc**2

            node = tf.math.sigmoid(norm_coc)

            self.pat = sess.run(node)
            print("int_rew_now", int_rew_now.shape)
            print("patshape", self.pat.shape)
            # apply a sigmoid here so the values fall neatly between 0 and 1.
            #diff = sess.run(node)
            #MontezumaRevengeNoFrameskip-v4
            #self.patience = 0.9 * self.patience + 0.1 * diff
            #########################
            #self.patience.train_step()
            # patience_pred
            #model = Patience(self.obs_list[0],self.buf_obs_last,self.ac_list[0],self.feat_list[0])
            #self.patience_pred = Patience(state = self.obs_list[0],action = self.ac_list[0], labels = self.pat, ac_space = self.ac_space, auxiliary_task = self.dynamics.auxiliary_task).get_loss()
            #print("hi",self.feat_list[0].shape,self.ac_list[0].shape)
            #self.patience_pred = model(self.feat_list[0],self.ac_list[0])
            pat_pred = self.patience.calculate_loss(
                ob=self.obs_list[0],
                last_ob=self.buf_obs_last,
                acs=self.ac_list[0],
                feat_input=self.feat_list[0],
                pat=1)

            # model.calculate_loss(ob=self.obs_list[0],
            #                                    last_ob=self.buf_obs_last,
            #                                    acs=self.ac_list[0], feat_input = self.feat_list[0])
            #train_step(model, self.feat_list[0],self.ac_list[0],self.pat)
            #########################
            #int_rew = int_rew * self.patience_pred
            # elementwise multiply
            #int_rew = np.multiply(int_rew, self.patience_pred)

        ################################################################################
        self.buf_rews[:] = self.reward_fun(int_rew=int_rew,
                                           ext_rew=self.buf_ext_rews)

    def rollout_step(self):
        t = self.step_count % self.nsteps
        s = t % self.nsteps_per_seg
        for l in range(self.nlumps):
            obs, prevrews, news, infos = self.env_get(l)
            # if t > 0:
            #     prev_feat = self.prev_feat[l]
            #     prev_acs = self.prev_acs[l]
            for info in infos:
                epinfo = info.get('episode', {})
                mzepinfo = info.get('mz_episode', {})
                retroepinfo = info.get('retro_episode', {})
                epinfo.update(mzepinfo)
                epinfo.update(retroepinfo)
                if epinfo:
                    if "n_states_visited" in info:
                        epinfo["n_states_visited"] = info["n_states_visited"]
                        epinfo["states_visited"] = info["states_visited"]
                    self.ep_infos_new.append((self.step_count, epinfo))

            sli = slice(l * self.lump_stride, (l + 1) * self.lump_stride)

            acs, vpreds, nlps = self.policy.get_ac_value_nlp(obs)
            self.env_step(l, acs)

            # self.prev_feat[l] = dyn_feat
            # self.prev_acs[l] = acs
            self.buf_obs[sli, t] = obs
            self.buf_news[sli, t] = news
            self.buf_vpreds[sli, t] = vpreds
            self.buf_nlps[sli, t] = nlps
            self.buf_acs[sli, t] = acs
            if t > 0:
                self.buf_ext_rews[sli, t - 1] = prevrews
            # if t > 0:
            #     dyn_logp = self.policy.call_reward(prev_feat, pol_feat, prev_acs)
            #
            #     int_rew = dyn_logp.reshape(-1, )
            #
            #     self.int_rew[sli] = int_rew
            #     self.buf_rews[sli, t - 1] = self.reward_fun(ext_rew=prevrews, int_rew=int_rew)
            if self.recorder is not None:
                self.recorder.record(timestep=self.step_count,
                                     lump=l,
                                     acs=acs,
                                     infos=infos,
                                     int_rew=self.int_rew[sli],
                                     ext_rew=prevrews,
                                     news=news)
        self.step_count += 1
        if s == self.nsteps_per_seg - 1:
            for l in range(self.nlumps):
                sli = slice(l * self.lump_stride, (l + 1) * self.lump_stride)
                nextobs, ext_rews, nextnews, _ = self.env_get(l)
                self.buf_obs_last[sli, t // self.nsteps_per_seg] = nextobs
                if t == self.nsteps - 1:
                    self.buf_new_last[sli] = nextnews
                    self.buf_ext_rews[sli, t] = ext_rews
                    _, self.buf_vpred_last[
                        sli], _ = self.policy.get_ac_value_nlp(nextobs)
                    # dyn_logp = self.policy.call_reward(self.prev_feat[l], last_pol_feat, prev_acs)
                    # dyn_logp = dyn_logp.reshape(-1, )
                    # int_rew = dyn_logp
                    #
                    # self.int_rew[sli] = int_rew
                    # self.buf_rews[sli, t] = self.reward_fun(ext_rew=ext_rews, int_rew=int_rew)

    def update_info(self):
        all_ep_infos = MPI.COMM_WORLD.allgather(self.ep_infos_new)
        all_ep_infos = sorted(sum(all_ep_infos, []), key=lambda x: x[0])
        if all_ep_infos:
            all_ep_infos = [i_[1]
                            for i_ in all_ep_infos]  # remove the step_count
            keys_ = all_ep_infos[0].keys()
            all_ep_infos = {k: [i[k] for i in all_ep_infos] for k in keys_}

            self.statlists['eprew'].extend(all_ep_infos['r'])
            self.stats['eprew_recent'] = np.mean(all_ep_infos['r'])
            self.statlists['eplen'].extend(all_ep_infos['l'])
            self.stats['epcount'] += len(all_ep_infos['l'])
            self.stats['tcount'] += sum(all_ep_infos['l'])
            if 'visited_rooms' in keys_:
                # Montezuma specific logging.
                self.stats['visited_rooms'] = sorted(
                    list(set.union(*all_ep_infos['visited_rooms'])))
                self.stats['pos_count'] = np.mean(all_ep_infos['pos_count'])
                self.all_visited_rooms.extend(self.stats['visited_rooms'])
                self.all_scores.extend(all_ep_infos["r"])
                self.all_scores = sorted(list(set(self.all_scores)))
                self.all_visited_rooms = sorted(
                    list(set(self.all_visited_rooms)))
                if MPI.COMM_WORLD.Get_rank() == 0:
                    print("All visited rooms")
                    print(self.all_visited_rooms)
                    print("All scores")
                    print(self.all_scores)
            if 'levels' in keys_:
                # Retro logging
                temp = sorted(list(set.union(*all_ep_infos['levels'])))
                self.all_visited_rooms.extend(temp)
                self.all_visited_rooms = sorted(
                    list(set(self.all_visited_rooms)))
                if MPI.COMM_WORLD.Get_rank() == 0:
                    print("All visited levels")
                    print(self.all_visited_rooms)

            current_max = np.max(all_ep_infos['r'])
        else:
            current_max = None
        self.ep_infos_new = []

        if current_max is not None:
            if (self.best_ext_ret is None) or (current_max >
                                               self.best_ext_ret):
                self.best_ext_ret = current_max
        self.current_max = current_max

    def env_step(self, l, acs):
        self.envs[l].step_async(acs)
        self.env_results[l] = None

    def env_get(self, l):
        if self.step_count == 0:
            ob = self.envs[l].reset()
            out = self.env_results[l] = (ob, None,
                                         np.ones(self.lump_stride, bool), {})
        else:
            if self.env_results[l] is None:
                out = self.env_results[l] = self.envs[l].step_wait()
            else:
                out = self.env_results[l]
        return out
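The env_get/env_step pair above implements a simple double-buffer: step_async is fired as soon as actions are chosen, and the result is fetched lazily (and cached) by the next env_get. A minimal sketch of the same caching pattern, with a hypothetical vec-env object that exposes step_async/step_wait:

class LazyStepper:
    def __init__(self, env):
        self.env = env        # assumed to expose step_async()/step_wait()
        self._result = None   # cache; None while a step is still in flight

    def step(self, actions):
        self.env.step_async(actions)
        self._result = None   # invalidate until step_wait is called

    def get(self):
        # fetch at most once per step, then serve the cached result
        if self._result is None:
            self._result = self.env.step_wait()
        return self._result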
Example #55
0
class Timelapse(NSObject):
    """ Creates a timelapse video """

    def applicationDidFinishLaunching_(self, notification) -> None:
        self.check_dependencies()

        # Initialize recording
        self.recording: bool = start_recording
        self.recorder = None

        # Set correct output paths
        self.recorder_output_basedir: str = os.path.join(
            dir_base, dir_pictures, dir_app)
        self.encoder_output_basedir: str = os.path.join(dir_base, dir_movies)

        # Create a reference to the statusbar (menubar)
        self.statusbar = NSStatusBar.systemStatusBar()

        # Create item in statusbar
        self.statusitem = self.statusbar.statusItemWithLength_(
            NSVariableStatusItemLength)
        self.statusitem.setHighlightMode_(1)  # Highlight upon clicking

        # Create a simple menu and bind it to the status item
        self.menu = self.createMenu()
        self.statusitem.setMenu_(self.menu)

        # Load icons and show them in the statusbar
        self.loadIcons()
        self.setStatus()

    def loadIcons(self) -> None:
        self.icon_recording = NSImage.alloc().initWithContentsOfFile_(
            os.path.join(dir_resources, image_recording))
        self.icon_idle = NSImage.alloc().initWithContentsOfFile_(
            os.path.join(dir_resources, image_idle))

    def setStatus(self) -> None:
        """ Sets the image and menu text according to recording status """
        if self.recording:
            self.statusitem.setImage_(self.icon_recording)
            self.recordButton.setTitle_(text_recorder_running)
            self.statusitem.setToolTip_(tooltip_running)
        else:
            self.statusitem.setImage_(self.icon_idle)
            self.recordButton.setTitle_(text_recorder_idle)
            self.statusitem.setToolTip_(tooltip_idle)

    def createMenu(self) -> NSMenu:
        """ Status bar menu """
        menu = NSMenu.alloc().init()
        # Bind record event
        self.recordButton = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(
            text_recorder_idle, 'startStopRecording:', '')
        menu.addItem_(self.recordButton)
        # Quit event
        menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(
            'Quit', 'terminate:', '')
        menu.addItem_(menuitem)
        return menu

    def startStopRecording_(self, notification) -> None:
        if self.recording:
            self.recorder.join(timeout=screenshot_interval*2)
            # Create timelapse after recording?
            if encode:
                self.encoder = Encoder(
                    self.image_dir, self.encoder_output_basedir)
                self.encoder.start()
        else:
            self.image_dir: str = self.create_dir(self.recorder_output_basedir)
            self.recorder = Recorder(self.image_dir, screenshot_interval)
            self.recorder.start()
            notify("Timelapse started", "The recording has started")
        self.recording: bool = not self.recording
        self.setStatus()

    @objc.python_method
    def create_dir(self, base_dir: str) -> str:
        """ Creates a specified directory and the path to it if necessary """
        # Create a new subdirectory
        output_dir: str = os.path.join(base_dir, self.get_sub_dir(base_dir))

        # Create path if it doesn't exist
        try:
            print(f"Output directory: {output_dir}")
            os.makedirs(output_dir)
        except OSError as e:
            notify("Timelapse error", f"Error while creating directory: {e}")
            exit()
        return output_dir

    @objc.python_method
    def get_sub_dir(self, base_dir: str) -> str:
        """ Returns the subdirectory name for recording, relative to base_dir """
        # Returning a path already joined with base_dir would make create_dir
        # join base_dir twice, so only the session suffix is returned here.
        return "Session-" + time.strftime("%Y-%m-%d_%H-%M-%S")

    def check_dependencies(self) -> None:
        try:
            subprocess.run(['ffmpeg'], check=True,
                           capture_output=True, timeout=10.0)
        except subprocess.CalledProcessError:
            # ffmpeg exists but exits non-zero when called without arguments,
            # so reaching this branch means the command was found.
            pass
        except OSError:
            print(not_found_msg)
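A quieter way to probe for ffmpeg is shutil.which, which avoids spawning a process entirely; a minimal sketch (not_found_msg assumed defined elsewhere in this snippet):

import shutil

def check_ffmpeg() -> None:
    # shutil.which returns the resolved path, or None if the
    # command is not on PATH
    if shutil.which('ffmpeg') is None:
        print(not_found_msg)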
Example #56
0
from recorder import Recorder
import argparse
import matplotlib.pyplot as plt
import numpy as np
import sys
# Assumed requirement: sunpy registers the "sdoaia*" colormaps used below;
# without it matplotlib will not recognize those colormap names.
import sunpy.visualization.colormaps  # noqa: F401

PRINTER = Recorder()
parser = argparse.ArgumentParser()
parser.add_argument("-file")
parser.add_argument("-type")
args = parser.parse_args()

if args.file is None or args.type is None:
    PRINTER.info_text(
        "Specify filename and datatype with '-file <filename> -type <datatype>'"
    )
    sys.exit()

FILE = args.file
TYPE = args.type.lower()

if TYPE == "aia171":
    plt.imshow(np.load(FILE), cmap="sdoaia171", origin="lower")
    plt.title("AIA171")
    plt.clim(0, 3500)
    plt.show()
elif TYPE == "aia304":
    plt.imshow(np.load(FILE), cmap="sdoaia304", origin="lower")
    plt.title("AIA304")
    plt.clim(0, 1500)
    plt.show()
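The two branches differ only in colormap, title, and color limits; a hedged, table-driven sketch of the same logic (settings copied from the branches above):

SETTINGS = {
    "aia171": {"cmap": "sdoaia171", "title": "AIA171", "clim": (0, 3500)},
    "aia304": {"cmap": "sdoaia304", "title": "AIA304", "clim": (0, 1500)},
}

if TYPE in SETTINGS:
    s = SETTINGS[TYPE]
    plt.imshow(np.load(FILE), cmap=s["cmap"], origin="lower")
    plt.title(s["title"])
    plt.clim(*s["clim"])
    plt.show()
else:
    PRINTER.info_text("Unknown datatype: " + TYPE)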
Example #57
0
import speech_recognition as sr
from recorder import Recorder
r = Recorder()
r.record(15, output='output.wav')
r.play('output.wav')

AUDIO_FILE = "output.wav"
# obtain audio from the recorded file
r = sr.Recognizer()
with sr.AudioFile(AUDIO_FILE) as source:
    audio = r.record(source)

# recognize speech using the Google Speech Recognition API
try:
    print(r.recognize_google(audio))
except sr.UnknownValueError:
    print("Google could not understand audio")
except sr.RequestError as e:
    print("Google error; {0}".format(e))
Example #58
0
    def set_record_dir(self, record_dir, setting_file_name, system_name,
                       version):
        self.recorder = \
            Recorder(os.path.join(record_dir, system_name, version))
        self.recorder.create_record_folder()
        self.recorder.record_setting(setting_file_name)
Example #59
0
    pmh.daemon = True
    pmh.start()

    pfile = PypoFile(media_q, config['pypo'])
    pfile.daemon = True
    pfile.start()

    pf = PypoFetch(pypoFetch_q, pypoPush_q, media_q, telnet_lock,
                   pypo_liquidsoap, config['pypo'])
    pf.daemon = True
    pf.start()

    pp = PypoPush(pypoPush_q, telnet_lock, pypo_liquidsoap, config['pypo'])
    pp.daemon = True
    pp.start()

    recorder = Recorder(recorder_q)
    recorder.daemon = True
    recorder.start()

    stat = ListenerStat(config)
    stat.daemon = True
    stat.start()

    # Just sleep the main thread, instead of blocking on pf.join().
    # This allows CTRL-C to work!
    while True:
        time.sleep(1)

    # Note: the loop above never breaks, so this line is unreachable as written.
    logger.info("System exit")
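A hedged variant of the tail above that keeps CTRL-C working while also making the final log line reachable (same names assumed):

try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    # CTRL-C raises KeyboardInterrupt in the main thread; daemon threads
    # are torn down when the process exits
    pass
logger.info("System exit")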
Example #60
0
class Rollout(object):
    def __init__(self, ob_space, ac_space, nenvs, nsteps_per_seg, nsegs_per_env, nlumps, envs, policy,
                 int_rew_coeff, ext_rew_coeff, record_rollouts, dynamics, patience, nthe):
        self.nenvs = nenvs
        self.nsteps_per_seg = nsteps_per_seg
        self.nsegs_per_env = nsegs_per_env
        self.nsteps = self.nsteps_per_seg * self.nsegs_per_env
        self.ob_space = ob_space
        self.ac_space = ac_space
        self.nlumps = nlumps
        self.lump_stride = nenvs // self.nlumps
        self.envs = envs
        self.policy = policy
        self.dynamics = dynamics
        self.patience = patience
        self.reward_fun = lambda ext_rew, int_rew: ext_rew_coeff * np.clip(ext_rew, -1., 1.) + int_rew_coeff * int_rew

        self.buf_vpreds = np.empty((nenvs, self.nsteps), np.float32)
        self.buf_nlps = np.empty((nenvs, self.nsteps), np.float32)
        self.buf_rews = np.empty((nenvs, self.nsteps), np.float32)
        self.buf_ext_rews = np.empty((nenvs, self.nsteps), np.float32)
        self.buf_acs = np.empty((nenvs, self.nsteps, *self.ac_space.shape), self.ac_space.dtype)
        self.buf_obs = np.empty((nenvs, self.nsteps, *self.ob_space.shape), self.ob_space.dtype)
        self.buf_obs_last = np.empty((nenvs, self.nsegs_per_env, *self.ob_space.shape), np.float32)

        self.buf_news = np.zeros((nenvs, self.nsteps), np.float32)
        self.buf_new_last = self.buf_news[:, 0, ...].copy()
        self.buf_vpred_last = self.buf_vpreds[:, 0, ...].copy()

        self.env_results = [None] * self.nlumps
        # self.prev_feat = [None for _ in range(self.nlumps)]
        # self.prev_acs = [None for _ in range(self.nlumps)]
        self.int_rew = np.zeros((nenvs,), np.float32)

        self.recorder = Recorder(nenvs=self.nenvs, nlumps=self.nlumps) if record_rollouts else None
        self.statlists = defaultdict(lambda: deque([], maxlen=100))
        self.stats = defaultdict(float)
        self.best_ext_ret = None
        self.all_visited_rooms = []
        self.all_scores = []

        self.step_count = 0

        #########################################################
        # Note: this aliases ac_buf and int_rew to the *same* array and rebinds
        # the int_rew created above; writes through one name affect the other.
        self.ac_buf = self.int_rew = np.zeros((nenvs,), np.float32)
        self.buf_acs_nold = np.empty((nenvs, self.nsteps, *self.ac_space.shape), self.ac_space.dtype)
        self.ac_buf_nold = np.empty((nenvs, self.nsteps, *self.ac_space.shape), self.ac_space.dtype)
        self.nthe = nthe
        self.ac_list = []
        self.rew_list = []
        self.obs_list = []
        self.feat_list = []
        self.patience_pred = 0
        self.pat = 0
        self.mean = np.zeros((128,128), np.float32)
        self.var = np.ones((128,128), np.float32)
        self.model = 0
        self.flag = 1
        self.flag2 = 1
        #########################################################
    def collect_rollout(self):
        self.ep_infos_new = []
        for t in range(self.nsteps):
            self.rollout_step()
        self.calculate_reward()
        self.update_info()

    def calculate_reward(self):
        # TODO: revise this section as well
        ##############################################################################
        print("calculate_reward: computing intrinsic reward on the current rollout")
        int_rew, ac, feat = self.dynamics.calculate_loss(ob=self.buf_obs,
                                                         last_ob=self.buf_obs_last,
                                                         acs=self.buf_acs,
                                                         feat_input=[], pat=0)

        self.ac_list.append(ac)
        self.rew_list.append(int_rew)
        self.obs_list.append(self.buf_obs)
        self.feat_list.append(feat)
        print("ac_list", len(self.ac_list))
        print("rew_list", len(self.rew_list))
        print("obs_list", len(self.obs_list))
        print("feat_list", len(self.feat_list))
        #self.feat_list = tf.stack(self.feat_list, feat)
        # keep only the most recent nthe rollouts in the history buffers
        if len(self.ac_list) > (self.nthe):
            self.ac_list = self.ac_list[1:]
            self.rew_list = self.rew_list[1:]
            self.obs_list = self.obs_list[1:]
            self.feat_list = self.feat_list[1:]

        if self.step_count / self.nsteps > self.nthe and self.nthe:
            self.flag2 = 0
            print("calculate_reward: re-evaluating intrinsic reward on the stored rollout")
            int_rew_now, ac_now, feat_now = self.dynamics.calculate_loss(ob=self.obs_list[0],
                                                                         last_ob=self.buf_obs_last,
                                                                         acs=self.ac_list[0],
                                                                         feat_input=self.feat_list[0], pat=1)
            # drift between the stored intrinsic reward and its re-evaluation now
            coc = 10 * abs(self.rew_list[0] - int_rew_now)
            """
            if self.flag:
                self.mean = coc
                print("var shape: ",self.var.shape)
                norm_coc = np.multiply((coc - self.mean),np.reciprocal(np.sqrt(self.var)))
            else:
                norm_coc = np.multiply((coc - self.mean),np.reciprocal(np.sqrt(self.var)))
            """
            norm_coc = coc
            #norm_coc = np.multiply((coc - self.mean),np.reciprocal(np.sqrt(self.var)))
            #print("previous rew",self.rew_list[0])
            #print("now rew",int_rew_now)
            #self.mean = 0.8 * self.mean + 0.2 * coc
            #self.var = 0.8 * self.var + 0.2 * (coc-self.mean)**2
            #print("norm_coc",norm_coc)
            pat = norm_coc
            #pat = 1 / (1 + np.exp(-norm_coc))
            #MontezumaRevengeNoFrameskip-v4

            ##############################################################################
            # This is the training code
            if self.flag:
                self.model = tf.keras.models.Sequential([
                    tf.keras.layers.Dense(256, activation="relu"),
                    tf.keras.layers.Dense(128, activation="relu"),
                    tf.keras.layers.Dropout(0.2),
                    tf.keras.layers.Dense(64, activation="relu"),
                    #tf.keras.layers.Dropout(0.3),
                    tf.keras.layers.Dense(32, activation="relu"),
                    tf.keras.layers.Dense(1),
                    #tf.keras.layers.Flatten(input_shape=(84, 84, 4)),
                    #tf.keras.layers.Dense(128, activation='relu'),
                    #tf.keras.layers.Dropout(0.2),
                    #tf.keras.layers.Dense(1),
                    #tf.keras.layers.Reshape(target_shape=(128, 128, -1))
                ])

                self.flag = 0
            #x = flatten_two_dims(tf.convert_to_tensor(self.feat_list[0]))
            x = tf.convert_to_tensor(self.feat_list[0])
            x_test = tf.convert_to_tensor(self.feat_list[-1])

            #print("Feature dim before converting into Tensor: ", self.feat_list[0].shape)
            #print("Feature dim after converting into Tensor", tf.convert_to_tensor(self.feat_list[0].shape))
            #print("Flatten Two Dim ..Feature Dim..:", x.shape)
            ac = tf.one_hot(self.ac_list[0], self.ac_space.n, axis=2)
            ac_test = tf.one_hot(self.ac_list[-1], self.ac_space.n, axis=2)
            #print("Action Dim ..before one hot..: ",self.ac_list[0].shape, self.ac_space.n)
            #print("Action Dim ..After one hot: ", ac.shape) 
            sh = tf.shape(ac)
            #ac = flatten_two_dims(ac)
            #print("Action Dim after flatten", ac.shape)
            x_new = tf.concat([x, ac], axis=-1)
            x_test2 = tf.concat([x_test, ac_test], axis=-1)
            #print("Input value concat ..x,ac..: ",x_new.shape)
            #print("Output Dim of Patience Model: ", pat)
            x_new = flatten_two_dims(x_new)
            x_test2 = flatten_two_dims(x_test2)
            # xnew 16384, 16384, 512+4 (train_examples, train_labels)
            
            pat = pat.reshape((16384, 1))  # hard-coded nenvs * nsteps = 128 * 128 samples
            train_dataset = tf.data.Dataset.from_tensor_slices((tf.stop_gradient(x_new),tf.stop_gradient(pat)))
            test_dataset = tf.data.Dataset.from_tensor_slices((x_new,pat))
            #pred_dataset = tf.data.Dataset.from_tensor_slices((x_test2))
            BATCH_SIZE = 64
            SHUFFLE_BUFFER_SIZE = 10

            train_dataset = train_dataset.shuffle(SHUFFLE_BUFFER_SIZE).batch(BATCH_SIZE)
            test_dataset = test_dataset.batch(BATCH_SIZE)
            #pred_dataset = pred_dataset.batch(BATCH_SIZE)
            #print("Train Dataset Shape : ", train_dataset.shape)
            
            self.model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
                               loss='mean_absolute_error', metrics=['mean_absolute_error'])
            # feat_list 128 x 128 x 512 action 128 x 128 x 4 hidsize 512
            results = self.model.evaluate(test_dataset)
            #print('test loss, test acc:', results)
            
            
            #print("pred",pred)
            #print("Input Dim for Patience Model: ", x_new.shape)
            print("Output Dim of Patience Model: ", pat)
            self.model.fit(train_dataset, epochs=20,workers = 4, verbose = 0)
            print(self.model.summary())
           # tf.keras.utils.plot_model(self.model, to_file="model.png",show_shapes=True,show_layer_names=True,rankdir="TB",dpi=96)
            pred = self.model.predict(x_test2, steps = 1)
            #print("pred shape:",pred.shape)
            # elementwise multiply
            #print("int_rew shape",int_rew.shape)
            #print("ext rew bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", self.buf_ext_rews.shape)
            
            #print("pred:",np.reshape(np.array(pred),(128,128)))
            int_rew = np.array(int_rew) * np.reshape(np.array(pred),(128,128))
            #int_rew = np.multiply(int_rew,pat_pred)
            #print(int_rew.shape)
            self.flag2 = 1
        ################################################################################

        self.buf_rews[:] = self.reward_fun(int_rew=int_rew, ext_rew=self.buf_ext_rews)
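
    # In short, the experimental block above (a research-code sketch) does:
    #   drift = 10 * |r_int_then(obs_0) - r_int_now(obs_0)|  # how much the dynamics
    #                                                        # model's surprise changed
    #   model: MLP mapping (features, one-hot action) -> drift
    #   r_int <- r_int * model.predict(features_now, actions_now)
    # i.e. the current intrinsic reward is scaled by the predicted "patience"
    # signal before being mixed with the extrinsic reward via reward_fun.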


    def rollout_step(self):
        t = self.step_count % self.nsteps
        s = t % self.nsteps_per_seg
        for l in range(self.nlumps):
            obs, prevrews, news, infos = self.env_get(l)
            # if t > 0:
            #     prev_feat = self.prev_feat[l]
            #     prev_acs = self.prev_acs[l]
            for info in infos:
                epinfo = info.get('episode', {})
                mzepinfo = info.get('mz_episode', {})
                retroepinfo = info.get('retro_episode', {})
                epinfo.update(mzepinfo)
                epinfo.update(retroepinfo)
                if epinfo:
                    if "n_states_visited" in info:
                        epinfo["n_states_visited"] = info["n_states_visited"]
                        epinfo["states_visited"] = info["states_visited"]
                    self.ep_infos_new.append((self.step_count, epinfo))

            sli = slice(l * self.lump_stride, (l + 1) * self.lump_stride)

            acs, vpreds, nlps = self.policy.get_ac_value_nlp(obs)
            self.env_step(l, acs)

            # self.prev_feat[l] = dyn_feat
            # self.prev_acs[l] = acs
            self.buf_obs[sli, t] = obs
            self.buf_news[sli, t] = news
            self.buf_vpreds[sli, t] = vpreds
            self.buf_nlps[sli, t] = nlps
            self.buf_acs[sli, t] = acs
            if t > 0:
                self.buf_ext_rews[sli, t - 1] = prevrews
            # if t > 0:
            #     dyn_logp = self.policy.call_reward(prev_feat, pol_feat, prev_acs)
            #
            #     self.int_rew[sli] = int_rew
            #     self.buf_rews[sli, t - 1] = self.reward_fun(ext_rew=prevrews, int_rew=int_rew)
            if self.recorder is not None:
                self.recorder.record(timestep=self.step_count, lump=l, acs=acs, infos=infos, int_rew=self.int_rew[sli],
                                     ext_rew=prevrews, news=news)
        self.step_count += 1
        if s == self.nsteps_per_seg - 1:
            for l in range(self.nlumps):
                sli = slice(l * self.lump_stride, (l + 1) * self.lump_stride)
                nextobs, ext_rews, nextnews, _ = self.env_get(l)
                self.buf_obs_last[sli, t // self.nsteps_per_seg] = nextobs
                if t == self.nsteps - 1:
                    self.buf_new_last[sli] = nextnews
                    self.buf_ext_rews[sli, t] = ext_rews
                    _, self.buf_vpred_last[sli], _ = self.policy.get_ac_value_nlp(nextobs)
                    # dyn_logp = self.policy.call_reward(self.prev_feat[l], last_pol_feat, prev_acs)
                    # dyn_logp = dyn_logp.reshape(-1, )
                    # int_rew = dyn_logp
                    #
                    # self.int_rew[sli] = int_rew
                    # self.buf_rews[sli, t] = self.reward_fun(ext_rew=ext_rews, int_rew=int_rew)

    def update_info(self):
        all_ep_infos = MPI.COMM_WORLD.allgather(self.ep_infos_new)
        all_ep_infos = sorted(sum(all_ep_infos, []), key=lambda x: x[0])
        if all_ep_infos:
            all_ep_infos = [i_[1] for i_ in all_ep_infos]  # remove the step_count
            keys_ = all_ep_infos[0].keys()
            all_ep_infos = {k: [i[k] for i in all_ep_infos] for k in keys_}

            self.statlists['eprew'].extend(all_ep_infos['r'])
            self.stats['eprew_recent'] = np.mean(all_ep_infos['r'])
            self.statlists['eplen'].extend(all_ep_infos['l'])
            self.stats['epcount'] += len(all_ep_infos['l'])
            self.stats['tcount'] += sum(all_ep_infos['l'])
            if 'visited_rooms' in keys_:
                # Montezuma specific logging.
                self.stats['visited_rooms'] = sorted(list(set.union(*all_ep_infos['visited_rooms'])))
                self.stats['pos_count'] = np.mean(all_ep_infos['pos_count'])
                self.all_visited_rooms.extend(self.stats['visited_rooms'])
                self.all_scores.extend(all_ep_infos["r"])
                self.all_scores = sorted(list(set(self.all_scores)))
                self.all_visited_rooms = sorted(list(set(self.all_visited_rooms)))
                if MPI.COMM_WORLD.Get_rank() == 0:
                    print("All visited rooms")
                    print(self.all_visited_rooms)
                    print("All scores")
                    print(self.all_scores)
            if 'levels' in keys_:
                # Retro logging
                temp = sorted(list(set.union(*all_ep_infos['levels'])))
                self.all_visited_rooms.extend(temp)
                self.all_visited_rooms = sorted(list(set(self.all_visited_rooms)))
                if MPI.COMM_WORLD.Get_rank() == 0:
                    print("All visited levels")
                    print(self.all_visited_rooms)

            current_max = np.max(all_ep_infos['r'])
        else:
            current_max = None
        self.ep_infos_new = []

        if current_max is not None:
            if (self.best_ext_ret is None) or (current_max > self.best_ext_ret):
                self.best_ext_ret = current_max
        self.current_max = current_max

    def env_step(self, l, acs):
        self.envs[l].step_async(acs)
        self.env_results[l] = None

    def env_get(self, l):
        if self.step_count == 0:
            ob = self.envs[l].reset()
            out = self.env_results[l] = (ob, None, np.ones(self.lump_stride, bool), {})
        else:
            if self.env_results[l] is None:
                out = self.env_results[l] = self.envs[l].step_wait()
            else:
                out = self.env_results[l]
        return out
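For concreteness, the reward_fun defined in __init__ clips the extrinsic reward to [-1, 1] before mixing; a minimal numeric check with illustrative coefficients:

import numpy as np

ext_rew_coeff, int_rew_coeff = 1.0, 0.5   # illustrative values
reward_fun = lambda ext_rew, int_rew: \
    ext_rew_coeff * np.clip(ext_rew, -1., 1.) + int_rew_coeff * int_rew

print(reward_fun(ext_rew=np.array([5.0, -0.3]),   # 5.0 is clipped to 1.0
                 int_rew=np.array([0.2, 0.4])))   # -> [1.1, -0.1]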