def __init__(self, name, backTestingEngine, startCash=0):
    """Create an account and register it with the back-testing engine.

    :param name: account display name used in logs
    :param backTestingEngine: engine this account registers itself with
    :param startCash: opening cash balance (defaults to 0)
    """
    self._name = name
    self._currentCash = startCash
    self._currentPosition = {}
    self._strategy = None
    self._orderCache = []
    # Register with the engine before creating recorders that reference it.
    self._engine = backTestingEngine
    self._engine.registerAccount(self)
    # Working recorder plus a separate "official" recorder.
    self._recorder = Recorder(self._engine, self)
    self._officialRecorder = Recorder(self._engine, self)
    logger.info('Initialize Account %s', name)
def test_actionsInIsolation(self):
    # isRunning() must be safe to call on a fresh Recorder, without any
    # prior start()/stop() — exercise it on three independent instances.
    for _ in range(3):
        with Recorder() as recorder:
            recorder.isRunning()
def run(self, checked=False):
    """Load the project, display actions and tracking windows

    :param checked: decide whether to load or unload the project
    :type checked: bool
    """
    if checked:
        # Load path: read the project, then push its mobiles/providers
        # into the tracking, guidance and recorder components.
        p = self.project.read()
        self.project.load(p)
        self.tracking.setMobiles(self.project.mobileItems)
        self.guidance.setMobiles(self.project.mobileItems)
        self.tracking.setProviders(self.project.dataProviders)
        self.recorder = Recorder(self.project.recorderPath)
        self.recorder.setMobiles(self.project.mobileItems)
        self.recorder.recordingStarted.connect(self.recordingStarted)
        self.tracking.show()
        # Guidance window is restored only if it was visible at unload time.
        if self.guidanceVisible:
            self.guidance.show()
        self.iface.mainWindow().statusBar().insertPermanentWidget(1, self.positionDisplay)
        self.positionDisplay.show()
    else:
        # Unload path: uncheck dependent actions, tear down widgets and
        # remember guidance visibility for the next load.
        self.actions['trackingAction'].setChecked(False)
        self.actions['recordAction'].setChecked(False)
        self.recorder = None
        self.tracking.removeMobiles()
        self.tracking.removeProviders()
        self.tracking.hide()
        self.guidanceVisible = self.guidance.isVisible()
        self.guidance.hide()
        self.project.unload()
        self.iface.mainWindow().statusBar().removeWidget(self.positionDisplay)
def _init(self):
    """Seed every RNG, build the gym environment and set up run artifacts
    (optional video recorder, tensorboard writer, logger)."""
    # Deterministic runs: seed torch/CUDA/numpy/random and pin cuDNN.
    torch.manual_seed(self._seed)
    torch.cuda.manual_seed(self._seed)
    torch.cuda.manual_seed_all(self._seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    np.random.seed(self._seed)
    random.seed(self._seed)

    self._id = str(int(time.time()))
    self._env = gym.make(self._env_name)
    self._env.seed(self._seed)

    # Run name: algo_env_params[_postfix]_id
    parts = [f'{self._algo_name}', f'{self._env_name}', str(self._runner_params)]
    if self._name_postfix:
        parts.append(self._name_postfix)
    parts.append(self._id)
    name = '_'.join(parts)

    # Optionally wrap the env so episodes are recorded to video files.
    if self._video_record_interval:
        self._recorder = Recorder(self._env, False, f'./videos/{name}')
        self._env = self._recorder.wrapped_env()
    if not isinstance(self._env.action_space, gym.spaces.discrete.Discrete):
        raise Exception('discrete space만 지원됨.')
    if self._save_check_log:
        self._writer = SummaryWriter(log_dir=f'runs/{name}')
        self._logger = Logger('logs', name)
def buildIt():
    """Open a timestamped WAV file under ./savedRecordings and start
    recording on it; returns the active recording handle."""
    rec = Recorder(channels=2)
    filename = ('./savedRecordings/'
                + datetime.now().strftime("%d_%m_%Y_%H_%M_%S") + ".wav")
    recording = rec.open(filename, 'wb')
    recording.start_recording()
    return recording
def main():
    """Entry point: configure recorder and clipper, run the Qt UI, then
    shut down cleanly once the event loop exits."""
    # Recorder: load config from file and refresh its UI status.
    recorder = Recorder()
    recorder.read_config()
    recorder.update_status()
    # Clipper: same treatment.
    clipper = Clipper()
    clipper.read_config()
    clipper.update_status()

    app = QApplication(sys.argv)
    app.setApplicationName('Live Audio Tool')
    app.setApplicationDisplayName('Live Audio Tool')
    window = UI(clipper, recorder)
    window.show()
    app.exec_()

    # Event loop done: stop the recorder, then give pending clip exports
    # time to finish before exiting.
    recorder.is_recording = False
    time.sleep(clipper.post_buffer)
    sys.exit()
def __init__(self, transitions, states, debug):
    """Assemble the walkie-talkie state machine and start its driver.

    :param transitions: transition table for the walkie-talkie Machine
    :param states: state table for the walkie-talkie Machine
    :param debug: when True, use a fixed UUID so the MQTT channel is stable
    """
    self.payload = {}
    self._logger = logging.getLogger(__name__)
    self._logger.info('logging under name {}.'.format(__name__))
    self._logger.info('Starting Component')
    self.debug = debug
    self.app = None
    self.lock = Lock()
    self.recorder = Recorder(self)
    self.text_to_speech = Speaker()
    self.overwrote_last = False
    # MQTT channel is derived from a UUID; debug pins it to a known value.
    self.uuid = uuid4().hex
    if self.debug:
        self.uuid = "122ec9e8edda48f8a6dd290747acfa8c"
    self.channel = "{server}{uuid}".format(server=MQTT_TOPIC_BASE,uuid=self.uuid)
    self.name = "jonas"
    stm_walkie_talkie_name = "{}_walkie_talkie".format(self.name)
    walkie_talkie_machine = Machine(transitions=transitions, states=states, obj=self, name=stm_walkie_talkie_name)
    self.stm = walkie_talkie_machine
    # Recognizer machine is constructed with this machine's name —
    # presumably so it can route events here; confirm against get_state_machine.
    recognizer_stm = get_state_machine('stm_recognizer', [stm_walkie_talkie_name])
    # Both machines run under one driver, started as part of construction.
    self.stm_driver = Driver()
    self.stm_driver.add_machine(walkie_talkie_machine)
    self.stm_driver.add_machine(recognizer_stm)
    self.stm_driver.start()
    self._logger.debug('Component initialization finished')
def main(): parser = OptionParser() parser.add_option('-v', '--verbose', dest='verbose', action='store_true', default=False, help='verbose output') (options, args) = parser.parse_args() verbose = options.verbose if verbose: print 'verbose output mode.' if len(args) == 0: print 'no input files.' exit filename = args[0] if verbose: print 'filename: ' + filename p = Player(filename) r = Recorder() r.start() pl = plotter(p, r) th = Thread(target=pl) th.daemon = True th.start() p.run()
def on_press_startbutton(self):
    """Start monitoring: lock the file-path inputs, load config and host
    list, build the status table, then launch the worker thread.

    Does nothing if a worker thread already exists.
    """
    if not self.working_t:
        # Freeze the inputs while the worker is active.
        self.config_file_box.disable()
        self.ipaddr_file_box.disable()
        self.log_folder_box.disable()
        config = utils.load_config(self.config_file_box.get_text())
        ipaddr_list = utils.load_ipaddr(self.ipaddr_file_box.get_text())
        self.recorder = Recorder(self.log_folder_box.get_text())
        # One status row per host, initially red / invalid.
        # (idiom fix: enumerate instead of range(len(...)))
        for idx, host in enumerate(ipaddr_list):
            self.status_list.append(
                "%s(%s)" % (host['name'], host['ip']))
            self.status_list.set_red(idx)
            self.ipaddr_to_status[host['ip']] = {
                'name': host['name'],
                'status': utils.MSG_STATUS.invalid,
                'index': idx
            }
        print(self.ipaddr_to_status)
        self.controller.initialize_worker(config, ipaddr_list)
        self.working_t = threading.Thread(target=self.controller.do_work)
        self.working_t.start()
def live_plot():
    """Record while streaming samples into a live plot, then stop."""
    plot_view = Plotter()
    rec = Recorder()
    rec.add_recording_listener(plot_view)
    rec.record()
    plot_view.plot()
    rec.stop()
def __init__(self, gym_name, agent):
    """Bind an agent to a fresh gym environment and begin training.

    :param gym_name: gym environment id passed to gym.make
    :param agent: agent object exposing learn_how_to_play(env)
    """
    self.agent = agent
    self.env = gym.make(gym_name)
    self.score = 0
    self.lives = 0
    self.recorder = Recorder()
    # Training is kicked off immediately as part of construction.
    self.agent.learn_how_to_play(self.env)
def car_controller(predictor_conn, alternating_autonomous=False, record_dir="replays/test"): global img_lock, lock, car # how many seconds it takes before it switches between being controlled by model and human. seconds_between_switch = 1.0 # when it is controlled by the model it should not record any images. # how often an image and degree is to be stored. seconds_between_capture = 0.2 rec = Recorder(record_dir) tlast_switch = time.time() tlast_capture = time.time() currently_autonomous = False while True: time.sleep(0.01) if (time.time() - tlast_switch ) > seconds_between_switch and alternating_autonomous: tlast_switch = time.time() currently_autonomous = not currently_autonomous img_lock.acquire() if cur_img is None: img_lock.release() print("continue") continue gray = cv2.cvtColor(cur_img, cv2.COLOR_BGR2GRAY) img = cv2.resize(gray, (30, 30)) img_lock.release() lock.acquire() local_motor_status = motor_status if not currently_autonomous: local_deg = deg #print("human "+str(deg)) car.motor(motor_status) local_motor_status = motor_status else: predictor_conn.send(img) pred = predictor_conn.recv() #print("predicted "+str(pred)) local_deg = pred car.turn(local_deg) if (time.time() - tlast_capture ) > seconds_between_capture and local_motor_status: tlast_capture = time.time() rec.store(img, deg=local_deg) if currently_autonomous: print("auto " + str(local_deg)) else: print("human " + str(local_deg)) lock.release()
def __init__(self, server_ip, server_port):
    """UDP client targeting one server, with its own audio recorder.

    :param server_ip: server address
    :param server_port: server port
    """
    self.server_ip = server_ip
    self.server_port = server_port
    self.server_address = (server_ip, server_port)
    self.client = socket(AF_INET, SOCK_DGRAM)
    self.recorder = Recorder()
    self.people = {}
def __init__(self, parent=None, bitrate=pyaudio.paInt16, sample_rate=48000, num_channels=2): super().__init__(parent) # recorder self.recorder = Recorder(parent=None, bitrate=bitrate, sample_rate=sample_rate, num_channels=num_channels) # worker thread self.worker_thread = QThread() self.recorder.moveToThread(self.worker_thread) self.worker_thread.start() self.timer = QTimer() self.timer.setInterval((1000 / sample_rate) * self.recorder.CHUNK) # signals self.timer.timeout.connect(self.recorder.write_frames) self.start_recording.connect(self.recorder.start_recording) self.start_recording.connect(self.timer.start) self.stop_recording.connect(self.recorder.stop_recording) self.stop_recording.connect(self.timer.stop)
def __init__(self, battery_capacity=7.5, initial_SOC=100, min_max_SOC=(0,100), cn=2.55, vn=3.7, dco=3.0, cco=4.2, max_c_rate=10):
    """Battery model; defaults describe a Panasonic CGR18650E Li-ion cell.

    :param battery_capacity: pack capacity [kWh]
    :param initial_SOC: initial state of charge [%]
    :param min_max_SOC: SOC window for the buffer-grid strategy [%]
    :param cn: nominal cell capacity [Ah]
    :param vn: nominal cell voltage [V]
    :param dco: discharge cut-off voltage [V]
    :param cco: charge cut-off voltage [V]
    :param max_c_rate: determines the maximum allowed current
    """
    self.battery_capacity = battery_capacity
    self.initial_SOC = initial_SOC
    self.min_max_SOC = min_max_SOC
    self.cn = cn
    self.vn = vn
    self.dco = dco
    self.cco = cco
    self.max_c_rate = max_c_rate
    # Time-series channels captured during simulation.
    self.recorder = Recorder(
        'P',            # power accepted by the battery [kW]
        'p_reject',     # power rejected by the battery [kW]
        'Q',            # cell charge [Ah]
        'V1',           # voltage across the first RC branch [V]
        'V2',           # voltage across the second RC branch [V]
        'Vcell',        # cell terminal voltage [V]
        'battery_SOC',  # cell state of charge [%]
    )
    # Pack size: energy capacity over per-cell energy (Ah * V, in Wh).
    self.ncells = self.battery_capacity / (self.cn * self.vn) * 1000
def __init__(self, hps, ob_space, ac_space, nenvs, nsteps_per_seg, nsegs_per_env, nlumps, envs, policy, int_rew_coeff, ext_rew_coeff, record_rollouts, dynamics):
    """Rollout runner: allocates per-env, per-step buffers for observations,
    actions, rewards and value predictions, plus an optional rollout Recorder.
    """
    self.hps = hps
    self.nenvs = nenvs
    self.nsteps_per_seg = nsteps_per_seg
    self.nsegs_per_env = nsegs_per_env
    # Total stored steps per env = steps per segment * segments per env.
    self.nsteps = self.nsteps_per_seg * self.nsegs_per_env
    self.ob_space = ob_space
    self.ac_space = ac_space
    self.nlumps = nlumps
    # Envs are processed in `nlumps` contiguous groups of this size.
    self.lump_stride = nenvs // self.nlumps
    self.envs = envs
    self.policy = policy
    self.dynamics = dynamics
    # Combined reward = weighted sum of extrinsic and intrinsic rewards.
    # (The clipped-extrinsic variant below was deliberately disabled.)
    # self.reward_fun = lambda ext_rew, int_rew: ext_rew_coeff * np.clip(ext_rew, -1., 1.) + int_rew_coeff * int_rew
    self.reward_fun = lambda ext_rew, int_rew: ext_rew_coeff * ext_rew + int_rew_coeff * int_rew
    self.buf_vpreds = np.empty((nenvs, self.nsteps), np.float32)
    # Separate value functions for intrinsic and extrinsic rewards respectively
    self.buf_vpreds_int = np.empty((nenvs, self.nsteps), np.float32)
    self.buf_vpreds_ext = np.empty((nenvs, self.nsteps), np.float32)
    self.buf_nlps = np.empty((nenvs, self.nsteps), np.float32)
    self.buf_rews = np.empty((nenvs, self.nsteps), np.float32)
    self.buf_ext_rews = np.empty((nenvs, self.nsteps), np.float32)
    self.buf_int_rews = np.empty((nenvs, self.nsteps), np.float32)
    self.buf_acs = np.empty((nenvs, self.nsteps, *self.ac_space.shape), self.ac_space.dtype)
    self.buf_obs = np.empty((nenvs, self.nsteps, *self.ob_space.shape), self.ob_space.dtype)
    self.buf_obs_last = np.empty(
        (nenvs, self.nsegs_per_env, *self.ob_space.shape), np.float32)
    # "news" presumably holds episode-start/done flags — confirm with the
    # rollout loop; the *_last copies hold per-env values for the step after
    # the stored rollout.
    self.buf_news = np.zeros((nenvs, self.nsteps), np.float32)
    self.buf_new_last = self.buf_news[:, 0, ...].copy()
    self.buf_vpred_last = self.buf_vpreds[:, 0, ...].copy()
    # Separate value functions for intrinsic and extrinsic rewards respectively
    self.buf_vpred_int_last = self.buf_vpreds_int[:, 0, ...].copy()
    self.buf_vpred_ext_last = self.buf_vpreds_ext[:, 0, ...].copy()
    self.env_results = [None] * self.nlumps
    # self.prev_feat = [None for _ in range(self.nlumps)]
    # self.prev_acs = [None for _ in range(self.nlumps)]
    self.int_rew = np.zeros((nenvs, ), np.float32)
    # Optional rollout recording.
    self.recorder = Recorder(
        nenvs=self.nenvs, nlumps=self.nlumps) if record_rollouts else None
    # Rolling per-statistic windows over the last 100 entries.
    self.statlists = defaultdict(lambda: deque([], maxlen=100))
    self.stats = defaultdict(float)
    self.best_ext_ret = None
    self.best_eplen = None
    self.all_visited_rooms = []
    self.all_scores = []
    self.step_count = 0
def __init__(self):
    """Wire a GPIO push button to an audio recorder.

    Button on GPIO 17 to GROUND. Pressing starts a recording, holding keeps
    it going, and releasing stops it and saves the file in WAV format.
    """
    self.button = Button(17, pull_up=True)
    # Mono capture (channels=1, stereo would be 2); Recorder also accepts
    # optional rate=16000 and frames_per_buffer=1024*4 keyword arguments.
    self.recorderHandler = Recorder(channels=1)
    self.recfile = None
    # Make sure the directory that receives the WAV files exists.
    audio_dir = ROOT_PATH + "/Audio"
    if os.path.isdir(audio_dir):
        print("Audio Directory is Available")
    else:
        print("Audio Directory is Unavailable, so that's why creating one")
        os.makedirs(audio_dir)
    self.path = ROOT_PATH + "/Audio/"
    self.button.when_pressed = self.pressed
def record_start():
    """Allocate a new recorder id, start an AVI recorder on the shared
    camera, register it, and return the id as a string.

    :return: the new recorder id (string) for later stop/lookup calls
    """
    global recorder_id
    recorder_id += 1
    file_name = 'video' + str(recorder_id) + str(datetime.datetime.now()) + '.avi'
    recorder = Recorder(camera, file_name, (FRAME_WIDTH, FRAME_HEIGHT))
    # BUG FIX: was `recorders[recoder_id]` (typo) — raised NameError at
    # runtime, so the recorder was never registered in the map.
    recorders[recorder_id] = recorder
    return str(recorder_id)
def init_recording_branch(self): """ Creates the branch, that will be responsible for on demand recording of audio to file :return: Queue element """ # Create a queue element and link it to tee (required by tee) queue = Gst.ElementFactory.make('queue') if config.recording['prerecord']: # Set maximums to about 10 minutes of data - the maximum song duration we expect to prebuffer queue.set_property('max-size-buffers', 20 * 12 * 200) queue.set_property('max-size-bytes', 20 * 12 * 10485760) queue.set_property('max-size-time', 20 * 60 * (10**9)) queue.set_property('min-threshold-time', 10 * 60 * (10**9)) queue.set_property('leaky', 2) self._bin.add(queue) tee_pad = self._tee.get_request_pad("src_%u") queue_pad = queue.get_static_pad('sink') tee_pad.link(queue_pad) # Recording elements are created and managed by RecorderBin object self._recorder = Recorder(self._bin) queue.link(self._recorder.get_src_element()) return queue
def run():
    """Record audio until a timeout elapses or a stop request arrives via a
    named pipe.

    If the pipe already exists, another instance is assumed to be recording:
    write "stop" into it and return without starting a new capture.
    """
    if os.path.exists(PIPE_PATH):
        # Signal the running instance to stop, then bail out.
        subprocess.call(['echo "stop" > ' + PIPE_PATH], shell=True)
        return
    os.mkfifo(PIPE_PATH)
    # Non-blocking read end of the FIFO.
    pipe_fd = lambda: os.fdopen(os.open(PIPE_PATH, os.O_RDONLY | os.O_NONBLOCK))
    rec = Recorder(channels=CHANNELS)
    write_stream = lambda: rec.open(FILE_NAME, 'wb')
    with write_stream() as record, pipe_fd() as pipe:
        record.start_recording()
        print('start capture...')
        start = time.time()
        while True:
            try:
                elapsed = time.time() - start
                if elapsed > TIMEOUT:
                    print('timed out')
                    break
                # NOTE(review): the loop exits when this read raises IOError
                # — presumably triggered by the "stop" write / writer close;
                # confirm this on the target platform.
                pipe.read()
            except IOError:
                break
        os.remove(PIPE_PATH)
        record.stop_recording()
        print('audio captured!')
def main(args, log):
    """Set up a DBus-driven recorder and run the GLib main loop.

    :param args: parsed CLI args (encoder, extension, program)
    :param log: logger used throughout
    """
    DBusGMainLoop(set_as_default=True)
    encoder = get_encoder(args.encoder)
    log.debug("Encoder set to: %s" % encoder)
    ext = args.extension.strip('.') if args.extension else get_ext(
        args.encoder)
    log.debug("Using extension: .%s" % ext)
    recorder = Recorder(log, get_sink_by_name(args.program), encoder, ext)
    # Route property-change notifications into the recorder.
    message_filter = partial(notifications, recorder, log)
    bus = dbus.SessionBus()
    bus.add_match_string_non_blocking(
        "interface='org.freedesktop.DBus.Properties'")
    bus.add_message_filter(message_filter)

    # FIX: parameter renamed from `signal` to `signum` — the old name
    # shadowed the `signal` module inside the handler.
    def signal_handler(signum, frame):
        """Save the recording and exit cleanly on SIGINT."""
        log.info('Bye bye!')
        recorder.save()
        sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)
    mainloop = glib.MainLoop()
    try:
        mainloop.run()
    except KeyboardInterrupt:
        log.info('Shutting down')
def __init__(self, basedir=None):
    """Resolve the base directory, load (initializing if needed) the
    settings database, and set up logging.

    Settings: base_dir, book_name, book_file, end_page, log_file,
    note_file, errata_file, init_done. The database stores base names;
    full paths are built by prefixing base_dir (book_file may already be
    an absolute path).

    :param basedir: optional base directory; falls back to this file's
        directory when missing or not a real directory
    """
    if basedir and os.path.isdir(basedir):
        self.base_dir = os.path.normpath(basedir)
    else:
        self.base_dir = os.path.dirname(os.path.realpath(__file__))
    settings_path = os.path.join(self.base_dir, self.config_file)
    recorder = Recorder(settings_path)
    db = recorder.opendb()
    # First run: interactively collect settings, then persist them.
    if not db.get('init_done'):
        self.init(db)
        recorder.persist()
    self.load(db)
    self.logger = Logger(self.log_path)
    recorder.closedb()
def __init__(self, url):
    """Analyzer for a single page.

    :param url: the URL to analyze
    """
    self.url = url        # URL being analyzed
    self.block_li = []    # text blocks found in the page
    self.title = ''
    # Use a freshly-reset recorder so no previous state leaks in.
    self.recorder = Recorder()
    self.recorder.reset()
def test(args):
    """Run the timing test cases and report counts plus accuracy."""
    print("start test ...")
    rec = Recorder(args)
    autotest.test_time_case(rec, args)
    rec.output(AutoTest.error_log, -1)
    summary = "test finish, cases: {0} errors: {1} accuracy: {2}%".format(
        rec.case_count, rec.failed_count, rec.accuracy * 100.0)
    print(summary)
def startPlayback(self, gap):
    """Replay the current recording on the LEDs, unless playback is
    already in progress.

    :param gap: blink gap forwarded to LEDBlinker
    """
    # Idiom fix: identity comparison against None (was `!= None`).
    if self.playback is not None:
        return
    self.playback = LEDBlinker(self.recorder.recordings, gap)
    self.playback.start()
    GPIO.output(yellowLED, 1)
    self.recorder = Recorder(
    )  # create new recorder; drop old recording and status
def __init__(self, solve_continuous=False, **kwargs):
    """LQR controller over a 2-DOF task space; optionally records signals.

    :param solve_continuous: whether to solve the continuous-time problem
    """
    super(Control, self).__init__(**kwargs)
    self.DOF = 2  # task space dimensionality
    self.u = None
    self.solve_continuous = solve_continuous

    if self.write_to_file is True:
        from recorder import Recorder
        # One recorder per logged quantity, all tagged with the task + 'lqr'.
        labels = ('control signal', 'end-effector position',
                  'distance from target')
        self.u_recorder, self.xy_recorder, self.dist_recorder = (
            Recorder(label, self.task, 'lqr') for label in labels)
        self.recorders = [self.u_recorder,
                          self.xy_recorder,
                          self.dist_recorder]
def __init__(self, parent=None, bitrate=pyaudio.paInt16, sample_rate=48000, num_channels=2):
    """Thread owning a Recorder configured with the given audio parameters.

    :param bitrate: pyaudio sample-format constant (paInt16 by default)
    :param sample_rate: capture rate in Hz
    :param num_channels: number of audio channels
    """
    QThread.__init__(self, parent)
    self._recorder = Recorder(bitrate, sample_rate, num_channels)
def __init__(self, ob_space, ac_space, nenvs, nsteps_per_seg, nsegs_per_env, nlumps, envs, policy, int_rew_coeff, ext_rew_coeff, record_rollouts, dynamics,patience, nthe):
    """Rollout runner with additional buffers/state for a patience-based
    mechanism (`patience`, `nthe`, *_nold buffers, mean/var, flags).
    """
    self.nenvs = nenvs
    self.nsteps_per_seg = nsteps_per_seg
    self.nsegs_per_env = nsegs_per_env
    # Total stored steps per env = steps per segment * segments per env.
    self.nsteps = self.nsteps_per_seg * self.nsegs_per_env
    self.ob_space = ob_space
    self.ac_space = ac_space
    self.nlumps = nlumps
    # Envs are processed in `nlumps` contiguous groups of this size.
    self.lump_stride = nenvs // self.nlumps
    self.envs = envs
    self.policy = policy
    self.dynamics = dynamics
    self.patience = patience
    # Combined reward: clipped extrinsic plus intrinsic, each weighted.
    self.reward_fun = lambda ext_rew, int_rew: ext_rew_coeff * np.clip(ext_rew, -1., 1.) + int_rew_coeff * int_rew
    self.buf_vpreds = np.empty((nenvs, self.nsteps), np.float32)
    self.buf_nlps = np.empty((nenvs, self.nsteps), np.float32)
    self.buf_rews = np.empty((nenvs, self.nsteps), np.float32)
    self.buf_ext_rews = np.empty((nenvs, self.nsteps), np.float32)
    self.buf_acs = np.empty((nenvs, self.nsteps, *self.ac_space.shape), self.ac_space.dtype)
    self.buf_obs = np.empty((nenvs, self.nsteps, *self.ob_space.shape), self.ob_space.dtype)
    self.buf_obs_last = np.empty((nenvs, self.nsegs_per_env, *self.ob_space.shape), np.float32)
    # "news" presumably holds episode-start/done flags — confirm with the
    # rollout loop; the *_last copies hold per-env values for the step after
    # the stored rollout.
    self.buf_news = np.zeros((nenvs, self.nsteps), np.float32)
    self.buf_new_last = self.buf_news[:, 0, ...].copy()
    self.buf_vpred_last = self.buf_vpreds[:, 0, ...].copy()
    self.env_results = [None] * self.nlumps
    # self.prev_feat = [None for _ in range(self.nlumps)]
    # self.prev_acs = [None for _ in range(self.nlumps)]
    self.int_rew = np.zeros((nenvs,), np.float32)
    # Optional rollout recording.
    self.recorder = Recorder(nenvs=self.nenvs, nlumps=self.nlumps) if record_rollouts else None
    # Rolling per-statistic windows over the last 100 entries.
    self.statlists = defaultdict(lambda: deque([], maxlen=100))
    self.stats = defaultdict(float)
    self.best_ext_ret = None
    self.all_visited_rooms = []
    self.all_scores = []
    self.step_count = 0
    #########################################################
    # NOTE(review): this binds BOTH self.ac_buf and self.int_rew to the SAME
    # array (and rebinds the self.int_rew created above) — confirm the
    # aliasing is intentional.
    self.ac_buf = self.int_rew = np.zeros((nenvs,), np.float32)
    self.buf_acs_nold = np.empty((nenvs, self.nsteps, *self.ac_space.shape), self.ac_space.dtype)
    self.ac_buf_nold = np.empty((nenvs, self.nsteps, *self.ac_space.shape), self.ac_space.dtype)
    self.nthe = nthe
    self.ac_list = []
    self.rew_list = []
    self.obs_list = []
    self.feat_list = []
    self.patience_pred = 0
    self.pat = 0
    # 128x128 running mean/variance — purpose not evident from this block;
    # presumably feature statistics, confirm against the update code.
    self.mean = np.zeros((128,128), np.float32)
    self.var = np.ones((128,128), np.float32)
    self.model = 0
    self.flag = 1
    self.flag2 = 1
def record_audio(self):
    """Capture five seconds of 16 kHz mono audio into intent.wav."""
    rec = Recorder(channels=1, rate=16000)
    wav_path = self.current_directory + '/intent.wav'
    with rec.open(wav_path, 'wb') as recfile2:
        recfile2.start_recording()
        rospy.loginfo("Speak")
        time.sleep(5.0)
        recfile2.stop_recording()
        rospy.loginfo("End of file")
def __init__(self):
    """Build the main window, set up the recorder and load the article
    text (text.txt) to be shown/read by the app."""
    self.window = Tk()
    self.window.title("Wellcome To Long's App")
    self.window.geometry('350x200')
    self.recorder = Recorder()
    # FIX: use a context manager so the file is closed even if read()
    # raises (the original closed it manually after reading).
    with open("text.txt", mode="r", encoding="utf8") as file:
        self.article = file.read()
    self.count = 1