def setup_video(self, video_mapping, with_extras=None, video_callback=None):
    logging.debug("setting up video #%d (%s)" % (video_mapping.index, video_mapping.src))

    # load video into omxplayer on fifo. this will block.
    start_daemon(video_mapping.d_files)

    setup_cmd = self.OMX_CMD['setup']
    if with_extras is not None:
        setup_cmd = setup_cmd.replace("-I", "-I %s " % " ".join(
            ["--%s %s" % (e, with_extras[e]) for e in with_extras.keys()]))

    logging.debug("setup command: %s" % setup_cmd)

    p = Popen(setup_cmd % (video_mapping.src, video_mapping.fifo),
              shell=True, stdout=PIPE, stderr=STDOUT)

    while True:
        duration_line = re.findall(r'Duration\:\s+(.*),.*', p.stdout.readline())
        if len(duration_line) == 1:
            duration_str = duration_line[0].split(",")[0]
            duration = {
                'millis': time_str_to_millis(duration_str),
                'str': duration_str
            }

            if video_callback is not None:
                video_callback({'index': video_mapping.index,
                                'info': {'duration': duration}})
            break

    stop_daemon(video_mapping.d_files)
def _parse_typing_notify(self, frm):
    try:
        self.__composing_container.remove(frm)
    except ValueError:
        pass
    time.sleep(0.2)
    self.mmp_handler_composing_start(frm)
    self.__composing_container.append(frm)
    utils.start_daemon(self._typing_notifier, (frm,))
def test_queued_runs(tmpdir, foo_example_repo):
    dagster_home_path = tmpdir.strpath
    with setup_instance(
        dagster_home_path,
        """run_coordinator:
    module: dagster.core.run_coordinator
    class: QueuedRunCoordinator
    config:
        dequeue_interval_seconds: 1
""",
    ) as instance:
        with start_daemon():
            external_pipeline = foo_example_repo.get_full_external_pipeline("foo_pipeline")
            run = create_run(instance, external_pipeline)
            instance.submit_run(run.run_id, external_pipeline)

            poll_for_finished_run(instance, run.run_id)

            logs = instance.all_logs(run.run_id)
            assert_events_in_order(
                logs,
                [
                    "PIPELINE_ENQUEUED",
                    "PIPELINE_DEQUEUED",
                    "PIPELINE_STARTING",
                    "PIPELINE_START",
                    "PIPELINE_SUCCESS",
                ],
            )
def test_queue_from_schedule_and_sensor(instance, foo_example_workspace, foo_example_repo):
    external_schedule = foo_example_repo.get_external_schedule("always_run_schedule")
    external_sensor = foo_example_repo.get_external_sensor("always_on_sensor")
    external_pipeline = foo_example_repo.get_full_external_pipeline("foo_pipeline")

    instance.start_schedule_and_update_storage_state(external_schedule)
    instance.start_sensor(external_sensor)

    with start_daemon(timeout=180):
        run = create_run(instance, external_pipeline)
        instance.submit_run(run.run_id, foo_example_workspace)

        runs = [
            poll_for_finished_run(instance, run.run_id),
            poll_for_finished_run(instance, run_tags=PipelineRun.tags_for_sensor(external_sensor)),
            poll_for_finished_run(
                instance,
                run_tags=PipelineRun.tags_for_schedule(external_schedule),
                timeout=90,
            ),
        ]

        for run in runs:
            logs = instance.all_logs(run.run_id)
            assert_events_in_order(
                logs,
                [
                    "PIPELINE_ENQUEUED",
                    "PIPELINE_DEQUEUED",
                    "PIPELINE_STARTING",
                    "PIPELINE_START",
                    "PIPELINE_SUCCESS",
                ],
            )
def start_api(self):
    """Starts the API, initializes the redis database, and daemonizes all
    processes so they may be restarted or stopped.
    """
    self.db.set('MODE', RESPOND_MODE)

    tornado.web.Application.__init__(self, self.routes)
    server = tornado.httpserver.HTTPServer(self)

    try:
        server.bind(self.conf['api_port'])
    except Exception as e:
        import re
        from vars import KILL_RX, NO_KILL_RX

        with settings(hide('everything'), warn_only=True):
            print "killing whatever is on port %d" % self.conf['api_port']
            kill_list = local("ps -ef | grep %s.py" % self.conf['rpi_id'], capture=True)

            for k in [k.strip() for k in kill_list.splitlines()]:
                for r in NO_KILL_RX:
                    if re.match(r, k) is not None:
                        continue

                # TODO: does this work on rpi, too? (should...)
                has_tty = [t for t in k.split(" ") if len(t) != 0][5]
                if re.match(r'pts/\d+', has_tty):
                    continue

                pid = re.findall(re.compile(KILL_RX % self.conf['rpi_id']), k)
                if len(pid) == 1 and len(pid[0]) >= 1:
                    try:
                        pid = int(pid[0])
                    except Exception as e:
                        continue

                    local("kill -9 %d" % pid)

        server.bind(self.conf['api_port'])

    start_daemon(self.conf['d_files']['api'])

    server.start(self.conf['num_processes'])
    tornado.ioloop.IOLoop.instance().start()
def start_audio_pad(self):
    start_daemon(self.conf["d_files"]["audio"])

    # point ALSA at the bundled asoundrc config
    with settings(warn_only=True):
        local("rm ~/.asoundrc")
        local("ln -s %s ~/.asoundrc" % os.path.join(BASE_DIR, "core", "lib", "alsa-config", "asoundrc"))

    self.restore_audio()

    # listen for audio commands published over redis
    audio_receiver = self.db.pubsub()
    audio_receiver.subscribe(["audio_receiver"])

    pygame.mixer.pre_init(frequency=RATE, size=ENDIAN, channels=2, buffer=AUDIO_BIN_SIZE)
    pygame.init()

    while True:
        for command in audio_receiver.listen():
            if not command["data"]:
                continue

            try:
                command = json.loads(command["data"])
            except Exception as e:
                continue

            print "COMMAND: ", command
            res = {"command": command, "ok": False}

            if "press" in command.keys():
                res["ok"] = self.press(command["press"])
            elif "play" in command.keys():
                res["ok"] = self.play(command["play"], interruptable=command["interruptable"])
            elif "start_recording" in command.keys():
                res["ok"] = self.start_recording(command["start_recording"])
            elif "stop_recording" in command.keys():
                res["ok"] = self.stop_recording()
            elif "stop_audio" in command.keys():
                res["ok"] = self.stop_audio()

            self.db.publish("audio_responder", json.dumps(res))
def test_heartbeat():
    with instance_for_test() as instance:
        assert all_daemons_healthy(instance) is False

        with start_daemon():
            time.sleep(5)
            assert all_daemons_healthy(instance) is True

        frozen_datetime = pendulum.now().add(
            seconds=DEFAULT_HEARTBEAT_INTERVAL_SECONDS
            + DEFAULT_DAEMON_HEARTBEAT_TOLERANCE_SECONDS
            + 5
        )
        with pendulum.test(frozen_datetime):
            assert all_daemons_healthy(instance) is False
def test_queue_from_schedule_and_sensor(tmpdir, foo_example_repo):
    dagster_home_path = tmpdir.strpath
    with setup_instance(
        dagster_home_path,
        """run_coordinator:
    module: dagster.core.run_coordinator
    class: QueuedRunCoordinator
    config:
        dequeue_interval_seconds: 1
""",
    ) as instance:
        external_schedule = foo_example_repo.get_external_schedule("always_run_schedule")
        external_sensor = foo_example_repo.get_external_sensor("always_on_sensor")
        external_pipeline = foo_example_repo.get_full_external_pipeline("foo_pipeline")

        instance.start_schedule_and_update_storage_state(external_schedule)
        instance.start_sensor(external_sensor)

        with start_daemon(timeout=180):
            run = create_run(instance, external_pipeline)
            instance.submit_run(run.run_id, external_pipeline)

            runs = [
                poll_for_finished_run(instance, run.run_id),
                poll_for_finished_run(
                    instance, run_tags=PipelineRun.tags_for_sensor(external_sensor)),
                poll_for_finished_run(
                    instance,
                    run_tags=PipelineRun.tags_for_schedule(external_schedule),
                    timeout=90,
                ),
            ]

            for run in runs:
                logs = instance.all_logs(run.run_id)
                assert_events_in_order(
                    logs,
                    [
                        "PIPELINE_ENQUEUED",
                        "PIPELINE_DEQUEUED",
                        "PIPELINE_STARTING",
                        "PIPELINE_START",
                        "PIPELINE_SUCCESS",
                    ],
                )
def test_heartbeat(tmpdir,):
    dagster_home_path = tmpdir.strpath
    with setup_instance(dagster_home_path, "") as instance:
        assert all_daemons_healthy(instance) is False

        with start_daemon():
            time.sleep(5)
            assert all_daemons_healthy(instance) is True

        frozen_datetime = pendulum.now().add(
            seconds=DEFAULT_HEARTBEAT_INTERVAL_SECONDS
            + DEFAULT_DAEMON_HEARTBEAT_TOLERANCE_SECONDS
            + 5
        )
        with pendulum.test(frozen_datetime):
            assert all_daemons_healthy(instance) is False
def send_sms(email, passwd, phone_number, text):
    if type(text) != type(unicode()):
        raise UnicodeError

    logger = logger_init('critical')
    mmp = MMPConnection(email, passwd, logger)
    daemon = utils.start_daemon(asyncore_loop, (), 'asyncore_loop')
    time.sleep(5)  # without a timeout it sometimes glitches

    run = 1
    send = 0
    while run:
        if mmp.state == "closed":
            run = 0
            break
        if mmp.state == "session_established":
            if in_contacs(mmp, phone_number):
                try:
                    mmp.mmp_send_sms(phone_number, text.encode("cp1251"))
                except:
                    run = 0
                    break
                send = 1
                run = 0
                break
            else:
                try:
                    mmp.mmp_add_sms_contact(phone_number[1:])
                except:
                    run = 0
                    break
                time.sleep(3)
                try:
                    mmp.mmp_send_sms(phone_number, text.encode("cp1251"))
                except:
                    run = 0
                    break
                send = 1
                run = 0
                break

    mmp.close()
    return send
def ask_daemon(view, callback, ask_type, location=None):
    logger.info('JEDI ask daemon for "{0}"'.format(ask_type))

    window_id = view.window().id()
    if window_id not in DAEMONS:
        # there is no api to get current project's name
        # so force user to enter it in settings or use first folder in project
        first_folder = ''
        if view.window().folders():
            first_folder = os.path.split(view.window().folders()[0])[-1]
        project_name = get_settings_param(
            view,
            'project_name',
            first_folder,
        )

        daemon = start_daemon(
            window_id=window_id,
            interp=get_settings_param(view, 'python_interpreter_path', 'python'),
            extra_packages=get_settings_param(view, 'python_package_paths', []),
            project_name=project_name,
            complete_funcargs=get_settings_param(view, 'auto_complete_function_params', 'all'),
        )
        DAEMONS[window_id] = daemon

    if location is None:
        location = view.sel()[0].begin()
    current_line, current_column = view.rowcol(location)
    source = view.substr(sublime.Region(0, view.size()))

    if PY3:
        uuid = uuid1().hex
    else:
        uuid = uuid1().get_hex()

    data = {
        'source': source,
        'line': current_line + 1,
        'offset': current_column,
        'filename': view.file_name() or '',
        'type': ask_type,
        'uuid': uuid,
    }

    DAEMONS[window_id].stdin.put_nowait((callback, data))
def test_queued_runs(instance, foo_example_workspace, foo_example_repo):
    with start_daemon():
        external_pipeline = foo_example_repo.get_full_external_pipeline("foo_pipeline")
        run = create_run(instance, external_pipeline)
        instance.submit_run(run.run_id, foo_example_workspace)

        poll_for_finished_run(instance, run.run_id)

        logs = instance.all_logs(run.run_id)
        assert_events_in_order(
            logs,
            [
                "PIPELINE_ENQUEUED",
                "PIPELINE_DEQUEUED",
                "PIPELINE_STARTING",
                "PIPELINE_START",
                "PIPELINE_SUCCESS",
            ],
        )
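# The dagster tests above rely on a `start_daemon` context manager from the project's
# test utilities, whose implementation is not shown here. A minimal sketch of such a
# helper follows, assuming the real `dagster-daemon run` CLI entry point is on PATH and
# that DAGSTER_HOME is already set for the instance under test; the name
# `start_daemon_sketch` and the `timeout` handling are illustrative assumptions, not
# the actual helper.
import subprocess
from contextlib import contextmanager


@contextmanager
def start_daemon_sketch(timeout=60):
    # Launch the daemon process in the background for the duration of the test.
    process = subprocess.Popen(["dagster-daemon", "run"])
    try:
        yield process
    finally:
        # Tear the daemon down so it does not outlive the test.
        process.terminate()
        process.wait(timeout=timeout)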
def run_script(self):
    start_daemon(self.conf['d_files']['module'])
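# In the Raspberry Pi style snippets (setup_video, start_api, start_audio_pad,
# run_script), start_daemon/stop_daemon take a dict of daemon files from self.conf.
# The layout of that dict is not shown; a minimal sketch under the assumption that it
# holds a 'pid' path used as a run flag might look like this (both function names are
# hypothetical):
import os


def start_daemon_files(d_files):
    # Record the current process id so the service can be monitored or stopped later.
    with open(d_files['pid'], 'wb+') as pid_file:
        pid_file.write(str(os.getpid()).encode())


def stop_daemon_files(d_files):
    # Remove the pid file to signal that the service has shut down.
    if os.path.exists(d_files['pid']):
        os.remove(d_files['pid'])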
def mmp_send_avatar_request(self, mail, ackf, acka={}):
    utils.start_daemon(self._get_avatar, (mail, ackf, acka))
def mmp_send_wp_request(self, fields, ackf=None, acka={}, add=False):
    self.wp_req_pool.append([fields, ackf, acka, add])
    if len(self.wp_req_pool) == 1:
        utils.start_daemon(self._send_ws_req, (), 'anketa')
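# The MMP snippets call `utils.start_daemon(target, args, name)`, which by its call
# signature looks like a thin wrapper that runs a callable in a background daemon
# thread. A minimal sketch under that assumption (the name `start_daemon_thread` is
# illustrative, not the library's own helper):
import threading


def start_daemon_thread(target, args=(), name=None):
    # Run target(*args) in a background thread that will not block interpreter exit.
    thread = threading.Thread(target=target, args=args, name=name)
    thread.daemon = True
    thread.start()
    return thread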