def control_loop():
    '''Run the main loop which periodically pushes the capture agent state
    upstream until a shutdown is requested via terminate().
    '''
    set_service_status(Service.AGENTSTATE, ServiceStatus.BUSY)
    while not terminate():
        update_agent_state()
        # Sleep in small slices until the next update is due so that a
        # termination request is noticed quickly.
        deadline = timestamp() + config()['agent']['update_frequency']
        while not terminate() and timestamp() < deadline:
            time.sleep(0.1)
    logger.info('Shutting down agentstate service')
    set_service_status(Service.AGENTSTATE, ServiceStatus.STOPPED)
def start_capture(upcoming_event):
    '''Start the capture process, creating all necessary files and directories
    as well as ingesting the captured files if no backup mode is configured.
    '''
    logger.info('Start recording')

    # Make sure the event exists in the recorded events table, moving it
    # over from the upcoming events if necessary.
    session = get_session()
    recording = session.query(RecordedEvent).filter(
            RecordedEvent.uid == upcoming_event.uid,
            RecordedEvent.start == upcoming_event.start).first()
    if not recording:
        recording = RecordedEvent(upcoming_event)
        session.add(recording)
        session.commit()

    # Prepare the recording directory
    duration = recording.end - timestamp()
    try_mkdir(config()['capture']['directory'])
    os.mkdir(recording.directory())

    # Advertise that we are busy capturing
    set_service_status_immediate(Service.CAPTURE, ServiceStatus.BUSY)
    recording_state(recording.uid, 'capturing')
    update_event_status(recording, Status.RECORDING)

    # Run the actual capture command and remember the resulting tracks
    tracks = recording_command(recording.directory(), recording.name(),
                               duration)
    recording.set_tracks(tracks)
    session.commit()

    # Back to idle
    set_service_status_immediate(Service.CAPTURE, ServiceStatus.IDLE)
    update_event_status(recording, Status.FINISHED_RECORDING)
def control_loop():
    '''Main loop of the capture agent: periodically check the schedule and
    kick off the capture process when a scheduled event is due.
    '''
    set_service_status(Service.CAPTURE, ServiceStatus.IDLE)
    while not terminate():
        # Look for an event which should be recording right now
        upcoming = get_session().query(UpcomingEvent)\
                                .filter(UpcomingEvent.start <= timestamp())\
                                .filter(UpcomingEvent.end > timestamp())\
                                .first()
        if upcoming:
            safe_start_capture(upcoming)
        time.sleep(1.0)
    logger.info('Shutting down capture service')
    set_service_status(Service.CAPTURE, ServiceStatus.STOPPED)
def control_loop():
    '''Main loop which periodically fetches the recording schedule and logs
    the next upcoming recording, if any.
    '''
    set_service_status(Service.SCHEDULE, ServiceStatus.BUSY)
    while not terminate():
        # Try getting an updated schedule
        get_schedule()
        pending = get_session().query(UpcomingEvent)\
                               .filter(UpcomingEvent.end > timestamp())
        if pending.count():
            logger.info('Next scheduled recording: %s',
                        datetime.fromtimestamp(pending[0].start))
        else:
            logger.info('No scheduled recording')
        # Wait for the next update cycle, checking regularly whether we
        # should shut down instead.
        deadline = timestamp() + config()['agent']['update_frequency']
        while not terminate() and timestamp() < deadline:
            time.sleep(0.1)
    logger.info('Shutting down schedule service')
    set_service_status(Service.SCHEDULE, ServiceStatus.STOPPED)
def get_schedule():
    '''Try to load schedule from the Matterhorn core. Returns a valid
    schedule or None on failure.
    '''
    # Build the calendar request, optionally limited by the configured
    # lookahead window (days converted to milliseconds for the cutoff).
    query = {'agentid': config('agent', 'name').encode('utf8')}
    lookahead = config('agent', 'cal_lookahead') * 24 * 60 * 60
    if lookahead:
        query['cutoff'] = str((timestamp() + lookahead) * 1000)
    uri = '%s/calendars?%s' % (service('scheduler')[0], urlencode(query))
    try:
        vcal = http_request(uri)
        UpstreamState.update_sync_time(config('server', 'url'))
    except pycurl.error as e:
        logger.error('Could not get schedule: %s', e)
        return
    try:
        cal = parse_ical(vcal.decode('utf-8'))
    except Exception:
        logger.error('Could not parse ical')
        logger.error(traceback.format_exc())
        return
    # Replace the cached schedule with the newly retrieved one
    session = get_session()
    session.query(UpcomingEvent).delete()
    for entry in cal:
        # Skip entries which have already ended
        if entry['dtend'] <= timestamp():
            continue
        upcoming = UpcomingEvent()
        upcoming.start = entry['dtstart']
        upcoming.end = entry['dtend']
        upcoming.uid = entry.get('uid')
        upcoming.title = entry.get('summary')
        upcoming.set_data(entry)
        session.add(upcoming)
    session.commit()
def get_schedule():
    '''Try to load schedule from the Matterhorn core. Returns a valid
    schedule or None on failure.
    '''
    try:
        uri = '%s/calendars?agentid=%s' % (config()['service-scheduler'][0],
                                           config()['agent']['name'])
        lookahead = config()['agent']['cal_lookahead'] * 24 * 60 * 60
        if lookahead:
            uri += '&cutoff=%i' % ((timestamp() + lookahead) * 1000)
        vcal = http_request(uri)
    except Exception as e:
        # Silently ignore the error if the capture agent is not yet
        # registered (HTTP 404). Exceptions do not necessarily carry two
        # arguments, so guard the index access: the previous unconditional
        # `e.args[1]` raised an IndexError inside this handler for any
        # exception with fewer than two args.
        status = e.args[1] if len(e.args) > 1 else None
        if status != 404:
            logger.error('Could not get schedule')
            logger.error(traceback.format_exc())
        return

    try:
        cal = parse_ical(vcal.decode('utf-8'))
    except Exception:
        logger.error('Could not parse ical')
        logger.error(traceback.format_exc())
        return

    # Replace the cached schedule with the newly retrieved one
    db = get_session()
    db.query(UpcomingEvent).delete()
    for event in cal:
        # Ignore events that have already ended
        if event['dtend'] <= timestamp():
            continue
        e = UpcomingEvent()
        e.start = event['dtstart']
        e.end = event['dtend']
        e.uid = event.get('uid')
        e.set_data(event)
        db.add(e)
    db.commit()
def setUp(self):
    '''Create a temporary database and capture directory holding a single
    already-finished recording to be used as test fixture.
    '''
    ingest.http_request = lambda x, y=False: b'xxx'
    self.fd, self.dbfile = tempfile.mkstemp()
    self.cadir = tempfile.mkdtemp()
    config.config('agent')['database'] = 'sqlite:///' + self.dbfile
    config.config('capture')['directory'] = self.cadir
    config.config()['services']['org.opencastproject.ingest'] = ['']
    config.config()['services']['org.opencastproject.capture.admin'] = ['']

    # Build a mocked, finished recording
    db.init()
    recorded = db.RecordedEvent()
    recorded.uid = '123123'
    recorded.status = db.Status.FINISHED_RECORDING
    recorded.start = utils.timestamp()
    recorded.end = recorded.start + 1
    prop = 'org.opencastproject.capture.agent.properties'
    dcns = 'http://www.opencastproject.org/xsd/1.0/dublincore/'
    attachments = [
        {'data': u'äü%sÄÜß' % dcns,
         'fmttype': 'application/xml',
         'x-apple-filename': 'episode.xml'},
        {'data': u'äü%sÄÜß' % dcns,
         'fmttype': 'application/xml',
         'x-apple-filename': 'series.xml'},
        {'data': u'event.title=äüÄÜß\n' +
                 u'org.opencastproject.workflow.config.x=123\n' +
                 u'org.opencastproject.workflow.definition=fast',
         'fmttype': 'application/text',
         'x-apple-filename': prop}]
    recorded.set_data({'attach': attachments})

    # Create an empty track file on disk for the recording
    os.mkdir(recorded.directory())
    trackfile = os.path.join(recorded.directory(), 'test.mp4')
    with open(trackfile, 'wb'):
        pass
    recorded.set_tracks([('presenter/source', trackfile)])
    session = db.get_session()
    session.add(recorded)
    session.commit()
    self.event = db.RecordedEvent(recorded)
def logs():
    '''Serve a JSON representation of logs.
    '''
    cmd = config('ui', 'log_command')
    if not cmd:
        return make_error_response('Logs are disabled.', 404)
    # Run the configured command and split its combined output into lines
    result = subprocess.run(cmd, shell=True, check=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    lines = result.stdout.decode('utf-8').rstrip().split('\n')
    return make_data_response({
        'id': str(timestamp()),
        'type': 'logs',
        'attributes': {
            'lines': lines
        }
    })
def setUp(self):
    '''Prepare a temporary configuration and database together with a
    mocked upcoming event for the capture tests.
    '''
    utils.http_request = lambda x, y=False: b'xxx'
    self.fd, self.dbfile = tempfile.mkstemp()
    self.cadir = tempfile.mkdtemp()
    preview = os.path.join(self.cadir, 'preview.png')
    with open(preview, 'a'):
        pass
    config.config()['agent']['database'] = 'sqlite:///' + self.dbfile
    config.config()['capture']['command'] = 'touch {{dir}}/{{name}}.mp4'
    config.config()['capture']['directory'] = self.cadir
    config.config()['capture']['preview'] = [preview]
    config.config()['services']['org.opencastproject.capture.admin'] = ['']

    # Mock an upcoming event
    db.init()
    self.event = db.BaseEvent()
    self.event.uid = '123123'
    self.event.title = u'äüÄÜß'
    self.event.start = utils.timestamp()
    self.event.end = self.event.start
    self.event.status = db.Status.UPCOMING
    attachments = [
        {'data': u'äüÄÜß',
         'fmttype': 'application/xml',
         'x-apple-filename': 'episode.xml'},
        {'data': u'äüÄÜß',
         'fmttype': 'application/xml',
         'x-apple-filename': 'series.xml'},
        {'data': u'event.title=äüÄÜß\n' +
                 u'org.opencastproject.workflow.config.x=123\n' +
                 u'org.opencastproject.workflow.definition=fast',
         'fmttype': 'application/text',
         'x-apple-filename': 'org.opencastproject.capture.agent' +
                             '.properties'}]
    self.event.set_data({'attach': attachments})
def setUp(self):
    '''Set up a temporary configuration, database and capture directory
    containing a single mocked recorded event with one small track file.
    '''
    utils.http_request = lambda x, y=False: b'xxx'
    ingest.http_request = lambda x, y=False: b'xxx'
    self.fd, self.dbfile = tempfile.mkstemp()
    self.cadir = tempfile.mkdtemp()
    config.config()['agent']['database'] = 'sqlite:///' + self.dbfile
    config.config()['capture']['directory'] = self.cadir
    config.config()['service-ingest'] = ['']
    config.config()['service-capture.admin'] = ['']

    # Mock a recorded event
    db.init()
    self.event = db.RecordedEvent()
    self.event.uid = '123123'
    self.event.start = utils.timestamp()
    self.event.end = self.event.start + 1
    attachments = [
        {'data': u'äüÄÜß',
         'fmttype': 'application/xml',
         'x-apple-filename': 'episode.xml'},
        {'data': u'äüÄÜß',
         'fmttype': 'application/xml',
         'x-apple-filename': 'series.xml'},
        {'data': u'event.title=äüÄÜß\n' +
                 u'org.opencastproject.workflow.config.x=123\n' +
                 u'org.opencastproject.workflow.definition=fast',
         'fmttype': 'application/text',
         'x-apple-filename': 'org.opencastproject.capture.agent' +
                             '.properties'}]
    self.event.set_data({'attach': attachments})

    # Create a small fake recording on disk
    os.mkdir(self.event.directory())
    trackfile = os.path.join(self.event.directory(), 'test.mp4')
    with open(trackfile, 'wb') as f:
        f.write(b'123')
    self.event.set_tracks([('presenter/source', trackfile)])
def recording_command(event):
    '''Run the actual command to record the a/v material.

    :param event: The event to record.
    :return: List of (flavor, path) tuples for the expected recordings.
    :raises RuntimeError: If the capture command returned an unexpected
                          non-zero exit code.
    '''
    conf = config('capture')

    # Prepare command line
    cmd = conf['command']
    cmd = cmd.replace('{{time}}', str(event.remaining_duration(timestamp())))
    cmd = cmd.replace('{{dir}}', event.directory())
    cmd = cmd.replace('{{name}}', event.name())
    cmd = cmd.replace('{{previewdir}}', conf['preview_dir'])

    # Signal configuration. Negative times disable the corresponding signal;
    # otherwise the signal fires that many seconds after the scheduled end
    # of the event.
    sigterm_time = conf['sigterm_time']
    sigkill_time = conf['sigkill_time']
    sigcustom_time = conf['sigcustom_time']
    sigcustom_time = 0 if sigcustom_time < 0 else event.end + sigcustom_time
    sigterm_time = 0 if sigterm_time < 0 else event.end + sigterm_time
    sigkill_time = 0 if sigkill_time < 0 else event.end + sigkill_time

    # Launch capture command.
    # Note: the previous `getattr(subprocess, 'DEVNULL', os.open(...))`
    # evaluated the fallback eagerly, leaking one file descriptor per call
    # on every Python version where subprocess.DEVNULL exists.
    logger.info(cmd)
    args = shlex.split(cmd)
    if hasattr(subprocess, 'DEVNULL'):
        captureproc = subprocess.Popen(args, stdin=subprocess.DEVNULL)
    else:
        # Legacy fallback: open /dev/null manually and close it once the
        # child process has inherited it.
        devnull = os.open(os.devnull, os.O_RDWR)
        try:
            captureproc = subprocess.Popen(args, stdin=devnull)
        finally:
            os.close(devnull)

    # Set systemd status
    notify.notify('STATUS=Capturing')

    # Check process and send the configured signals once their time has come
    while captureproc.poll() is None:
        notify.notify('WATCHDOG=1')
        if sigcustom_time and timestamp() > sigcustom_time:
            logger.info("Sending custom signal to capture process")
            captureproc.send_signal(conf['sigcustom'])
            sigcustom_time = 0  # send only once
        if sigterm_time and timestamp() > sigterm_time:
            logger.info("Terminating capture process")
            captureproc.terminate()
            sigterm_time = 0  # send only once
        elif sigkill_time and timestamp() > sigkill_time:
            logger.warning("Killing capture process")
            captureproc.kill()
            sigkill_time = 0  # send only once
        time.sleep(0.1)

    # Remove preview files:
    for preview in conf['preview']:
        try:
            os.remove(preview.replace('{{previewdir}}', conf['preview_dir']))
        except OSError:
            logger.warning('Could not remove preview files')
            logger.warning(traceback.format_exc())

    # Check process for errors. Use `conf` for consistency with the other
    # configuration lookups in this function.
    exitcode = conf['exit_code']
    if captureproc.poll() > 0 and captureproc.returncode != exitcode:
        raise RuntimeError('Recording failed (%i)' % captureproc.returncode)

    # Reset systemd status
    notify.notify('STATUS=Waiting')

    # Return [(flavor,path),…]
    files = (f.replace('{{dir}}', event.directory()) for f in conf['files'])
    files = (f.replace('{{name}}', event.name()) for f in files)
    return list(zip(conf['flavors'], files))
def recording_command(event):
    '''Run the actual command to record the a/v material.

    :param event: The event to record.
    :return: List of expected recording file paths.
    :raises RuntimeError: If the capture command returned an unexpected
                          non-zero exit code.
    '''
    conf = config('capture')

    # Prepare command line
    cmd = conf['command']
    cmd = cmd.replace('{{time}}', str(event.remaining_duration(timestamp())))
    cmd = cmd.replace('{{dir}}', event.directory())
    cmd = cmd.replace('{{name}}', event.name())
    cmd = cmd.replace('{{previewdir}}', conf['preview_dir'])

    # Parse files into list
    files = (f.replace('{{dir}}', event.directory()) for f in conf['files'])
    files = [f.replace('{{name}}', event.name()) for f in files]

    # Move existing files from previous failed recordings out of the way
    # instead of letting the new recording overwrite them.
    for f in files:
        if not os.path.exists(f):
            continue
        # Find a free `-part-<i>` suffix for the old file
        i = 0
        while True:
            new_filename = f'{f}-part-{i}'
            if not os.path.exists(new_filename):
                break
            i += 1
        # Move file
        os.rename(f, new_filename)
        logger.warning("Moved file %s to %s to keep it", f, new_filename)

    # Signal configuration. Negative times disable the corresponding signal;
    # otherwise the signal fires that many seconds after the scheduled end
    # of the event.
    sigterm_time = conf['sigterm_time']
    sigkill_time = conf['sigkill_time']
    sigcustom_time = conf['sigcustom_time']
    sigcustom_time = 0 if sigcustom_time < 0 else event.end + sigcustom_time
    sigterm_time = 0 if sigterm_time < 0 else event.end + sigterm_time
    sigkill_time = 0 if sigkill_time < 0 else event.end + sigkill_time

    # Launch capture command. This file already uses f-strings (Python 3.6+),
    # so subprocess.DEVNULL is always available; the previous
    # `getattr(subprocess, 'DEVNULL', os.open(...))` evaluated the fallback
    # eagerly and leaked one file descriptor per call.
    logger.info(cmd)
    args = shlex.split(cmd)
    captureproc = subprocess.Popen(args, stdin=subprocess.DEVNULL)

    # Set systemd status
    notify.notify('STATUS=Capturing')

    # Check process and send the configured signals once their time has come
    while captureproc.poll() is None:
        notify.notify('WATCHDOG=1')
        if sigcustom_time and timestamp() > sigcustom_time:
            logger.info("Sending custom signal to capture process")
            captureproc.send_signal(conf['sigcustom'])
            sigcustom_time = 0  # send only once
        if sigterm_time and timestamp() > sigterm_time:
            logger.info("Terminating capture process")
            captureproc.terminate()
            sigterm_time = 0  # send only once
        elif sigkill_time and timestamp() > sigkill_time:
            logger.warning("Killing capture process")
            captureproc.kill()
            sigkill_time = 0  # send only once
        time.sleep(0.1)

    # Remove preview files:
    for preview in conf['preview']:
        try:
            os.remove(preview.replace('{{previewdir}}', conf['preview_dir']))
        except OSError:
            logger.warning('Could not remove preview files', exc_info=True)

    # Check process for errors
    exitcode = conf['exit_code']
    if captureproc.poll() > 0 and captureproc.returncode != exitcode:
        raise RuntimeError('Recording failed (%i)' % captureproc.returncode)

    # Reset systemd status
    notify.notify('STATUS=Waiting')

    # files
    return files