def main():
    # Check command line options
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hc:', ['help', 'config='])
    except getopt.GetoptError:
        usage(1)

    cfg = None
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage()
        if opt in ('-c', '--config'):
            cfg = arg
            break

    # Make sure we got only one command
    if len(args) > 1:
        usage(2)

    try:
        config.update_configuration(cfg)
    except ValueError as e:
        print(str(e))
        sys.exit(4)

    # Set signal handler
    signal.signal(signal.SIGINT, sigint_handler)
    signal.signal(signal.SIGTERM, sigterm_handler)

    # Get the command with `run` as default
    cmd = (args + ['run'])[0]

    if cmd == 'run':
        # ensure database is created first
        get_session().close()
        run_all(schedule, capture, ingest, agentstate)
    elif cmd == 'all':
        get_session().close()
        signal.signal(signal.SIGINT, signal.default_int_handler)
        multiprocessing.Process(
            target=ui.app.run,
            kwargs={'threaded': False}
        ).start()
        run_all(schedule, capture, ingest, agentstate)
    elif cmd == 'schedule':
        schedule.run()
    elif cmd == 'capture':
        capture.run()
    elif cmd == 'ingest':
        ingest.run()
    elif cmd == 'agentstate':
        agentstate.run()
    elif cmd == 'ui':
        signal.signal(signal.SIGINT, signal.default_int_handler)
        ui.app.run(threaded=False)
    else:
        # Invalid command
        usage(3)

def test_get_schedule(self):
    # Failed request
    schedule.http_request = should_fail
    schedule.get_schedule()
    assert not db.get_session().query(db.UpcomingEvent).count()

    # Failed parsing ical
    schedule.http_request = lambda x: ShouldFailException
    schedule.get_schedule()
    assert not db.get_session().query(db.UpcomingEvent).count()

    # Get schedule
    schedule.http_request = lambda x: self.VCAL
    schedule.get_schedule()
    assert db.get_session().query(db.UpcomingEvent).count()

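# The helpers `should_fail` and `ShouldFailException` used in the test above
# are not part of this collection. A minimal sketch of what they are assumed
# to look like (the names come from the test, the implementation is assumed):
class ShouldFailException(Exception):
    pass


def should_fail(*args, **kwargs):
    # Stand-in for http_request that always raises, so get_schedule()
    # exercises its error handling.
    raise ShouldFailException()
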
def start_capture(upcoming_event):
    '''Start the capture process, creating all necessary files and
    directories as well as ingesting the captured files if no backup mode is
    configured.
    '''
    logger.info('Start recording')

    # First move event to recording_event table
    db = get_session()
    event = db.query(RecordedEvent)\
              .filter(RecordedEvent.uid == upcoming_event.uid)\
              .filter(RecordedEvent.start == upcoming_event.start)\
              .first()
    if not event:
        event = RecordedEvent(upcoming_event)
        db.add(event)
        db.commit()

    try_mkdir(config()['capture']['directory'])
    os.mkdir(event.directory())

    # Set state
    update_event_status(event, Status.RECORDING)
    recording_state(event.uid, 'capturing')
    set_service_status_immediate(Service.CAPTURE, ServiceStatus.BUSY)

    # Recording
    tracks = recording_command(event)
    event.set_tracks(tracks)
    db.commit()

    # Set status
    update_event_status(event, Status.FINISHED_RECORDING)
    recording_state(event.uid, 'capture_finished')
    set_service_status_immediate(Service.CAPTURE, ServiceStatus.IDLE)

def start_capture(upcoming_event):
    '''Start the capture process, creating all necessary files and
    directories as well as ingesting the captured files if no backup mode is
    configured.
    '''
    logger.info('Start recording')

    # First move event to recording_event table
    db = get_session()
    event = db.query(RecordedEvent)\
              .filter(RecordedEvent.uid == upcoming_event.uid)\
              .filter(RecordedEvent.start == upcoming_event.start)\
              .first()
    if not event:
        event = RecordedEvent(upcoming_event)
        db.add(event)
        db.commit()

    duration = event.end - timestamp()
    try_mkdir(config()['capture']['directory'])
    os.mkdir(event.directory())

    # Set state
    set_service_status_immediate(Service.CAPTURE, ServiceStatus.BUSY)
    recording_state(event.uid, 'capturing')
    update_event_status(event, Status.RECORDING)

    # Recording
    tracks = recording_command(event.directory(), event.name(), duration)
    event.set_tracks(tracks)
    db.commit()

    # Set status
    set_service_status_immediate(Service.CAPTURE, ServiceStatus.IDLE)
    recording_state(event.uid, 'capture_finished')
    update_event_status(event, Status.FINISHED_RECORDING)

def modify_event(uid):
    '''Modify an event specified by its uid. The modifications for the event
    are expected as JSON with the content type correctly set in the request.

    Note that this method works for recorded events only. Upcoming events
    part of the scheduler cache cannot be modified.
    '''
    try:
        data = request.get_json()['data'][0]
        if data['type'] != 'event' or data['id'] != uid:
            return make_error_response('Invalid data', 400)
        # Check attributes
        for key in data['attributes'].keys():
            if key not in ('status', 'start', 'end'):
                return make_error_response('Invalid data', 400)
        # Check new status
        new_status = data['attributes'].get('status')
        if new_status:
            new_status = new_status.upper().replace(' ', '_')
            data['attributes']['status'] = int(getattr(Status, new_status))
    except Exception:
        return make_error_response('Invalid data', 400)

    db = get_session()
    event = db.query(RecordedEvent).filter(RecordedEvent.uid == uid).first()
    if not event:
        return make_error_response('No event with specified uid', 404)
    event.start = data['attributes'].get('start', event.start)
    event.end = data['attributes'].get('end', event.end)
    event.status = data['attributes'].get('status', event.status)
    logger.debug('Updating event %s via api', uid)
    db.commit()

    return make_data_response(event.serialize())

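# Example request body accepted by modify_event() above, derived from the
# checks in the code (the route decorator and URL are not shown here):
#
#   {
#       "data": [{
#           "type": "event",
#           "id": "<uid>",
#           "attributes": {
#               "start": 1546300800,
#               "end": 1546304400,
#               "status": "finished uploading"
#           }
#       }]
#   }
#
# A textual status is uppercased and spaces are replaced by underscores before
# it is mapped to the corresponding `Status` constant
# (e.g. "finished uploading" -> Status.FINISHED_UPLOADING).
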
def control_loop():
    '''Main loop, retrieving the schedule.
    '''
    set_service_status_immediate(Service.SCHEDULE, ServiceStatus.BUSY)
    notify.notify('READY=1')
    while not terminate():
        notify.notify('WATCHDOG=1')
        # Try getting an updated schedule
        get_schedule()
        session = get_session()
        next_event = session.query(UpcomingEvent)\
                            .filter(UpcomingEvent.end > timestamp())\
                            .order_by(UpcomingEvent.start)\
                            .first()
        if next_event:
            logger.info('Next scheduled recording: %s',
                        datetime.fromtimestamp(next_event.start))
            notify.notify('STATUS=Next scheduled recording: %s' %
                          datetime.fromtimestamp(next_event.start))
        else:
            logger.info('No scheduled recording')
            notify.notify('STATUS=No scheduled recording')
        session.close()

        next_update = timestamp() + config('agent', 'update_frequency')
        while not terminate() and timestamp() < next_update:
            time.sleep(0.1)

    logger.info('Shutting down schedule service')
    set_service_status_immediate(Service.SCHEDULE, ServiceStatus.STOPPED)

def get_schedule():
    '''Try to load schedule from the Matterhorn core. Returns a valid schedule
    or None on failure.
    '''
    uri = '%s/calendars?agentid=%s' % (config()['service-scheduler'][0],
                                       config()['agent']['name'])
    lookahead = config()['agent']['cal_lookahead'] * 24 * 60 * 60
    if lookahead:
        uri += '&cutoff=%i' % ((timestamp() + lookahead) * 1000)
    try:
        vcal = http_request(uri)
    except pycurl.error as e:
        logger.error('Could not get schedule: %s' % e)
        return

    try:
        cal = parse_ical(vcal.decode('utf-8'))
    except Exception:
        logger.error('Could not parse ical')
        logger.error(traceback.format_exc())
        return

    db = get_session()
    db.query(UpcomingEvent).delete()
    for event in cal:
        # Ignore events that have already ended
        if event['dtend'] <= timestamp():
            continue
        e = UpcomingEvent()
        e.start = event['dtstart']
        e.end = event['dtend']
        e.uid = event.get('uid')
        e.title = event.get('summary')
        e.set_data(event)
        db.add(e)
    db.commit()

def update_event_status(event, status):
    '''Update the status of a particular event in the database.
    '''
    dbs = db.get_session()
    dbs.query(db.RecordedEvent)\
       .filter(db.RecordedEvent.start == event.start)\
       .update({'status': status})
    event.status = status
    dbs.commit()

def get_service_status(service):
    '''Get the status of a particular service from the database.
    '''
    dbs = db.get_session()
    srvs = dbs.query(db.ServiceStates)\
              .filter(db.ServiceStates.type == service)
    if srvs.count():
        return srvs[0].status
    return db.ServiceStatus.STOPPED

def event(uid):
    '''Return a specific event as JSON.
    '''
    db = get_session()
    event = db.query(RecordedEvent).filter(RecordedEvent.uid == uid).first() \
        or db.query(UpcomingEvent).filter(UpcomingEvent.uid == uid).first()
    if event:
        return make_data_response(event.serialize())
    return make_error_response('No event with specified uid', 404)

def set_service_status(service, status):
    '''Update the status of a particular service in the database.
    '''
    srv = db.ServiceStates()
    srv.type = service
    srv.status = status
    dbs = db.get_session()
    dbs.merge(srv)
    dbs.commit()
    dbs.close()

def add_test_event(self):
    event = db.RecordedEvent()
    event.uid = '123'
    event.start = 1
    event.end = 2
    event.set_data('')
    event.status = db.Status.FINISHED_UPLOADING
    session = db.get_session()
    session.add(event)
    session.commit()
    return db.RecordedEvent(event)

def events():
    '''Serve a JSON representation of events.
    '''
    db = get_session()
    upcoming_events = db.query(UpcomingEvent)\
                        .order_by(UpcomingEvent.start)
    recorded_events = db.query(RecordedEvent)\
                        .order_by(RecordedEvent.start.desc())
    result = [event.serialize() for event in upcoming_events]
    result += [event.serialize() for event in recorded_events]
    return make_data_response(result)

def set_service_status(service, status):
    '''Update the status of a particular service in the database.
    '''
    dbs = db.get_session()
    s = dbs.query(db.ServiceStates).filter(db.ServiceStates.type == service)
    if s.count():
        s.update({'status': status})
    else:
        srv = db.ServiceStates()
        srv.type = service
        srv.status = status
        dbs.add(srv)
    dbs.commit()

def control_loop():
    '''Main loop of the ingest service, checking for finished recordings and
    starting the ingest process if necessary.
    '''
    set_service_status(Service.INGEST, ServiceStatus.IDLE)
    while not terminate():
        # Get next recording
        events = get_session().query(RecordedEvent)\
            .filter(RecordedEvent.status == Status.FINISHED_RECORDING)
        if events.count():
            safe_start_ingest(events[0])
        time.sleep(1.0)

    logger.info('Shutting down ingest service')
    set_service_status(Service.INGEST, ServiceStatus.STOPPED)

def control_loop():
    '''Main loop of the ingest service, checking for finished recordings and
    starting the ingest process if necessary.
    '''
    set_service_status(Service.INGEST, ServiceStatus.IDLE)
    while not terminate():
        # Get next recording
        event = get_session().query(RecordedEvent)\
            .filter(RecordedEvent.status == Status.FINISHED_RECORDING)\
            .first()
        if event:
            safe_start_ingest(event)
        time.sleep(1.0)

    logger.info('Shutting down ingest service')
    set_service_status(Service.INGEST, ServiceStatus.STOPPED)

def control_loop():
    '''Main loop of the capture agent, retrieving and checking the schedule
    as well as starting the capture process if necessary.
    '''
    set_service_status(Service.CAPTURE, ServiceStatus.IDLE)
    while not terminate():
        # Get next recording
        event = get_session().query(UpcomingEvent)\
            .filter(UpcomingEvent.start <= timestamp())\
            .filter(UpcomingEvent.end > timestamp())\
            .first()
        if event:
            safe_start_capture(event)
        time.sleep(1.0)

    logger.info('Shutting down capture service')
    set_service_status(Service.CAPTURE, ServiceStatus.STOPPED)

def setUp(self):
    ingest.http_request = lambda x, y=False: b'xxx'
    self.fd, self.dbfile = tempfile.mkstemp()
    self.cadir = tempfile.mkdtemp()
    config.config('agent')['database'] = 'sqlite:///' + self.dbfile
    config.config('capture')['directory'] = self.cadir
    config.config()['services']['org.opencastproject.ingest'] = ['']
    config.config()['services']['org.opencastproject.capture.admin'] = ['']

    # Mock event
    db.init()
    event = db.RecordedEvent()
    event.uid = '123123'
    event.status = db.Status.FINISHED_RECORDING
    event.start = utils.timestamp()
    event.end = event.start + 1
    prop = 'org.opencastproject.capture.agent.properties'
    dcns = 'http://www.opencastproject.org/xsd/1.0/dublincore/'
    data = [{
        'data': u'äü%sÄÜß' % dcns,
        'fmttype': 'application/xml',
        'x-apple-filename': 'episode.xml'
    }, {
        'data': u'äü%sÄÜß' % dcns,
        'fmttype': 'application/xml',
        'x-apple-filename': 'series.xml'
    }, {
        'data': u'event.title=äüÄÜß\n' +
                u'org.opencastproject.workflow.config.x=123\n' +
                u'org.opencastproject.workflow.definition=fast',
        'fmttype': 'application/text',
        'x-apple-filename': prop
    }]
    event.set_data({'attach': data})

    # Create recording
    os.mkdir(event.directory())
    trackfile = os.path.join(event.directory(), 'test.mp4')
    open(trackfile, 'wb').close()
    event.set_tracks([('presenter/source', trackfile)])

    session = db.get_session()
    session.add(event)
    session.commit()
    self.event = db.RecordedEvent(event)

def home():
    '''Serve the status page of the capture agent.
    '''
    # Get IDs of existing preview images
    preview = config()['capture']['preview']
    previewdir = config()['capture']['preview_dir']
    preview = [p.replace('{{previewdir}}', previewdir) for p in preview]
    preview = zip(preview, range(len(preview)))
    preview = [p[1] for p in preview if os.path.isfile(p[0])]

    # Get limits for recording table
    try:
        limit_upcoming = int(request.args.get('limit_upcoming', 5))
        limit_processed = int(request.args.get('limit_processed', 15))
    except ValueError:
        limit_upcoming = 5
        limit_processed = 15

    db = get_session()
    upcoming_events = db.query(UpcomingEvent)\
                        .order_by(UpcomingEvent.start)\
                        .limit(limit_upcoming)
    recorded_events = db.query(RecordedEvent)\
                        .order_by(RecordedEvent.start.desc())\
                        .limit(limit_processed)

    recording = get_service_status(Service.CAPTURE) == ServiceStatus.BUSY
    uploading = get_service_status(Service.INGEST) == ServiceStatus.BUSY
    processed = db.query(RecordedEvent).count()
    upcoming = db.query(UpcomingEvent).count()

    return render_template('home.html', preview=preview, config=config(),
                           recorded_events=recorded_events,
                           upcoming_events=upcoming_events,
                           recording=recording, uploading=uploading,
                           processed=processed, upcoming=upcoming,
                           limit_upcoming=limit_upcoming,
                           limit_processed=limit_processed,
                           dtfmt=dtfmt)

def start_capture(upcoming_event):
    '''Start the capture process, creating all necessary files and
    directories as well as ingesting the captured files if no backup mode is
    configured.
    '''
    logger.info('Start recording')

    # First move event to recording_event table
    db = get_session()
    event = db.query(RecordedEvent)\
              .filter(RecordedEvent.uid == upcoming_event.uid)\
              .filter(RecordedEvent.start == upcoming_event.start)\
              .first()
    if not event:
        event = RecordedEvent(upcoming_event)
        db.add(event)
        db.commit()

    try_mkdir(config('capture', 'directory'))
    try_mkdir(event.directory())

    # Set state
    update_event_status(event, Status.RECORDING)
    recording_state(event.uid, 'capturing')
    set_service_status_immediate(Service.CAPTURE, ServiceStatus.BUSY)

    # Recording
    files = recording_command(event)
    # Pair each recorded file with its configured flavor: [(flavor, path), …]
    event.set_tracks(list(zip(config('capture', 'flavors'), files)))
    db.commit()

    # Set status
    # If part files exist, it is a partial recording
    p = any([glob.glob(f'{f}-part-*') for f in files])
    state = Status.PARTIAL_RECORDING if p else Status.FINISHED_RECORDING
    logger.info('Set %s to %s', event.uid, Status.str(state))
    update_event_status(event, state)
    recording_state(event.uid, 'capture_finished')
    set_service_status_immediate(Service.CAPTURE, ServiceStatus.IDLE)

    logger.info('Finished recording')

def control_loop():
    '''Main loop, retrieving the schedule.
    '''
    set_service_status(Service.SCHEDULE, ServiceStatus.BUSY)
    while not terminate():
        # Try getting an updated schedule
        get_schedule()
        q = get_session().query(UpcomingEvent)\
                         .filter(UpcomingEvent.end > timestamp())
        if q.count():
            logger.info('Next scheduled recording: %s',
                        datetime.fromtimestamp(q[0].start))
        else:
            logger.info('No scheduled recording')

        next_update = timestamp() + config()['agent']['update_frequency']
        while not terminate() and timestamp() < next_update:
            time.sleep(0.1)

    logger.info('Shutting down schedule service')
    set_service_status(Service.SCHEDULE, ServiceStatus.STOPPED)

def control_loop():
    '''Main loop of the capture agent, retrieving and checking the schedule
    as well as starting the capture process if necessary.
    '''
    set_service_status_immediate(Service.CAPTURE, ServiceStatus.IDLE)
    notify.notify('READY=1')
    notify.notify('STATUS=Waiting')
    while not terminate():
        notify.notify('WATCHDOG=1')
        # Get next recording
        session = get_session()
        event = session.query(UpcomingEvent)\
                       .filter(UpcomingEvent.start <= timestamp())\
                       .filter(UpcomingEvent.end > timestamp())\
                       .first()
        if event:
            safe_start_capture(event)
        session.close()
        time.sleep(1.0)

    logger.info('Shutting down capture service')
    set_service_status(Service.CAPTURE, ServiceStatus.STOPPED)

def home():
    '''Serve the status page of the capture agent.
    '''
    # Get IDs of existing preview images
    preview = config()['capture']['preview']
    previewdir = config()['capture']['preview_dir']
    preview = [p.replace('{{previewdir}}', previewdir) for p in preview]
    preview = zip(preview, range(len(preview)))
    preview = [p[1] for p in preview if os.path.isfile(p[0])]

    # Get limits for recording table
    try:
        limit_upcoming = int(request.args.get('limit_upcoming', 5))
        limit_processed = int(request.args.get('limit_processed', 15))
    except ValueError:
        limit_upcoming = 5
        limit_processed = 15

    db = get_session()
    upcoming_events = db.query(UpcomingEvent)\
                        .order_by(UpcomingEvent.start)\
                        .limit(limit_upcoming)
    recorded_events = db.query(RecordedEvent)\
                        .order_by(RecordedEvent.start.desc())\
                        .limit(limit_processed)

    recording = get_service_status(Service.CAPTURE) \
        == ServiceStatus.BUSY
    uploading = get_service_status(Service.INGEST) \
        == ServiceStatus.BUSY
    processed = db.query(RecordedEvent).count()
    upcoming = db.query(UpcomingEvent).count()

    return render_template('home.html', preview=preview, config=config(),
                           recorded_events=recorded_events,
                           upcoming_events=upcoming_events,
                           recording=recording, uploading=uploading,
                           processed=processed, upcoming=upcoming,
                           limit_upcoming=limit_upcoming,
                           limit_processed=limit_processed,
                           dtfmt=dtfmt)

def delete_event(uid):
    '''Delete a specific event identified by its uid. Note that only recorded
    events can be deleted. Events in the buffer for upcoming events are
    regularly replaced anyway and a manual removal could have unpredictable
    effects.

    Use the ?hard=true parameter to delete the recorded files on disk as well.

    Returns 204 if the action was successful.
    Returns 404 if the event does not exist.
    '''
    logger.info('deleting event %s via api', uid)
    db = get_session()
    events = db.query(RecordedEvent).filter(RecordedEvent.uid == uid)
    if not events.count():
        return make_error_response('No event with specified uid', 404)
    hard_delete = request.args.get('hard', 'false')
    if hard_delete == 'true':
        logger.info('deleting recorded files at %s', events[0].directory())
        shutil.rmtree(events[0].directory())
    events.delete()
    db.commit()
    return make_response('', 204)

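# Example: calling the delete endpoint above with a hard delete, assuming the
# view is mounted at /api/events/<uid> (the route decorator is not shown).
# Using Flask's test client keeps the sketch self-contained:
#
#     client = app.test_client()
#     response = client.delete('/api/events/123?hard=true')
#     assert response.status_code in (204, 404)
#
# With hard=true the recorded files on disk are removed as well.
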
def get_schedule():
    '''Try to load schedule from the Matterhorn core. Returns a valid schedule
    or None on failure.
    '''
    params = {'agentid': config('agent', 'name').encode('utf8')}
    lookahead = config('agent', 'cal_lookahead') * 24 * 60 * 60
    if lookahead:
        params['cutoff'] = str((timestamp() + lookahead) * 1000)
    uri = '%s/calendars?%s' % (service('scheduler')[0], urlencode(params))
    try:
        vcal = http_request(uri)
        UpstreamState.update_sync_time(config('server', 'url'))
    except pycurl.error as e:
        logger.error('Could not get schedule: %s', e)
        return

    try:
        cal = parse_ical(vcal.decode('utf-8'))
    except Exception:
        logger.error('Could not parse ical')
        logger.error(traceback.format_exc())
        return

    db = get_session()
    db.query(UpcomingEvent).delete()
    for event in cal:
        # Ignore events that have already ended
        if event['dtend'] <= timestamp():
            continue
        e = UpcomingEvent()
        e.start = event['dtstart']
        e.end = event['dtend']
        e.uid = event.get('uid')
        e.title = event.get('summary')
        e.set_data(event)
        db.add(e)
    db.commit()

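# For illustration, the URI built above looks roughly like this, assuming the
# scheduler service is registered at https://example.opencast.org/recordings
# and the agent is named "myagent" (both values are assumptions):
#
#     https://example.opencast.org/recordings/calendars?agentid=myagent&cutoff=1700000000000
#
# The cutoff is a millisecond timestamp limiting how far ahead events are
# requested.
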
def get_schedule():
    '''Try to load schedule from the Matterhorn core. Returns a valid schedule
    or None on failure.
    '''
    try:
        uri = '%s/calendars?agentid=%s' % (config()['service-scheduler'][0],
                                           config()['agent']['name'])
        lookahead = config()['agent']['cal_lookahead'] * 24 * 60 * 60
        if lookahead:
            uri += '&cutoff=%i' % ((timestamp() + lookahead) * 1000)
        vcal = http_request(uri)
    except Exception as e:
        # Silently ignore the error if the capture agent is not yet registered
        if e.args[1] != 404:
            logger.error('Could not get schedule')
            logger.error(traceback.format_exc())
        return

    try:
        cal = parse_ical(vcal.decode('utf-8'))
    except Exception:
        logger.error('Could not parse ical')
        logger.error(traceback.format_exc())
        return

    db = get_session()
    db.query(UpcomingEvent).delete()
    for event in cal:
        # Ignore events that have already ended
        if event['dtend'] <= timestamp():
            continue
        e = UpcomingEvent()
        e.start = event['dtstart']
        e.end = event['dtend']
        e.uid = event.get('uid')
        e.set_data(event)
        db.add(e)
    db.commit()

def control_loop():
    '''Main loop of the ingest service, checking for finished recordings and
    starting the ingest process if necessary.
    '''
    set_service_status_immediate(Service.INGEST, ServiceStatus.IDLE)
    notify.notify('READY=1')
    notify.notify('STATUS=Running')
    while not terminate():
        notify.notify('WATCHDOG=1')
        # Get next recording
        session = get_session()
        event = session.query(RecordedEvent)\
            .filter(RecordedEvent.status == Status.FINISHED_RECORDING)\
            .first()
        if event:
            delay = random.randint(config('ingest', 'delay_min'),
                                   config('ingest', 'delay_max'))
            logger.info('Delaying ingest for %s seconds', delay)
            time.sleep(delay)
            safe_start_ingest(event)
        session.close()
        time.sleep(1.0)

    logger.info('Shutting down ingest service')
    set_service_status(Service.INGEST, ServiceStatus.STOPPED)

def test_get_session(self):
    self.assertIn('autocommit', db.get_session().__dict__.keys())

def test_get_session(self):
    assert 'autocommit' in db.get_session().__dict__.keys()

def __init__(self, db=get_session(), registry=REGISTRY):
    self.db = db
    registry.register(self)
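
# The `db=get_session()` default above is evaluated once, when the function is
# defined, so all instances created without an explicit session share that one
# session. A minimal usage sketch with a hypothetical class name (the class
# this constructor belongs to is not shown here):
#
#     metrics = SomeMetrics(db=get_session(), registry=REGISTRY)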