def recording_command(directory, name, duration):
    '''Run the actual command to record the a/v material.

    :param directory: Directory to store the recording in
    :param name: Base name for the recorded files
    :param duration: Recording duration in seconds
    :returns: list of (flavor, path) tuples for the recorded files
    :raises RuntimeError: if the capture command exits with a positive code
    '''
    # Fill in the configured command template
    preview_dir = config()['capture']['preview_dir']
    cmd = config()['capture']['command']
    cmd = cmd.replace('{{time}}', str(duration))
    cmd = cmd.replace('{{dir}}', directory)
    cmd = cmd.replace('{{name}}', name)
    cmd = cmd.replace('{{previewdir}}', preview_dir)
    logger.info(cmd)
    args = shlex.split(cmd)
    # Python 2 compatibility: subprocess.DEVNULL does not exist there, so
    # fall back to opening os.devnull manually
    DEVNULL = getattr(subprocess, 'DEVNULL', os.open(os.devnull, os.O_RDWR))
    captureproc = subprocess.Popen(args, stdin=DEVNULL)
    # Close the manually opened devnull descriptor (Python 2 branch only)
    hasattr(subprocess, 'DEVNULL') or os.close(DEVNULL)
    # Poll until the capture process terminates
    while captureproc.poll() is None:
        time.sleep(0.1)
    if captureproc.returncode > 0:
        raise RuntimeError('Recording failed (%i)' % captureproc.returncode)

    # Remove preview files:
    for preview in config()['capture']['preview']:
        try:
            os.remove(preview.replace('{{previewdir}}', preview_dir))
        except OSError:
            logger.warning('Could not remove preview files')
            logger.warning(traceback.format_exc())

    # Return [(flavor,path),…]
    flavors = ensurelist(config()['capture']['flavors'])
    files = ensurelist(config()['capture']['files'])
    files = [f.replace('{{dir}}', directory) for f in files]
    files = [f.replace('{{name}}', name) for f in files]
    return list(zip(flavors, files))
def http_request(url, post_data=None):
    '''Make an HTTP request to a given URL with optional parameters.

    Uses HTTP digest authentication with the configured server credentials.

    :param url: URL to request
    :param post_data: Optional POST fields; if set, the request is a POST
    :returns: response body as bytes
    :raises Exception: if the response status is not 2xx
    '''
    buf = bio()
    curl = pycurl.Curl()
    # pycurl expects a byte-string URL; non-ascii characters are dropped
    curl.setopt(curl.URL, url.encode('ascii', 'ignore'))

    # Disable HTTPS verification methods if insecure is set
    if config()['server']['insecure']:
        curl.setopt(curl.SSL_VERIFYPEER, 0)
        curl.setopt(curl.SSL_VERIFYHOST, 0)

    if config()['server']['certificate']:
        # Make sure verification methods are turned on
        curl.setopt(curl.SSL_VERIFYPEER, 1)
        curl.setopt(curl.SSL_VERIFYHOST, 2)
        # Import your certificates
        curl.setopt(pycurl.CAINFO, config()['server']['certificate'])

    if post_data:
        curl.setopt(curl.HTTPPOST, post_data)
    curl.setopt(curl.WRITEFUNCTION, buf.write)
    curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_DIGEST)
    curl.setopt(pycurl.USERPWD, "%s:%s" % (config()['server']['username'],
                                           config()['server']['password']))
    curl.setopt(curl.HTTPHEADER, ['X-Requested-Auth: Digest'])
    curl.perform()
    status = curl.getinfo(pycurl.HTTP_CODE)
    curl.close()
    # Treat any non-2xx status as failure
    if int(status / 100) != 2:
        raise Exception('Request to %s failed' % url, status)
    result = buf.getvalue()
    buf.close()
    return result
def setUp(self):
    '''Build a capture test fixture: mocked HTTP layer, temporary database
    and capture directory, a dummy preview file and one upcoming mock event
    with metadata attachments.
    '''
    # Stub out all network access
    utils.http_request = lambda x, y=False: b'xxx'
    self.fd, self.dbfile = tempfile.mkstemp()
    self.cadir = tempfile.mkdtemp()
    preview = os.path.join(self.cadir, 'preview.png')
    # Create an empty preview file so the capture code can "remove" it
    open(preview, 'a').close()
    config.config()['agent']['database'] = 'sqlite:///' + self.dbfile
    config.config()['capture']['command'] = 'touch {{dir}}/{{name}}.mp4'
    config.config()['capture']['directory'] = self.cadir
    config.config()['capture']['preview'] = [preview]
    config.config()['service-capture.admin'] = ['']

    # Mock event
    db.init()
    self.event = db.BaseEvent()
    self.event.uid = '123123'
    self.event.title = u'äüÄÜß'
    self.event.start = utils.timestamp()
    self.event.end = self.event.start
    self.event.status = db.Status.UPCOMING
    data = [{'data': u'äüÄÜß',
             'fmttype': 'application/xml',
             'x-apple-filename': 'episode.xml'},
            {'data': u'äüÄÜß',
             'fmttype': 'application/xml',
             'x-apple-filename': 'series.xml'},
            {'data': u'event.title=äüÄÜß\n' +
                     u'org.opencastproject.workflow.config.x=123\n' +
                     u'org.opencastproject.workflow.definition=fast',
             'fmttype': 'application/text',
             'x-apple-filename': 'org.opencastproject.capture.agent' +
                                 '.properties'}]
    self.event.set_data({'attach': data})
def http_request(url, post_data=None):
    '''Make an HTTP request to a given URL with optional parameters.

    Uses HTTP digest authentication, follows redirects and lets pycurl
    raise an error for HTTP status codes >= 400 (FAILONERROR).

    :param url: URL to request
    :param post_data: Optional POST fields; if set, the request is a POST
    :returns: response body as bytes
    '''
    logger.debug('Requesting URL: %s' % url)
    buf = bio()
    curl = pycurl.Curl()
    # pycurl expects a byte-string URL; non-ascii characters are dropped
    curl.setopt(curl.URL, url.encode('ascii', 'ignore'))

    # Disable HTTPS verification methods if insecure is set
    if config()['server']['insecure']:
        curl.setopt(curl.SSL_VERIFYPEER, 0)
        curl.setopt(curl.SSL_VERIFYHOST, 0)

    if config()['server']['certificate']:
        # Make sure verification methods are turned on
        curl.setopt(curl.SSL_VERIFYPEER, 1)
        curl.setopt(curl.SSL_VERIFYHOST, 2)
        # Import your certificates
        curl.setopt(pycurl.CAINFO, config()['server']['certificate'])

    if post_data:
        curl.setopt(curl.HTTPPOST, post_data)
    curl.setopt(curl.WRITEFUNCTION, buf.write)
    curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_DIGEST)
    curl.setopt(pycurl.USERPWD, "%s:%s" % (config()['server']['username'],
                                           config()['server']['password']))
    curl.setopt(curl.HTTPHEADER, ['X-Requested-Auth: Digest'])
    curl.setopt(curl.FAILONERROR, True)
    curl.setopt(curl.FOLLOWLOCATION, True)
    curl.perform()
    curl.close()
    result = buf.getvalue()
    buf.close()
    return result
def setUp(self):
    '''Register a capture admin service stub and set up a temporary
    sqlite database for the tests.'''
    config.config()['service-capture.admin'] = ['']
    # db: back the agent database with a fresh temporary file
    self.fd, self.dbfile = tempfile.mkstemp()
    config.config()['agent']['database'] = 'sqlite:///%s' % self.dbfile
    db.init()
def get_schedule(db):
    '''Try to load schedule from the Matterhorn core. Returns a valid
    schedule or None on failure.

    :param db: Database session used to replace the cached schedule
    '''
    params = {'agentid': config('agent', 'name').encode('utf8')}
    # Optional cut-off time (milliseconds) limiting how far ahead to look
    lookahead = config('agent', 'cal_lookahead') * 24 * 60 * 60
    if lookahead:
        params['cutoff'] = str((timestamp() + lookahead) * 1000)
    uri = '%s/calendars?%s' % (service('scheduler')[0], urlencode(params))
    try:
        vcal = http_request(uri)
        # Record the successful synchronization with the core
        UpstreamState.update_sync_time(config('server', 'url'))
    except pycurl.error as e:
        logger.error('Could not get schedule: %s', e)
        return
    try:
        cal = parse_ical(vcal.decode('utf-8'))
    except Exception:
        logger.exception('Could not parse ical')
        return
    # Replace the cached schedule with the freshly fetched events
    db.query(UpcomingEvent).delete()
    for event in cal:
        # Ignore events that have already ended
        if event['dtend'] <= timestamp():
            continue
        e = UpcomingEvent()
        e.start = event['dtstart']
        e.end = event['dtend']
        e.uid = event.get('uid')
        e.title = event.get('summary')
        e.set_data(event)
        db.add(e)
    db.commit()
def get_schedule():
    '''Try to load schedule from the Matterhorn core. Returns a valid
    schedule or None on failure.
    '''
    uri = '%s/calendars?agentid=%s' % (config()['service-scheduler'][0],
                                       config()['agent']['name'])
    # Optional cut-off time (milliseconds) limiting how far ahead to look
    lookahead = config()['agent']['cal_lookahead'] * 24 * 60 * 60
    if lookahead:
        uri += '&cutoff=%i' % ((timestamp() + lookahead) * 1000)
    try:
        vcal = http_request(uri)
    except pycurl.error as e:
        logger.error('Could not get schedule: %s' % e)
        return
    try:
        cal = parse_ical(vcal.decode('utf-8'))
    except Exception:
        logger.error('Could not parse ical')
        logger.error(traceback.format_exc())
        return
    # Replace the cached schedule with the freshly fetched events
    db = get_session()
    db.query(UpcomingEvent).delete()
    for event in cal:
        # Ignore events that have already ended
        if event['dtend'] <= timestamp():
            continue
        e = UpcomingEvent()
        e.start = event['dtstart']
        e.end = event['dtend']
        e.uid = event.get('uid')
        e.title = event.get('summary')
        e.set_data(event)
        db.add(e)
    db.commit()
def register_ca(status='idle'):
    '''Register this capture agent at the Matterhorn admin server so that
    it shows up in the admin interface.

    :param status: Current status of the capture agent
    :returns: True on success or in backup mode, False otherwise
    '''
    # If this is a backup CA we don't tell the Matterhorn core that we are
    # here. We will just run silently in the background:
    if config()['agent']['backup_mode']:
        return True
    params = [('address', config()['ui']['url']), ('state', status)]
    url = '%s/agents/%s' % (config()['service-capture.admin'][0],
                            config()['agent']['name'])
    try:
        response = http_request(url, params).decode('utf-8')
        if response:
            logger.info(response)
    except Exception:
        # Narrowed from a bare `except:` which also swallowed SystemExit
        # and KeyboardInterrupt.  Other errors (e.g. network issues) are
        # ignored as it's more important to get the recording than to set
        # the correct current state in the admin ui.
        logger.warning('Could not set capture agent state')
        logger.warning(traceback.format_exc())
        return False
    return True
def setUp(self):
    '''Register a dummy capture admin service and a temporary database.'''
    services = config.config()['services']
    services['org.opencastproject.capture.admin'] = ['']
    # db: back the agent database with a fresh temporary file
    self.fd, self.dbfile = tempfile.mkstemp()
    config.config()['agent']['database'] = 'sqlite:///%s' % self.dbfile
    db.init()
def test_recording_state(self):
    '''recording_state must cope with success, failure and backup mode.'''
    set_state = utils.recording_state
    # Successful request
    utils.http_request = lambda url, data=False: b'xxx'
    config.config()['service-capture.admin'] = ['']
    set_state('123', 'recording')
    # Failing request must be swallowed silently
    utils.http_request = should_fail
    set_state('123', 'recording')
    # Backup mode skips the request entirely
    config.config()['agent']['backup_mode'] = True
    set_state('123', 'recording')
def test_http_request(self):
    '''An unreachable host must raise a pycurl connection error (7).'''
    config.config()['server']['insecure'] = True
    config.config()['server']['certificate'] = 'nowhere'
    try:
        utils.http_request('http://127.0.0.1:8', [('x', 'y')])
    except Exception as err:
        assert err.args[0] == 7  # connection error
    else:
        assert False
def decorated(*args, **kwargs):
    '''Check HTTP basic authentication before calling the wrapped view.

    Responds with 401 when a UI password is configured and the request
    carries no or invalid credentials.
    '''
    auth = request.authorization
    # Parenthesize the credential check.  Without the parentheses the
    # username/password comparisons were evaluated even when no password
    # was configured, and raised AttributeError when no Authorization
    # header was sent at all.  Also consistently use the bound `auth`.
    if config()['ui']['password'] and (
            not auth
            or auth.username != config()['ui']['username']
            or auth.password != config()['ui']['password']):
        return Response('pyCA', 401,
                        {'WWW-Authenticate': 'Basic realm="pyCA Login"'})
    return f(*args, **kwargs)
def setUp(self):
    '''Mock the HTTP layer and prepare a temporary scheduler setup.'''
    utils.http_request = lambda url, data=False: b'xxx'
    self.fd, self.dbfile = tempfile.mkstemp()
    cfg = config.config()
    cfg['agent']['database'] = 'sqlite:///' + self.dbfile
    cfg['service-scheduler'] = ['']
    # Mock event
    db.init()
def test_check(self):
    '''config.check must fail when the certificate file does not exist.'''
    config.config()['server']['insecure'] = True
    config.config()['server']['certificate'] = '/xxx'
    try:
        config.check()
    except IOError:
        pass  # expected: the certificate file cannot be opened
    else:
        assert False
def decorated(*args, **kwargs):
    '''Enforce HTTP basic authentication for the wrapped view.'''
    auth = request.authorization
    # Parenthesize the credential check.  Without the parentheses the
    # username/password comparisons ran even when no password was
    # configured, and raised AttributeError for requests without an
    # Authorization header.
    if config()['ui']['password'] and (
            not auth
            or auth.username != config()['ui']['username']
            or auth.password != config()['ui']['password']):
        return Response('pyCA', 401,
                        {'WWW-Authenticate': 'Basic realm="pyCA Login"'})
    return f(*args, **kwargs)
def setUp(self):
    '''Mock HTTP access and prepare a temp database plus a capture
    admin service stub.'''
    utils.http_request = lambda url, data=False: b'xxx'
    self.fd, self.dbfile = tempfile.mkstemp()
    cfg = config.config()
    cfg['agent']['database'] = 'sqlite:///' + self.dbfile
    cfg['service-capture.admin'] = ['']
    # Mock event
    db.init()
def decorated(*args, **kwargs):
    '''Require HTTP basic authentication if a UI password is configured.'''
    auth = request.authorization
    # Parenthesize the credential check.  Without the parentheses the
    # comparisons were evaluated even with no password configured, and
    # crashed on requests without an Authorization header.
    if config('ui', 'password') and (
            not auth
            or auth.username != config('ui', 'username')
            or auth.password != config('ui', 'password')):
        return Response('pyCA: Login required\n', 401,
                        {'WWW-Authenticate': 'Basic realm="pyCA Login"'})
    return f(*args, **kwargs)
def test_http_request_mocked_curl(self):
    '''http_request should succeed against the mocked curl backend.'''
    server_cfg = config.config()['server']
    server_cfg['insecure'] = True
    server_cfg['certificate'] = 'nowhere'
    utils.pycurl.Curl = CurlMock
    try:
        utils.http_request('http://127.0.0.1:8', [('x', 'y')])
    except Exception:
        self.fail()
    # Restore the real pycurl module
    reload(utils.pycurl)
def test_http_request_mocked_curl(self):
    '''http_request must not raise when curl is mocked out.'''
    server_cfg = config.config()['server']
    server_cfg['insecure'] = True
    server_cfg['certificate'] = 'nowhere'
    utils.pycurl.Curl = CurlMock
    try:
        utils.http_request('http://127.0.0.1:8', [('x', 'y')])
    except Exception:
        assert False
    # Undo the mock
    reload(utils.pycurl)
def metrics(dbs): '''Serve several metrics about the pyCA services and the machine via json.''' # Get Disk Usage # If the capture directory do not exists, use the parent directory. directory = config('capture', 'directory') if not os.path.exists(directory): directory = os.path.abspath(os.path.join(directory, os.pardir)) total, used, free = shutil.disk_usage(directory) # Get Loads load_1m, load_5m, load_15m = os.getloadavg() # Get Memory memory = psutil.virtual_memory() # Get Services srvs = dbs.query(ServiceStates) services = [] for srv in srvs: services.append({ 'name': Service.str(srv.type), 'status': ServiceStatus.str(srv.status) }) # Get Upstream State state = dbs.query(UpstreamState).filter( UpstreamState.url == config('server', 'url')).first() last_synchronized = state.last_synced.isoformat() if state else None return make_response( jsonify({ 'meta': { 'services': services, 'disk_usage_in_bytes': { 'total': total, 'used': used, 'free': free, }, 'memory_usage_in_bytes': { 'total': memory.total, 'available': memory.available, 'used': memory.used, 'free': memory.free, 'cached': memory.cached, 'buffers': memory.buffers, }, 'load': { '1m': load_1m, '5m': load_5m, '15m': load_15m, }, 'upstream': { 'last_synchronized': last_synchronized, } } }))
def recording_command(event):
    '''Run the actual command to record the a/v material.

    :param event: Event to record; provides timing, directory and name
    :returns: list of (flavor, path) tuples for the recorded files
    :raises RuntimeError: if the capture command fails unexpectedly
    '''
    conf = config('capture')

    # Prepare command line
    cmd = conf['command']
    cmd = cmd.replace('{{time}}', str(event.remaining_duration(timestamp())))
    cmd = cmd.replace('{{dir}}', event.directory())
    cmd = cmd.replace('{{name}}', event.name())
    cmd = cmd.replace('{{previewdir}}', conf['preview_dir'])

    # Signal configuration: negative values disable the signal, otherwise
    # the signal is sent this many seconds after the scheduled end
    sigterm_time = conf['sigterm_time']
    sigkill_time = conf['sigkill_time']
    sigterm_time = 0 if sigterm_time < 0 else event.end + sigterm_time
    sigkill_time = 0 if sigkill_time < 0 else event.end + sigkill_time

    # Launch capture command
    logger.info(cmd)
    args = shlex.split(cmd)
    # Python 2 compatibility: subprocess.DEVNULL does not exist there
    DEVNULL = getattr(subprocess, 'DEVNULL', os.open(os.devnull, os.O_RDWR))
    captureproc = subprocess.Popen(args, stdin=DEVNULL)
    hasattr(subprocess, 'DEVNULL') or os.close(DEVNULL)

    # Check process
    while captureproc.poll() is None:
        if sigterm_time and timestamp() > sigterm_time:
            logger.info("Terminating capture process")
            captureproc.terminate()
            sigterm_time = 0  # send only once
        elif sigkill_time and timestamp() > sigkill_time:
            # Fixed log message: this branch kills, it does not terminate
            logger.warning("Killing capture process")
            captureproc.kill()
            sigkill_time = 0  # send only once
        time.sleep(0.1)

    # Remove preview files:
    for preview in conf['preview']:
        try:
            os.remove(preview.replace('{{previewdir}}', conf['preview_dir']))
        except OSError:
            logger.warning('Could not remove preview files')
            logger.warning(traceback.format_exc())

    # Check process for errors (use `conf` consistently for the capture
    # configuration instead of mixing in a second config() lookup)
    exitcode = conf['exit_code']
    if captureproc.poll() > 0 and captureproc.returncode != exitcode:
        raise RuntimeError('Recording failed (%i)' % captureproc.returncode)

    # Return [(flavor,path),…]
    files = (f.replace('{{dir}}', event.directory()) for f in conf['files'])
    files = (f.replace('{{name}}', event.name()) for f in files)
    return list(zip(conf['flavors'], files))
def configure_service(service):
    '''Get the location of a given service from Opencast and add it to the
    current configuration.
    '''
    key = 'service-' + service
    # Retry until the endpoint is known or the agent is shutting down
    while not config().get(key) and not terminate():
        try:
            config()[key] = get_service('org.opencastproject.' + service)
        except pycurl.error as err:
            logger.error('Could not get %s endpoint: %s. Retrying in 5s'
                         % (service, err))
            time.sleep(5.0)
def configure_service(service):
    '''Get the location of a given service from Opencast and add it to the
    current configuration.

    :param service: Short service name (without org.opencastproject prefix)
    '''
    while not config().get('service-' + service):
        try:
            config()['service-' + service] = \
                get_service('org.opencastproject.' + service)
        except Exception:
            # Narrowed from a bare `except:` which also swallowed
            # SystemExit/KeyboardInterrupt and made the retry loop
            # impossible to interrupt cleanly.
            logger.error('Could not get %s endpoint. Retrying in 5 seconds'
                         % service)
            logger.error(traceback.format_exc())
            time.sleep(5.0)
def serve_image(image_id):
    '''Serve the preview image with the given id

    :param image_id: index into the configured preview file list
    :returns: Flask file response, or ('', 404) if unavailable
    '''
    try:
        preview_dir = config()['capture']['preview_dir']
        filepath = config()['capture']['preview'][image_id]
        filepath = filepath.replace('{{previewdir}}', preview_dir)
        if os.path.isfile(filepath):
            directory, filename = filepath.rsplit('/', 1)
            return send_from_directory(directory, filename)
    except (IndexError, KeyError):
        # Invalid image id or missing configuration keys.  Narrowed from a
        # bare `except:` which hid every other error; also dropped the dead
        # `filepath = ''` initialization.
        pass
    return '', 404
def serve_image(image_id):
    '''Serve the preview image with the given id
    '''
    try:
        capture = config()['capture']
        path = capture['preview'][image_id]
        path = path.replace('{{previewdir}}', capture['preview_dir'])
        path = os.path.abspath(path)
        if os.path.isfile(path):
            folder, name = path.rsplit('/', 1)
            return send_from_directory(folder, name)
    except (IndexError, KeyError):
        # Invalid image id or missing configuration keys
        pass
    return '', 404
def test_start_capture_recording_command_failure(self):
    '''start_capture must raise if the capture command fails.'''
    config.config()['capture']['command'] = 'false'
    try:
        capture.start_capture(self.event)
    except RuntimeError:
        pass  # expected: `false` exits with a non-zero status
    else:
        assert False
def test_register_ca(self):
    '''register_ca: True on success, False on failure, True again in
    backup mode (no request made).'''
    register = utils.register_ca
    utils.http_request = lambda url, data=False: b'xxx'
    assert register()
    utils.http_request = should_fail
    assert not register()
    config.config()['agent']['backup_mode'] = True
    assert register()
def serve_image(image_id):
    '''Serve the preview image with the given id
    '''
    try:
        path = config('capture', 'preview')[image_id]
        path = path.replace('{{previewdir}}',
                            config('capture', 'preview_dir'))
        path = os.path.abspath(path)
        if os.path.isfile(path):
            folder, name = path.rsplit('/', 1)
            response = send_from_directory(folder, name)
            # Previews change while recording; forbid client-side caching
            response.cache_control.no_cache = True
            return response
    except (IndexError, KeyError):
        # Invalid image id or missing configuration keys
        pass
    return '', 404
def home():
    '''Serve the status page of the capture agent.
    '''
    # Redirect to the static UI, passing the configured refresh rate
    return redirect(url_for('static',
                            filename='index.html',
                            refresh=config('ui', 'refresh_rate')))
def init():
    '''Initialize connection to database. Additionally the basic database
    structure will be created if nonexistent.
    '''
    # Keep the engine as module-level state so sessions can be created later
    global engine
    engine = create_engine(config()['agent']['database'])
    Base.metadata.create_all(engine)
def schedule_event():
    '''HTTP endpoint for scheduling a new recording on this capture agent.

    Expects a JSON payload with a single data object that may contain
    `title`, `duration` (seconds, int) and `creator`.

    :returns: JSON response (200 on success, 400/409 on error)
    '''
    try:
        # We only allow one schedule at a time
        # (removed leftover debug print() calls on the request object)
        data = request.get_json()['data']
        if len(data) != 1:
            return make_error_response('Invalid data', 400)
        data = data[0]
        # Check attributes
        for key in data.keys():
            if key not in ('title', 'duration', 'creator'):
                return make_error_response('Invalid data', 400)
        # Check duration (`is not` instead of `!=` for the type identity
        # test; booleans are still rejected as before)
        if type(data['duration']) is not int:
            return make_error_response('Duration must be an integer', 400)
    except Exception as e:
        # Fixed logging call: the message had no placeholder for `e`
        logger.debug('bad request: %s', e)
        return make_error_response('Invalid data', 400)

    try:
        schedule(title=data.get('title', 'pyCA Recording'),
                 duration=data['duration'],
                 creator=data.get('creator', config('ui', 'username')))
    except Exception:
        return make_error_response('Scheduling conflict', 409)
    return make_data_response('Event scheduled')
def test_recording_state(self):
    '''recording_state must survive empty responses, request failures
    and backup mode.'''
    set_state = utils.recording_state
    utils.http_request = lambda url, data=False: b''
    set_state('123', 'recording')
    utils.http_request = should_fail
    set_state('123', 'recording')
    config.config()['agent']['backup_mode'] = True
    set_state('123', 'recording')
def start_capture(upcoming_event):
    '''Start the capture process, creating all necessary files and
    directories as well as ingesting the captured files if no backup mode is
    configured.

    :param upcoming_event: The upcoming event to record
    '''
    logger.info('Start recording')

    # First move event to recording_event table
    db = get_session()
    event = db.query(RecordedEvent)\
              .filter(RecordedEvent.uid == upcoming_event.uid)\
              .filter(RecordedEvent.start == upcoming_event.start)\
              .first()
    if not event:
        event = RecordedEvent(upcoming_event)
        db.add(event)
        db.commit()

    # Make sure the capture directory and the event directory exist
    try_mkdir(config()['capture']['directory'])
    os.mkdir(event.directory())

    # Set state
    update_event_status(event, Status.RECORDING)
    recording_state(event.uid, 'capturing')
    set_service_status_immediate(Service.CAPTURE, ServiceStatus.BUSY)

    # Recording
    tracks = recording_command(event)
    event.set_tracks(tracks)
    db.commit()

    # Set status
    update_event_status(event, Status.FINISHED_RECORDING)
    recording_state(event.uid, 'capture_finished')
    set_service_status_immediate(Service.CAPTURE, ServiceStatus.IDLE)
def control_loop():
    '''Main loop, retrieving the schedule.

    Repeatedly fetches the schedule, reports the next upcoming recording
    to systemd and sleeps until the next configured update.
    '''
    set_service_status_immediate(Service.SCHEDULE, ServiceStatus.BUSY)
    notify.notify('READY=1')
    while not terminate():
        notify.notify('WATCHDOG=1')
        # Try getting an updated schedule
        get_schedule()
        session = get_session()
        next_event = session.query(UpcomingEvent)\
                            .filter(UpcomingEvent.end > timestamp())\
                            .order_by(UpcomingEvent.start)\
                            .first()
        if next_event:
            logger.info('Next scheduled recording: %s',
                        datetime.fromtimestamp(next_event.start))
            notify.notify('STATUS=Next scheduled recording: %s' %
                          datetime.fromtimestamp(next_event.start))
        else:
            logger.info('No scheduled recording')
            notify.notify('STATUS=No scheduled recording')
        session.close()
        # Sleep in small increments so termination is noticed quickly
        next_update = timestamp() + config('agent', 'update_frequency')
        while not terminate() and timestamp() < next_update:
            time.sleep(0.1)
    logger.info('Shutting down schedule service')
    set_service_status_immediate(Service.SCHEDULE, ServiceStatus.STOPPED)
def start_capture(upcoming_event):
    '''Start the capture process, creating all necessary files and
    directories as well as ingesting the captured files if no backup mode is
    configured.

    :param upcoming_event: The upcoming event to record
    '''
    logger.info('Start recording')

    # First move event to recording_event table
    db = get_session()
    event = db.query(RecordedEvent)\
              .filter(RecordedEvent.uid == upcoming_event.uid)\
              .filter(RecordedEvent.start == upcoming_event.start)\
              .first()
    if not event:
        event = RecordedEvent(upcoming_event)
        db.add(event)
        db.commit()

    # Remaining duration from now until the scheduled end of the event
    duration = event.end - timestamp()
    try_mkdir(config()['capture']['directory'])
    os.mkdir(event.directory())

    # Set state
    set_service_status_immediate(Service.CAPTURE, ServiceStatus.BUSY)
    recording_state(event.uid, 'capturing')
    update_event_status(event, Status.RECORDING)

    # Recording
    tracks = recording_command(event.directory(), event.name(), duration)
    event.set_tracks(tracks)
    db.commit()

    # Set status
    set_service_status_immediate(Service.CAPTURE, ServiceStatus.IDLE)
    update_event_status(event, Status.FINISHED_RECORDING)
def test_register_ca(self):
    '''register_ca should not raise on success, failure or backup mode.'''
    register = utils.register_ca
    utils.http_request = lambda url, data=False: b'xxx'
    register()
    utils.http_request = should_fail
    register()
    config.config()['agent']['backup_mode'] = True
    register()
def ingest(event):
    '''Ingest a finished recording to the Opencast server.

    :param event: The recorded event whose tracks and metadata to upload
    '''
    # Update status
    set_service_status(Service.INGEST, ServiceStatus.BUSY)
    recording_state(event.uid, 'uploading')
    update_event_status(event, Status.UPLOADING)

    # Select ingest service
    # The ingest service to use is selected at random from the available
    # ingest services to ensure that not every capture agent uses the same
    # service at the same time
    service = config('service-ingest')
    service = service[randrange(0, len(service))]
    logger.info('Selecting ingest service to use: ' + service)

    # create mediapackage
    logger.info('Creating new mediapackage')
    mediapackage = http_request(service + '/createMediaPackage')

    # extract workflow_def, workflow_config and add DC catalogs
    # NOTE(review): workflow_def/workflow_config are only bound if the
    # properties attachment exists — verify callers guarantee that.
    prop = 'org.opencastproject.capture.agent.properties'
    dcns = 'http://www.opencastproject.org/xsd/1.0/dublincore/'
    for attachment in event.get_data().get('attach'):
        data = attachment.get('data')
        if attachment.get('x-apple-filename') == prop:
            workflow_def, workflow_config = get_config_params(data)
        # Check for dublincore catalogs
        elif attachment.get('fmttype') == 'application/xml' and dcns in data:
            name = attachment.get('x-apple-filename', '').rsplit('.', 1)[0]
            logger.info('Adding %s DC catalog' % name)
            fields = [('mediaPackage', mediapackage),
                      ('flavor', 'dublincore/%s' % name),
                      ('dublinCore', data.encode('utf-8'))]
            mediapackage = http_request(service + '/addDCCatalog', fields)

    # add track
    for (flavor, track) in event.get_tracks():
        logger.info('Adding track ({0} -> {1})'.format(flavor, track))
        track = track.encode('ascii', 'ignore')
        fields = [('mediaPackage', mediapackage),
                  ('flavor', flavor),
                  ('BODY1', (pycurl.FORM_FILE, track))]
        mediapackage = http_request(service + '/addTrack', fields)

    # ingest
    logger.info('Ingest recording')
    fields = [('mediaPackage', mediapackage)]
    if workflow_def:
        fields.append(('workflowDefinitionId', workflow_def))
    if event.uid:
        fields.append(('workflowInstanceId',
                       event.uid.encode('ascii', 'ignore')))
    fields += workflow_config
    mediapackage = http_request(service + '/ingest', fields)

    # Update status
    recording_state(event.uid, 'upload_finished')
    update_event_status(event, Status.FINISHED_UPLOADING)
    set_service_status_immediate(Service.INGEST, ServiceStatus.IDLE)
def test_run(self):
    '''Exercise the ingest service loop under different termination and
    backup-mode settings.'''
    # Terminate immediately: the loop should exit without doing work
    ingest.terminate(True)
    ingest.run()
    # Allow exactly one loop iteration before terminating
    ingest.terminate = terminate_fn(1)
    ingest.run()
    # presumably ingest is skipped in backup mode — confirm in ingest.run
    config.config('agent')['backup_mode'] = True
    ingest.run()
def schedule(title='pyCA Recording', duration=60, creator=None):
    '''Schedule a recording for this capture agent with the given title,
    creator and duration starting 10 seconds from now.

    :param title: Title of the event to schedule
    :type title: string
    :param creator: Creator of the event to schedule
    :type creator: string
    :param duration: Duration of the event to schedule in seconds
    :type duration: int
    '''
    if not creator:
        creator = config('ui', 'username')

    # Select ingest service
    # The ingest service to use is selected at random from the available
    # ingest services to ensure that not every capture agent uses the same
    # service at the same time
    service_url = service('ingest', force_update=True)
    service_url = service_url[random.randrange(0, len(service_url))]
    logger.info('Selecting ingest service for scheduling: ' + service_url)

    # create media package
    logger.info('Creating new media package')
    mediapackage = http_request(service_url + '/createMediaPackage')

    # add dublin core catalog describing the event timing and metadata
    start = datetime.utcnow() + timedelta(seconds=10)
    end = start + timedelta(seconds=duration)
    dublincore = DUBLINCORE.format(
        agent_name=xml_escape(config('agent', 'name')),
        start=start.strftime('%Y-%m-%dT%H:%M:%SZ'),
        end=end.strftime('%Y-%m-%dT%H:%M:%SZ'),
        title=xml_escape(title),
        creator=xml_escape(creator))
    logger.info('Adding Dublin Core catalog for scheduling')
    fields = [('mediaPackage', mediapackage),
              ('flavor', 'dublincore/episode'),
              ('dublinCore', dublincore)]
    mediapackage = http_request(service_url + '/addDCCatalog', fields)

    # schedule event
    logger.info('Scheduling recording')
    fields = [('mediaPackage', mediapackage)]
    mediapackage = http_request(service_url + '/schedule', fields)

    # Update status
    logger.info('Event successfully scheduled')
def setUp(self):
    '''Prepare an ingest test fixture: mocked HTTP layer, temporary
    database and capture directory, plus one finished recording with
    metadata attachments and a single empty track file.
    '''
    # Stub out network access for the ingest module
    ingest.http_request = lambda x, y=False: b'xxx'
    self.fd, self.dbfile = tempfile.mkstemp()
    self.cadir = tempfile.mkdtemp()
    config.config('agent')['database'] = 'sqlite:///' + self.dbfile
    config.config('capture')['directory'] = self.cadir
    config.config()['services']['org.opencastproject.ingest'] = ['']
    config.config()['services']['org.opencastproject.capture.admin'] = ['']

    # Mock event
    db.init()
    event = db.RecordedEvent()
    event.uid = '123123'
    event.status = db.Status.FINISHED_RECORDING
    event.start = utils.timestamp()
    event.end = event.start + 1
    prop = 'org.opencastproject.capture.agent.properties'
    dcns = 'http://www.opencastproject.org/xsd/1.0/dublincore/'
    data = [{
        'data': u'äü%sÄÜß' % dcns,
        'fmttype': 'application/xml',
        'x-apple-filename': 'episode.xml'
    }, {
        'data': u'äü%sÄÜß' % dcns,
        'fmttype': 'application/xml',
        'x-apple-filename': 'series.xml'
    }, {
        'data': u'event.title=äüÄÜß\n' +
                u'org.opencastproject.workflow.config.x=123\n' +
                u'org.opencastproject.workflow.definition=fast',
        'fmttype': 'application/text',
        'x-apple-filename': prop
    }]
    event.set_data({'attach': data})

    # Create recording
    os.mkdir(event.directory())
    trackfile = os.path.join(event.directory(), 'test.mp4')
    open(trackfile, 'wb').close()
    event.set_tracks([('presenter/source', trackfile)])

    session = db.get_session()
    session.add(event)
    session.commit()
    self.event = db.RecordedEvent(event)
def test_images(self):
    '''get_images requires authentication and lists only existing
    preview files.'''
    # Without authentication
    with ui.app.test_request_context():
        self.assertEqual(ui.jsonapi.get_images().status_code, 401)

    config.config()['capture']['preview_dir'] = '/tmp'

    # With authentication
    for preview, expected in (([], 0), ([__file__], 1)):
        config.config()['capture']['preview'] = preview
        with ui.app.test_request_context(headers=self.headers):
            result = ui.jsonapi.get_images()
            self.assertEqual(result.headers['Content-Type'],
                             self.content_type)
            self.assertEqual(result.status_code, 200)
            payload = json.loads(result.data.decode('utf-8'))
            self.assertEqual(len(payload['data']), expected)
def ingest(tracks, recording_dir, recording_id, workflow_def,
           workflow_config):
    '''Ingest a finished recording to the Matterhorn server.

    :param tracks: list of (flavor, path) tuples to upload
    :param recording_dir: directory containing the recording artifacts
    :param recording_id: workflow instance id to use for the ingest
    :param workflow_def: workflow definition id
    :param workflow_config: additional workflow configuration fields
    '''
    # select ingest service
    # The ingest service to use is selected at random from the available
    # ingest services to ensure that not every capture agent uses the same
    # service at the same time
    service = config()['service-ingest']
    service = service[randrange(0, len(service))]
    logger.info('Selecting ingest service to use: ' + service)

    # create mediapackage
    logger.info('Creating new mediapackage')
    mediapackage = http_request(service + '/createMediaPackage')

    # add episode DublinCore catalog
    if os.path.isfile('%s/episode.xml' % recording_dir):
        logger.info('Adding episode DC catalog')
        dublincore = ''
        with open('%s/episode.xml' % recording_dir, 'rb') as episodefile:
            dublincore = episodefile.read().decode('utf8')
        fields = [('mediaPackage', mediapackage),
                  ('flavor', 'dublincore/episode'),
                  ('dublinCore', dublincore)]
        mediapackage = http_request(service + '/addDCCatalog', fields)

    # add series DublinCore catalog
    if os.path.isfile('%s/series.xml' % recording_dir):
        logger.info('Adding series DC catalog')
        dublincore = ''
        with open('%s/series.xml' % recording_dir, 'rb') as seriesfile:
            dublincore = seriesfile.read().decode('utf8')
        fields = [('mediaPackage', mediapackage),
                  ('flavor', 'dublincore/series'),
                  ('dublinCore', dublincore)]
        mediapackage = http_request(service + '/addDCCatalog', fields)

    # add track
    for (flavor, track) in tracks:
        logger.info('Adding track ({0} -> {1})'.format(flavor, track))
        track = track.encode('ascii', 'ignore')
        fields = [('mediaPackage', mediapackage),
                  ('flavor', flavor),
                  ('BODY1', (pycurl.FORM_FILE, track))]
        mediapackage = http_request(service + '/addTrack', fields)

    # ingest
    logger.info('Ingest recording')
    fields = [('mediaPackage', mediapackage)]
    if workflow_def:
        fields.append(('workflowDefinitionId', workflow_def))
    if recording_id:
        fields.append(('workflowInstanceId',
                       recording_id.encode('ascii', 'ignore')))
    fields += workflow_config
    mediapackage = http_request(service + '/ingest', fields)
def home():
    '''Serve the status page of the capture agent.
    '''
    # Get IDs of existing preview images
    # (enumerate replaces the previous zip(range(len(...))) construct)
    preview = config()['capture']['preview']
    previewdir = config()['capture']['preview_dir']
    preview = [p.replace('{{previewdir}}', previewdir) for p in preview]
    preview = [i for i, p in enumerate(preview) if os.path.isfile(p)]

    # Get limits for recording table
    try:
        limit_upcoming = int(request.args.get('limit_upcoming', 5))
        limit_processed = int(request.args.get('limit_processed', 15))
    except ValueError:
        limit_upcoming = 5
        limit_processed = 15

    db = get_session()
    upcoming_events = db.query(UpcomingEvent)\
                        .order_by(UpcomingEvent.start)\
                        .limit(limit_upcoming)
    recorded_events = db.query(RecordedEvent)\
                        .order_by(RecordedEvent.start.desc())\
                        .limit(limit_processed)

    recording = get_service_status(Service.CAPTURE) \
        == ServiceStatus.BUSY
    uploading = get_service_status(Service.INGEST) \
        == ServiceStatus.BUSY
    processed = db.query(RecordedEvent).count()
    upcoming = db.query(UpcomingEvent).count()

    return render_template('home.html',
                           preview=preview,
                           config=config(),
                           recorded_events=recorded_events,
                           upcoming_events=upcoming_events,
                           recording=recording,
                           uploading=uploading,
                           processed=processed,
                           upcoming=upcoming,
                           limit_upcoming=limit_upcoming,
                           limit_processed=limit_processed,
                           dtfmt=dtfmt)
def control_loop():
    '''Main loop, updating the capture agent state.
    '''
    set_service_status(Service.AGENTSTATE, ServiceStatus.BUSY)
    while not terminate():
        update_agent_state()
        # Sleep in small steps so a termination request is noticed quickly
        next_update = timestamp() + config()['agent']['update_frequency']
        while timestamp() < next_update and not terminate():
            time.sleep(0.1)
    logger.info('Shutting down agentstate service')
    set_service_status(Service.AGENTSTATE, ServiceStatus.STOPPED)
def recording_state(recording_id, status):
    '''Send the state of the current recording to the Matterhorn core.

    :param recording_id: ID of the current recording
    :param status: Status of the recording
    '''
    # If this is a backup CA we do not update the recording state since the
    # actual CA does that and we do not want to interfere. We will just run
    # silently in the background:
    if config()['agent']['backup_mode']:
        return
    params = [('state', status)]
    url = config()['service-capture.admin'][0]
    url += '/recordings/%s' % recording_id
    try:
        result = http_request(url, params)
        logger.info(result)
    except Exception:
        # Narrowed from a bare `except:` which also swallowed SystemExit
        # and KeyboardInterrupt.  Other errors (e.g. network issues) are
        # ignored as it's more important to get the recording than to set
        # the correct current state in the admin ui.
        logger.warning('Could not set recording state')
        logger.warning(traceback.format_exc())
def get_service(service_type):
    '''Get available service endpoints for a given service type from the
    Opencast ServiceRegistry.
    '''
    url = '%s/services/available.json?serviceType=%s' \
        % (config()['server']['url'], service_type)
    response = http_request(url).decode('utf-8')
    services = json.loads(response).get('services', {}).get('service', [])
    # Keep only endpoints that are both online and active
    endpoints = [srv['host'] + srv['path']
                 for srv in ensurelist(services)
                 if srv['online'] and srv['active']]
    for endpoint in endpoints:
        logger.info(u'Endpoint for %s: %s', service_type, endpoint)
    return endpoints
def get_schedule():
    '''Try to load schedule from the Matterhorn core. Returns a valid
    schedule or None on failure.
    '''
    try:
        uri = '%s/calendars?agentid=%s' % (config()['service-scheduler'][0],
                                           config()['agent']['name'])
        lookahead = config()['agent']['cal_lookahead'] * 24 * 60 * 60
        if lookahead:
            # Opencast expects the cutoff as unix time in milliseconds
            uri += '&cutoff=%i' % ((timestamp() + lookahead) * 1000)
        vcal = http_request(uri)
    except Exception as e:
        # Silently ignore a 404 since the capture agent may simply not be
        # registered yet. Other exceptions (e.g. network errors) may not
        # carry a status code at all, so guard the e.args[1] access.
        if len(e.args) < 2 or e.args[1] != 404:
            logger.error('Could not get schedule')
            logger.error(traceback.format_exc())
        return

    try:
        cal = parse_ical(vcal.decode('utf-8'))
    except Exception:
        logger.error('Could not parse ical')
        logger.error(traceback.format_exc())
        return

    # Replace the cached schedule wholesale with the freshly fetched one
    db = get_session()
    db.query(UpcomingEvent).delete()
    for event in cal:
        # Ignore events that have already ended
        if event['dtend'] <= timestamp():
            continue
        e = UpcomingEvent()
        e.start = event['dtstart']
        e.end = event['dtend']
        e.uid = event.get('uid')
        e.set_data(event)
        db.add(e)
    db.commit()
def setUp(self):
    '''Create a mocked recorded event with metadata attachments and a
    capture directory containing one track file.
    '''
    # Stub out HTTP requests so no test ever talks to a real server
    utils.http_request = lambda x, y=False: b'xxx'
    ingest.http_request = lambda x, y=False: b'xxx'
    # Temporary database file and capture directory for this test run
    self.fd, self.dbfile = tempfile.mkstemp()
    self.cadir = tempfile.mkdtemp()
    config.config()['agent']['database'] = 'sqlite:///' + self.dbfile
    config.config()['capture']['directory'] = self.cadir
    # Empty endpoint lists keep the ingest code from needing a real service
    config.config()['service-ingest'] = ['']
    config.config()['service-capture.admin'] = ['']

    # Mock event
    db.init()
    self.event = db.RecordedEvent()
    self.event.uid = '123123'
    self.event.start = utils.timestamp()
    self.event.end = self.event.start + 1
    # Attachment payloads deliberately contain non-ASCII characters to
    # exercise UTF-8 handling when they are written to disk
    data = [{'data': u'äüÄÜß',
             'fmttype': 'application/xml',
             'x-apple-filename': 'episode.xml'},
            {'data': u'äüÄÜß',
             'fmttype': 'application/xml',
             'x-apple-filename': 'series.xml'},
            {'data': u'event.title=äüÄÜß\n' +
                     u'org.opencastproject.workflow.config.x=123\n' +
                     u'org.opencastproject.workflow.definition=fast',
             'fmttype': 'application/text',
             'x-apple-filename': 'org.opencastproject.capture.agent' +
                                 '.properties'}]
    self.event.set_data({'attach': data})

    # Create recording
    os.mkdir(self.event.directory())
    trackfile = os.path.join(self.event.directory(), 'test.mp4')
    with open(trackfile, 'wb') as f:
        f.write(b'123')
    self.event.set_tracks([('presenter/source', trackfile)])
def control_loop():
    '''Main loop, retrieving the schedule.
    '''
    set_service_status(Service.SCHEDULE, ServiceStatus.BUSY)
    while not terminate():
        # Try getting an updated schedule
        get_schedule()
        # Report the next event that has not yet ended, if any
        pending = get_session().query(UpcomingEvent)\
                               .filter(UpcomingEvent.end > timestamp())
        if pending.count():
            logger.info('Next scheduled recording: %s',
                        datetime.fromtimestamp(pending[0].start))
        else:
            logger.info('No scheduled recording')
        # Sleep in small steps until the next refresh so a termination
        # request is picked up promptly.
        next_update = timestamp() + config()['agent']['update_frequency']
        while not terminate() and timestamp() < next_update:
            time.sleep(0.1)
    logger.info('Shutting down schedule service')
    set_service_status(Service.SCHEDULE, ServiceStatus.STOPPED)
def start_ingest(event):
    '''Write the event's metadata attachments to the recording directory and
    upload the recorded tracks to the Opencast core.

    :param event: Recorded event to ingest
    :return: True on success (or backup mode), False on upload failure
    '''
    # Put metadata files on disk
    attachments = event.get_data().get('attach')
    # Initialize both workflow parameters so a calendar entry without a
    # properties attachment cannot cause a NameError at the ingest call.
    workflow_def = ''
    workflow_config = ''
    for attachment in attachments:
        value = attachment.get('data')
        if attachment.get('fmttype') == 'application/text':
            workflow_def, workflow_config = get_config_params(value)
        filename = attachment.get('x-apple-filename')
        with open(os.path.join(event.directory(), filename), 'wb') as f:
            f.write(value.encode('utf-8'))

    # If we are a backup CA, we don't want to actually upload anything. So
    # let's just quit here.
    if config()['agent']['backup_mode']:
        return True

    # Upload everything
    set_service_status(Service.INGEST, ServiceStatus.BUSY)
    recording_state(event.uid, 'uploading')
    update_event_status(event, Status.UPLOADING)

    try:
        ingest(event.get_tracks(), event.directory(), event.uid, workflow_def,
               workflow_config)
    # Catch Exception (not a bare except) so SystemExit/KeyboardInterrupt
    # still propagate.
    except Exception:
        logger.error('Something went wrong during the upload')
        logger.error(traceback.format_exc())
        # Update state if something went wrong
        recording_state(event.uid, 'upload_error')
        update_event_status(event, Status.FAILED_UPLOADING)
        set_service_status_immediate(Service.INGEST, ServiceStatus.IDLE)
        return False

    # Update state
    recording_state(event.uid, 'upload_finished')
    update_event_status(event, Status.FINISHED_UPLOADING)
    set_service_status_immediate(Service.INGEST, ServiceStatus.IDLE)
    return True
def directory(self):
    '''Return the recording directory used for this event, derived from
    the configured capture directory and the event name.
    '''
    base = config()['capture']['directory']
    return os.path.join(base, self.name())
def main():
    '''Parse command line options, load the configuration, set up logging
    and signal handlers, and dispatch to the requested pyca service.
    '''
    # Probe for configuration file location
    cfg = '/etc/pyca.conf'
    if not os.path.isfile(cfg):
        cfg = './etc/pyca.conf'

    # Check command line options
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hc:', ['help', 'config='])
    except getopt.GetoptError:
        usage(1)
    # Process the complete option list (no early break) so that a later
    # -c/--config overrides an earlier one and -h works in any position.
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage()
        elif opt in ('-c', '--config'):
            cfg = arg

    # Make sure we got only one command
    if len(args) > 1:
        usage(2)

    try:
        config.update_configuration(cfg)
    except ValueError as e:
        print(str(e))
        sys.exit(4)

    # Initialize logger
    handlers = []
    conf = config.config()
    if conf['logging']['syslog']:
        handlers.append(logging.handlers.SysLogHandler(address='/dev/log'))
    if conf['logging']['stderr']:
        handlers.append(logging.StreamHandler(sys.stderr))
    logger = logging.getLogger('')
    for h in handlers:
        h.setFormatter(logging.Formatter(
            '[%(name)s:%(lineno)s:%(funcName)s()] %(message)s'))
        logger.addHandler(h)
    logger.setLevel(conf['logging']['level'].upper())

    # Set signal handler
    signal.signal(signal.SIGINT, sigint_handler)
    signal.signal(signal.SIGTERM, sigterm_handler)

    # Get the command with `run` as default
    cmd = (args + ['run'])[0]
    if cmd == 'run':
        run_all(schedule, capture, ingest, agentstate)
    elif cmd == 'schedule':
        schedule.run()
    elif cmd == 'capture':
        capture.run()
    elif cmd == 'ingest':
        ingest.run()
    elif cmd == 'agentstate':
        agentstate.run()
    elif cmd == 'ui':
        # Restore the default Ctrl+C behavior for the interactive web UI
        signal.signal(signal.SIGINT, signal.default_int_handler)
        ui.app.run()
    else:
        # Invalid command
        usage(3)
def test_configure_service(self):
    '''Configuring a service must store the discovered endpoints in the
    global configuration under a service-<type> key.
    '''
    # Stub the service lookup so no HTTP request is made
    utils.get_service = lambda service_type: 'x'
    utils.configure_service('x')
    assert config.config()['service-x'] == 'x'
def timestamp():
    '''Get current unix timestamp.
    '''
    # With ignore_timezone set we use naive local time; datetime.now(None)
    # is equivalent to datetime.now() with no timezone argument.
    tz = None if config()['agent']['ignore_timezone'] else tzutc()
    return unix_ts(datetime.now(tz))
def setUp(self):
    '''Point the configuration at a temporary database and a temporary
    preview file, then initialize the database.
    '''
    self.fd1, self.dbfile = tempfile.mkstemp()
    self.fd2, self.previewfile = tempfile.mkstemp()
    cfg = config.config()
    cfg['capture']['preview'] = [self.previewfile]
    cfg['agent']['database'] = 'sqlite:///' + self.dbfile
    db.init()
def test_check(self):
    '''A configuration with both the insecure flag and a certificate set
    must pass the configuration check.
    '''
    server = config.config()['server']
    server['insecure'] = True
    server['certificate'] = 'xxx'
    config.check()
def setUp(self):
    '''Load the default configuration and swap the database for a
    temporary sqlite file.
    '''
    config.update_configuration('./etc/pyca.conf')
    self.fd, self.dbfile = tempfile.mkstemp()
    config.config()['agent']['database'] = 'sqlite:///' + self.dbfile