def compact_files(folder, outfile):
    """Merge every JSON file in *folder* into the single JSON list at *outfile*.

    Any data already present in *outfile* is kept and appended to. Source
    files are deleted only after all of them parsed successfully, then the
    merged list is written out.

    :param folder: directory whose files are read and then removed.
    :param outfile: path of the aggregated JSON file.
    :return: True on success; False when a source file is not valid JSON or
        a PermissionError prevents touching the files.
    """
    # Seed the accumulator from a previous run's outfile, if one exists.
    try:
        with open(outfile, 'r') as handle:
            merged = json.load(handle)
    except FileNotFoundError:
        merged = []

    # Parse every source file before deleting anything.
    for name in os.listdir(folder):
        with open(os.path.join(folder, name), 'r') as handle:
            try:
                merged.append(json.load(handle))
            except json.decoder.JSONDecodeError:
                logger.critical(
                    "FlashLight hasn't learnt to read non-JSON files yet")
                return False

    try:
        # Deletion happens in a second pass: if the user interrupts the
        # process mid-parse, no source file has been removed yet, so no
        # data can be lost.
        for name in os.listdir(folder):
            os.remove(os.path.join(folder, name))
        # Persist the merged result.
        with open(outfile, 'w+') as handle:
            json.dump(merged, handle)
    except PermissionError:
        logger.critical("PermissionDenied: FlashLight cannot use its Home :O")
        return False
    return True
async def init_backend(sanic, loop):
    """Sanic startup listener: build the app Backend and notify systemd.

    :param sanic: the running Sanic server; stopped if initialization fails.
    :param loop: the event loop (unused, required by the listener signature).
    """
    try:
        app.backend = Backend(app)
        # Tell systemd the service is ready (Type=notify units).
        n = sdnotify.SystemdNotifier()
        n.notify("READY=1")
    except Exception:
        # Was a bare `except:`, which would also swallow KeyboardInterrupt
        # and asyncio.CancelledError; `Exception` keeps the log-and-stop
        # behavior for real failures while letting cancellation propagate.
        logger.critical('Unexpected error:', exc_info=1)
        sanic.stop()
async def app_other_error(request: Request, ex):
    """Catch-all exception handler: log the traceback and optionally mail it.

    :param request: the request during which the exception was raised.
    :param ex: the unhandled exception (unused; the traceback is read from
        the active exception context instead).
    :return: the generic INTERNAL_SERVER_ERROR response message.
    """
    # Format the traceback once and reuse it for both the log entry and the
    # e-mail body (the original called format_exc() twice).
    tb = traceback.format_exc()
    logger.critical("Exception on {0}:{1}".format(request.path, tb))
    if SMTP.initialized:
        # The configured account mails the report to itself (From == To).
        message = MIMEText(tb)
        message["From"] = config.SMTP_CREDENTIALS["username"]
        message["To"] = config.SMTP_CREDENTIALS["username"]
        message["Subject"] = "[YunNet] Encountered exception."
        await SMTP.send_message(message)
    return messages.INTERNAL_SERVER_ERROR
def init_homefolder(folder, outfile):
    """Create the flashlight home folder and seed an empty outfile if missing.

    Idempotent: an existing folder is left alone (the original raised an
    uncaught FileExistsError on a second run), and an existing outfile is
    never overwritten, so previously compacted data survives restarts.

    :param folder: home directory to create.
    :param outfile: JSON file initialized with an empty list when absent.
    :return: True on success, False when permissions deny access.
    """
    try:
        os.makedirs(folder, exist_ok=True)
        # Seed the outfile only when it does not exist yet, so re-running
        # initialization cannot wipe already-collected data.
        if not os.path.exists(outfile):
            with open(outfile, 'w+') as f:
                json.dump([], f)
    except PermissionError:
        logger.critical("PermissionDenied: FlashLight cannot use its Home :O")
        return False
    return True
async def db_update_test(scripts_dir, script, user, organization, team, package=None, version=None):
    """Create or refresh the Test document backing a markdown test script.

    :param scripts_dir: root directory containing the scripts; None aborts.
    :param script: path of the .md script, relative to scripts_dir.
    :param user: author recorded on a newly created Test.
    :param organization: owning organization document.
    :param team: owning team document, or None.
    :param package: test-suite package the script was installed from, if any.
    :param version: version of that package.
    :return: the Test document, or None when the input is not a usable script.
    """
    if scripts_dir is None:
        return None
    # Only markdown files are treated as test scripts.
    if not script.endswith('.md'):
        return None
    basename = os.path.basename(script)
    dirname = os.path.dirname(script)
    # The suite name is the script's file name without the .md extension.
    test_suite = os.path.splitext(basename)[0]
    test = await Test.find_one({
        'test_suite': test_suite,
        'path': dirname,
        'organization': organization.pk,
        'team': team.pk if team else None
    })
    if not test:
        # First time this script is seen: create a fresh Test record.
        test = Test(path=dirname,
                    author=user,
                    organization=organization,
                    test_suite=test_suite,
                    package=package,
                    package_version=version)
        if team:
            test.team = team
        test.create_date = datetime.datetime.utcnow()
        await test.commit()
    else:
        if package and version:
            # Re-installation from a package: record the new origin/version.
            test.package = package
            test.package_version = version
        else:
            # Updated outside a package install: flag the owning package
            # (if there is one) as locally modified.
            package = await test.package.fetch()
            if package:
                package.modified = True
                await package.commit()
        test.update_date = datetime.datetime.utcnow()
        await test.commit()
    # Parse the markdown and copy its metadata onto the Test document;
    # only commit again if the parse actually changed something.
    ret = await update_test_from_md(os.path.join(scripts_dir, script), test)
    if ret:
        await test.commit()
        logger.critical(f'Update test suite for {script}')
    return test
async def _connect(self):
    """Connect to the MQTT broker and dispatch timer messages until shutdown.

    Subscribes to the timer topic, runs the backend once immediately, then
    re-runs it for every b'timer' payload received while the app is running.
    A broker ClientException shuts the application down.
    """
    try:
        await self.broker.connect(self.app.broker_url)
        logger.info("Backend connected to broker")
        await self.broker.subscribe([(self.timer.topic, mqtt_client.QOS_0)])
        # Run once at startup before waiting on timer ticks.
        await self.backend()
        while self.app.is_running:
            message = await self.broker.deliver_message()
            logger.debug("BROKER topic={}, payload={}".format(
                message.topic, message.data))
            if message.topic == self.timer.topic:
                # Only the literal b'timer' payload triggers a backend run.
                if message.data == b'timer':
                    await self.backend()
    except mqtt_client.ClientException:
        logger.critical("Unable to connect to broker! Shutting down.")
        # NOTE(review): this calls the module-level `app`, unlike the
        # `self.app` used everywhere else in this method — confirm whether
        # `self.app.stop()` was intended.
        app.stop()
async def init(app, loop):
    """Sanic startup listener: configure secrets, logging and backend clients.

    Initializes (in order): the JWT secret, optional socket logging, the
    shared aiohttp session, SMTP, the MongoDB log sink, the aiomysql pool,
    and the MAC-updater background task. Any failure is logged with a full
    traceback and re-raised so startup aborts.

    :param app: the Sanic application being started.
    :param loop: the event loop the server runs on.
    """
    if not config.CUSTOM_JWT_SECRET:
        logger.info("Using random JWT secret.")
        # A JWT signing secret must come from a CSPRNG; SystemRandom draws
        # from os.urandom, unlike the default Mersenne-Twister `random`.
        rng = random.SystemRandom()
        app.config.JWT["jwtSecret"] = "".join(
            rng.choice(string.digits + string.ascii_letters)
            for _ in range(64))
    if config.LOGGING_SOCKET_ENABLED:
        sh = logging.handlers.SocketHandler(**config.LOGGING_SOCKET)
        logger.addHandler(sh)
        logger.info("Socket handler initialized.")
    try:
        # init aiohttp session
        app.aiohttp_session = aiohttp.ClientSession(loop=loop)
        await aiohttpSession.init({"limit": 200})
        # init SMTP client (always on outside DEBUG, opt-in inside DEBUG)
        if config.DEBUG_ENABLE_SMTP or (not config.DEBUG):
            logger.info("Initializing SMTP...")
            await SMTP.init(config.SMTP_CLIENT_PARAMETERS,
                            config.SMTP_CREDENTIALS)
        # init mongo log
        if config.DEBUG_ENABLE_MONGO or (not config.DEBUG):
            logger.info("Initializing MongoDB...")
            await MongoDB.init(config.MONGODB_URI)
            app.mongo = SimpleNamespace()
            app.mongo.motor_client = AsyncIOMotorClient(config.MONGODB_URI)
            app.mongo.log_db = app.mongo.motor_client["yunnet"]
            app.mongo.log_collection = app.mongo.log_db["log"]
        # init aiomysql pool
        if config.DEBUG_ENABLE_SQL or (not config.DEBUG):
            logger.info("Initializing aiomysql...")
            await SQLPool.init_pool(**config.SQL_CREDENTIALS)
            SQLPool.debug = config.DEBUG_PRINT_SQL_ONLY
        # MAC updating task
        logger.info("Initializing Switch Updater")
        loop.create_task(switch_update(config.MAC_UPDATER_ENDPOINT))
    except Exception:
        logger.critical(traceback.format_exc())
        # Bare `raise` is the idiomatic re-raise; `raise ex` would add this
        # handler frame to the traceback for no benefit.
        raise
async def event_handler_cancel_task(app, event):
    """Handle a cancel-task event: detach the task from its queue and stop it.

    The event message carries an endpoint uid, a queue priority and a task
    id. With an endpoint uid the specific per-endpoint queue is resolved;
    without one the queue is found by membership of the task. The handler
    then cancels the task according to its current status ('waiting' or
    'running'), terminating the robot process when one is attached.
    """
    global ROBOT_PROCESSES, TASK_PER_ENDPOINT
    endpoint_uid = event.message['endpoint_uid']  # already uuid.UUID type
    priority = event.message['priority']
    task_id = event.message['task_id']
    organization = await event.organization.fetch()
    team = None
    if event.team:
        team = await event.team.fetch()
    task = await Task.find_one({'_id': ObjectId(task_id)})
    if not task:
        logger.error('Task not found for ' + task_id)
        return
    if endpoint_uid:
        # Resolve the endpoint and its queue at the requested priority.
        endpoint = await Endpoint.find_one({
            'uid': endpoint_uid,
            'organization': organization.pk,
            'team': team.pk if team else None
        })
        if not endpoint:
            logger.error('Endpoint not found for {}'.format(endpoint_uid))
            return
        taskqueue = await TaskQueue.find_one({
            'organization': organization.pk,
            'team': team.pk if team else None,
            'endpoint': endpoint.pk,
            'priority': priority
        })
        if not taskqueue:
            logger.error('Task queue not found for {}:{}'.format(
                endpoint_uid, priority))
            return
    else:
        # No endpoint given: locate whichever queue still holds the task,
        # remove it and mark it cancelled — nothing can be running yet.
        taskqueue = await TaskQueue.find_one({
            'organization': organization.pk,
            'team': team.pk if team else None,
            'priority': priority,
            'tasks': task.pk
        })
        if not taskqueue:
            logger.error('Task queue not found for task {}'.format(task_id))
            return
        taskqueue.tasks.remove(task)
        await taskqueue.commit()
        task.status = 'cancelled'
        await task.commit()
        logger.info('Waiting task cancelled')
        return
    if task.status == 'waiting':
        if taskqueue.running_task and taskqueue.running_task == task and str(
                endpoint.pk) in TASK_PER_ENDPOINT:
            # The task has been picked up but not started yet: poll up to
            # ~2s for it to transition to 'running', then fall through to
            # the running-task handling below.
            logger.critical('Waiting task to run')
            for i in range(20):
                await task.reload()
                if task.status == 'running':
                    break
                await asyncio.sleep(0.1)
            else:
                # for/else: the poll timed out — cancel it where it stands.
                logger.error('Waiting task to run timeouted out')
                del taskqueue.running_task
                await taskqueue.commit()
                task.status = 'cancelled'
                await task.commit()
        else:
            # Plain waiting task: clear any stale running marker, drop it
            # from the queue and mark it cancelled.
            if taskqueue.running_task and taskqueue.running_task == task:
                del taskqueue.running_task
                await taskqueue.commit()
            taskqueue.tasks.remove(task)
            await taskqueue.commit()
            task.status = 'cancelled'
            await task.commit()
            logger.info('Waiting task cancelled without process running')
            return
    if task.status == 'running':
        if str(endpoint.pk) in TASK_PER_ENDPOINT:
            if str(task.pk) in ROBOT_PROCESSES:
                # Cancel first, then terminate the robot process; cleanup of
                # ROBOT_PROCESSES happens in the task loop on process exit.
                del taskqueue.running_task
                await taskqueue.commit()
                task.status = 'cancelled'
                await task.commit()
                ROBOT_PROCESSES[str(task.pk)].terminate()
                # del ROBOT_PROCESSES[task.pk] # will be done in the task loop when robot process exits
                logger.info('Running task cancelled with process running')
                return
            else:
                logger.error(
                    'Task process not found when cancelling task (%s)' % task_id)
        # No live process attached: just clear the queue state and cancel.
        del taskqueue.running_task
        await taskqueue.commit()
        task.status = 'cancelled'
        await task.commit()
        logger.info('Running task cancelled without process running')
async def install_test_suite(package, user, organization, team, pypi_root, proprietary, version=None, installed=None, recursive=False):
    """Install a test-suite package (and its dependencies) into the scripts tree.

    Recursively installs required packages first, extracts libraries and
    scripts out of the package archive, registers each script as a Test
    document, removes stale tests, and bumps the package download counter.

    :param package: the package document to install.
    :param user: user performing the installation.
    :param organization: owning organization document.
    :param team: owning team document, or None.
    :param pypi_root: root path of the local package repository.
    :param proprietary: proprietary flag (unused here; kept for the
        recursive call signature).
    :param version: specific package version, or None for the default.
    :param installed: accumulator of already-installed packages; must be
        provided by the caller (also used to detect the first package).
    :param recursive: whether dependencies are installed recursively.
    :return: True on success, False when the package file or a dependency
        could not be installed.
    """
    # Empty accumulator means this is the top-level package being installed.
    first_package = len(installed) == 0
    pkg_file = await package.get_package_by_version(version)
    if not pkg_file:
        logger.error(
            f'package file not found for {package.name} with version {version}'
        )
        return False
    pkg_file_path = pypi_root / package.package_name / pkg_file.filename
    # Already installed during this run: nothing to do.
    if package in installed:
        return True
    requires = await get_package_requires(str(pkg_file_path), organization,
                                          team, 'Test Suite', installed)
    if requires:
        # Depth-first: dependencies are installed before this package.
        for pkg, ver in requires:
            ret = await install_test_suite(pkg, user, organization, team,
                                           pypi_root, proprietary,
                                           version=ver, installed=installed)
            if not ret:
                logger.error(
                    f'Failed to install dependent package {package.name}')
                return False
    # always install the first package if not recursively install
    if not recursive and not first_package:
        pkg_file.modify(inc__download_times=1)
        return True
    scripts_root = await get_user_scripts_root(organization=organization,
                                               team=team)
    libraries_root = await get_back_scripts_root(organization=organization,
                                                 team=team)
    # First pass over the archive: clear out any directories about to be
    # re-extracted so stale files do not linger.
    async with ZipFile(pkg_file_path) as zf:
        for f in zf.namelist():
            if f.startswith('EGG-INFO'):
                continue
            dirname = os.path.dirname(f)
            if await async_exists(scripts_root / dirname):
                await async_rmtree(scripts_root / dirname)
            if await async_exists(libraries_root / dirname):
                await async_rmtree(libraries_root / dirname)
    # Second pass: extract libraries and scripts to their respective roots.
    async with ZipFile(pkg_file_path) as zf:
        libraries = (f for f in zf.namelist()
                     if not f.startswith('EGG-INFO') and '/scripts/' not in f)
        for l in libraries:
            await zf.extract(l, libraries_root)
        scripts = [f for f in zf.namelist() if '/scripts/' in f]
        for s in scripts:
            await zf.extract(s, scripts_root)
    new_tests = []
    all_tests = []
    # Flatten each package's scripts/ subfolder into the package folder and
    # register every markdown script as a Test document.
    for pkg_name in set((s.split('/', 1)[0] for s in scripts)):
        for f in await async_listdir(scripts_root / pkg_name / 'scripts'):
            await async_move(str(scripts_root / pkg_name / 'scripts' / f),
                             scripts_root / pkg_name)
            test = await db_update_test(scripts_root,
                                        os.path.join(pkg_name, f), user,
                                        organization, team, package, version)
            if test:
                new_tests.append(test)
        await async_rmtree(scripts_root / pkg_name / 'scripts')
        async for test in Test.find({
                'path': pkg_name,
                'organization': organization.pk,
                'team': team.pk if team else None
        }):
            all_tests.append(test)
    # Tests registered before but not present in this archive are stale.
    tests = set(all_tests) - set(new_tests)
    for test in tests:
        logger.critical(f'Remove the staled test suite: {test.test_suite}')
        await test.delete()
    await package.collection.find_one_and_update(
        {'_id': package.pk}, {'$inc': {
            'download_times': 1
        }})
    return True
async def delete(self, request):
    """Delete an endpoint, draining and cancelling its task queues first.

    Expects `endpoint_uid` in the JSON body. An endpoint with no attached
    queues is removed immediately; otherwise each queue is flagged for
    deletion, flushed, running tasks are cancelled via the event queue,
    and a final start-task event with `to_delete` completes the teardown.

    :return: a sanic JSON response (400 missing field, 404 unknown
        endpoint, EPERM on event-queue failure, SUCCESS otherwise).
    """
    data = request.json
    organization = request.ctx.organization
    team = request.ctx.team
    endpoint_uid = data.get('endpoint_uid', None)
    if endpoint_uid is None:
        return json(
            response_message(EINVAL, 'Field endpoint_uid is required'), 400)
    endpoint = await Endpoint.find_one({'uid': uuid.UUID(endpoint_uid)})
    if endpoint is None:
        return json(response_message(EINVAL, 'Endpoint not found'), 404)
    # Fast path: nothing queued against this endpoint, delete directly.
    if await TaskQueue.count_documents({
            'endpoint': endpoint.pk,
            'organization': organization.pk,
            'team': team.pk if team else None
    }) == 0:
        await endpoint.delete()
        logger.critical(
            'Deleting the endpoint without any attached task queues')
        return json(response_message(SUCCESS))
    taskqueues = await TaskQueue.find({
        'endpoint': endpoint.pk,
        'organization': organization.pk,
        'team': team.pk if team else None
    }).to_list(len(QUEUE_PRIORITY))
    # Mark every queue for deletion before flushing so no new work lands.
    for taskqueue in taskqueues:
        taskqueue.to_delete = True
        await taskqueue.commit()
    await asyncio.gather(*[q.flush(cancelled=True) for q in taskqueues])
    # Any task still running must be cancelled through the event queue.
    for q in taskqueues:
        if q.running_task:
            running_task = await q.running_task.fetch()
            message = {
                'endpoint_uid': endpoint_uid,
                'priority': q.priority,
                'task_id': str(running_task.pk)
            }
            ret = await push_event(organization=organization,
                                   team=team,
                                   code=EVENT_CODE_CANCEL_TASK,
                                   message=message)
            if not ret:
                return json(
                    response_message(
                        EPERM, 'Pushing the event to event queue failed'))
    # Kick the task runner once more with to_delete so it finishes teardown.
    ret = await push_event(organization=organization,
                           team=team,
                           code=EVENT_CODE_START_TASK,
                           message={
                               'endpoint_uid': endpoint_uid,
                               'to_delete': True
                           })
    if not ret:
        return json(
            response_message(EPERM,
                             'Pushing the event to event queue failed'))
    return json(response_message(SUCCESS))
# NOTE(review): this chunk opens mid-function — the two statements below are
# the tail of an argument-parsing helper (its `def` line is above this view),
# which finishes filling defaults and returns the parsed namespace.
if not args.config:
    # Don't specify this as an argument default or else it will always be
    # included in the list.
    args.config = "config.ini"
return args


def interactive(argv=None):
    """Execute the application CLI and return the WSGI/ASGI app.

    :param argv: argument list to parse (sys.argv by default)
    """
    args = _args(argv)
    logger.debug(args.warn)
    logger.debug("starting execution")
    # NOTE(review): the file handle passed to read_file is never closed;
    # consider wrapping in `with open(...)`.
    config.read_file(open(args.config, 'rt'))
    # do not move, needs to be imported after config is set up
    from ci_hooks_app import server
    return server.github_app


if __name__ == "__main__":
    try:
        status = main()
    except:
        # Deliberately broad: log the fatal error, then re-raise so the
        # stack trace still prints and the process exits non-zero.
        logger.critical("shutting down due to fatal error")
        raise  # print stack trace
    else:
        raise SystemExit(status)