Example #1
def start(config_file,
          shock_url=None,
          mongo_host=None,
          mongo_port=None,
          rabbit_host=None,
          rabbit_port=None,
          deploy_config=None):

    logger.info("============================================")
    logger.info("  Starting Assembly Service Control Server")
    logger.info("============================================")

    # Read config file
    cparser = SafeConfigParser()
    if deploy_config:
        cparser.read(deploy_config)
        logger.info("[.] Found Deployment Config: {}".format(deploy_config))
    else:
        cparser.read(config_file)

    if not shock_url:
        shock_host = cparser.get('shock', 'host')
        shock_url = shock.verify_shock_url(shock_host)
    if not mongo_host:
        mongo_host = cparser.get('assembly', 'mongo_host')
    if not mongo_port:
        mongo_port = int(cparser.get('assembly', 'mongo_port'))
    if not rabbit_host:
        rabbit_host = cparser.get('assembly', 'rabbitmq_host')
    if not rabbit_port:
        rabbit_port = int(cparser.get('assembly', 'rabbitmq_port'))

    logger.info("[.] Shock URL: {}".format(shock_url))
    logger.info("[.] MongoDB host: {}".format(mongo_host))
    logger.info("[.] MongoDB port: {}".format(mongo_port))
    logger.info("[.] RabbitMQ host: {}".format(rabbit_host))
    logger.info("[.] RabbitMQ port: {}".format(rabbit_port))

    # Check MongoDB status
    try:
        connection = pymongo.mongo_client.MongoClient(mongo_host, mongo_port)
        logger.debug("MongoDB Info: {}".format(connection.server_info()))
    except pymongo.errors.PyMongoError as e:
        logger.error("MongoDB connection error: {}".format(e))
        sys.exit('MongoDB error: {}'.format(e))
    logger.info("[.] MongoDB connection successful.")

    router_kwargs = {
        'shock_url': shock_url,
        'mongo_host': mongo_host,
        'mongo_port': mongo_port,
        'rabbit_host': rabbit_host,
        'rabbit_port': rabbit_port
    }

    router_process = multiprocessing.Process(name='router',
                                             target=start_router,
                                             args=(config_file, ),
                                             kwargs=router_kwargs)
    router_process.start()
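
A minimal launcher sketch for the start() function above, assuming it is importable alongside this module; the flag names and entry-point wiring are illustrative, not the project's actual CLI.

# Hypothetical CLI wrapper; flag names and defaults are assumptions.
import argparse

if __name__ == '__main__':
    ap = argparse.ArgumentParser(description='Assembly Service control server')
    ap.add_argument('--config', required=True, help='path to the service config file')
    ap.add_argument('--shock-url')
    ap.add_argument('--mongo-host')
    ap.add_argument('--mongo-port', type=int)
    ap.add_argument('--rabbit-host')
    ap.add_argument('--rabbit-port', type=int)
    args = ap.parse_args()
    start(args.config,
          shock_url=args.shock_url,
          mongo_host=args.mongo_host,
          mongo_port=args.mongo_port,
          rabbit_host=args.rabbit_host,
          rabbit_port=args.rabbit_port)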
Example #2
def start(config_file, shock_url=None, mongo_host=None, mongo_port=None,
          rabbit_host=None, rabbit_port=None, deploy_config=None):

    logger.info("============================================")
    logger.info("  Starting Assembly Service Control Server")
    logger.info("============================================")

    # Read config file
    cparser = SafeConfigParser()
    if deploy_config:
        cparser.read(deploy_config)
        logger.info("[.] Found Deployment Config: {}".format(deploy_config))
    else:
        cparser.read(config_file)

    if not shock_url:
        shock_host = cparser.get('shock', 'host')
        shock_url = shock.verify_shock_url(shock_host)
    if not mongo_host:
        mongo_host = cparser.get('assembly', 'mongo_host')
    if not mongo_port:
        mongo_port = int(cparser.get('assembly', 'mongo_port'))
    if not rabbit_host:
        rabbit_host = cparser.get('assembly', 'rabbitmq_host')
    if not rabbit_port:
        rabbit_port = int(cparser.get('assembly', 'rabbitmq_port'))

    logger.info("[.] Shock URL: {}".format(shock_url))
    logger.info("[.] MongoDB host: {}".format(mongo_host))
    logger.info("[.] MongoDB port: {}".format(mongo_port))
    logger.info("[.] RabbitMQ host: {}".format(rabbit_host))
    logger.info("[.] RabbitMQ port: {}".format(rabbit_port))

    # Check MongoDB status
    try:
        connection = pymongo.mongo_client.MongoClient(mongo_host, mongo_port)
        logger.debug("MongoDB Info: {}".format(connection.server_info()))
    except pymongo.errors.PyMongoError as e:
        logger.error("MongoDB connection error: {}".format(e))
        sys.exit('MongoDB error: {}'.format(e))
    logger.info("[.] MongoDB connection successful.")

    router_kwargs = {'shock_url': shock_url,
                     'mongo_host': mongo_host, 'mongo_port': mongo_port,
                     'rabbit_host': rabbit_host, 'rabbit_port': rabbit_port}

    router_process = multiprocessing.Process(name='router', target=start_router,
                                             args=(config_file,), kwargs=router_kwargs)
    router_process.start()
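
The config file these start() variants read needs at least the sections and keys referenced by the cparser.get() calls; below is a hypothetical minimal stub inferred from those calls, with placeholder hosts and the standard MongoDB/RabbitMQ default ports.

# Hypothetical minimal config inferred from the cparser.get() calls above;
# host names are placeholders only.
MINIMAL_CONFIG = """\
[shock]
host = shock.example.org

[assembly]
mongo_host = localhost
mongo_port = 27017
rabbitmq_host = localhost
rabbitmq_port = 5672
"""

with open('assembly.local.cfg', 'w') as fh:
    fh.write(MINIMAL_CONFIG)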
Example #3
    def compute(self, body):
        self.job_list_lock.acquire()
        try:
            job_data = self.prepare_job_data(body)
            self.job_list.append(job_data)
        except Exception:
            logger.error("Error in adding new job to job_list")
            raise
        finally:
            self.job_list_lock.release()

        status = ''
        logger.debug('job_data = {}'.format(job_data))

        params = json.loads(body)
        job_id = params['job_id']
        data_id = params['data_id']
        uid = params['_id']
        user = params['ARASTUSER']
        token = params['oauth_token']
        pipelines = params.get('pipeline')
        recipe = params.get('recipe')
        wasp_in = params.get('wasp')
        jobpath = os.path.join(self.datapath, user, str(data_id), str(job_id))

        url = shock.verify_shock_url(self.shockurl)

        self.start_time = time.time()

        timer_thread = UpdateTimer(self.metadata, 29, time.time(), uid, self.done_flag)
        timer_thread.start()

        #### Parse pipeline to wasp exp
        reload(recipes)
        if recipe:
            try:
                wasp_exp = recipes.get(recipe[0], job_id)
            except AttributeError:
                raise Exception('"{}" recipe not found.'.format(recipe[0]))
        elif wasp_in:
            wasp_exp = wasp_in[0]
        elif not pipelines:
            wasp_exp = recipes.get('auto', job_id)
        elif pipelines:
            ## Legacy client
            if pipelines[0] == 'auto':
                wasp_exp = recipes.get('auto', job_id)
            ##########
            else:
                if type(pipelines[0]) is not list: # --assemblers
                    pipelines = [pipelines]
                all_pipes = []
                for p in pipelines:
                    all_pipes += self.pmanager.parse_input(p)
                logger.debug("pipelines = {}".format(all_pipes))
                wasp_exp = wasp.pipelines_to_exp(all_pipes, params['job_id'])
        else:
            raise asmtypes.ArastClientRequestError('Malformed job request.')
        logger.debug('Wasp Expression: {}'.format(wasp_exp))
        w_engine = wasp.WaspEngine(self.pmanager, job_data, self.metadata)

        ###### Run Job
        try:
            w_engine.run_expression(wasp_exp, job_data)
            ###### Upload all result files and place them into appropriate tags
            uploaded_fsets = job_data.upload_results(url, token)

            # Format report
            new_report = open('{}.tmp'.format(self.out_report_name), 'w')

            ### Log errors
            if len(job_data['errors']) > 0:
                new_report.write('PIPELINE ERRORS\n')
                for i, e in enumerate(job_data['errors']):
                    new_report.write('{}: {}\n'.format(i, e))
            try: ## Get Quast output
                quast_report = job_data['wasp_chain'].find_module('quast')['data'].find_type('report')[0].files[0]
                with open(quast_report) as q:
                    new_report.write(q.read())
            except Exception:
                new_report.write('No Summary File Generated!\n\n\n')
            self.out_report.close()
            with open(self.out_report_name) as old:
                new_report.write(old.read())

            for log in job_data['logfiles']:
                new_report.write('\n{1} {0} {1}\n'.format(os.path.basename(log), '='*20))
                with open(log) as l:
                    new_report.write(l.read())

            ### Log tracebacks
            if len(job_data['tracebacks']) > 0:
                new_report.write('EXCEPTION TRACEBACKS\n')
                for i, e in enumerate(job_data['tracebacks']):
                    new_report.write('{}: {}\n'.format(i, e))

            new_report.close()
            os.remove(self.out_report_name)
            shutil.move(new_report.name, self.out_report_name)
            res = self.upload(url, user, token, self.out_report_name)
            report_info = asmtypes.FileInfo(self.out_report_name, shock_url=url, shock_id=res['data']['id'])

            self.metadata.update_job(uid, 'report', [asmtypes.set_factory('report', [report_info])])
            status = 'Complete with errors' if job_data.get('errors') else 'Complete'

            ## Make compatible with JSON dumps()
            del job_data['out_report']
            del job_data['initial_reads']
            del job_data['raw_reads']
            self.metadata.update_job(uid, 'data', job_data)
            self.metadata.update_job(uid, 'result_data', uploaded_fsets)
            ###### Legacy Support #######
            # list.append() returns None, so keep only the in-place mutation
            uploaded_fsets.append(asmtypes.set_factory('report', [report_info]))
            contigsets = [fset for fset in uploaded_fsets if fset.type == 'contigs' or fset.type == 'scaffolds']
            download_ids = {fi['filename']: fi['shock_id'] for fset in uploaded_fsets for fi in fset['file_infos']}
            contig_ids = {fi['filename']: fi['shock_id'] for fset in contigsets for fi in fset['file_infos']}
            self.metadata.update_job(uid, 'result_data_legacy', [download_ids])
            self.metadata.update_job(uid, 'contig_ids', [contig_ids])
            ###################

            sys.stdout.flush()
            touch(os.path.join(jobpath, "_DONE_"))
            logger.info('============== JOB COMPLETE ===============')

        except asmtypes.ArastUserInterrupt:
            status = 'Terminated by user'
            sys.stdout.flush()
            touch(os.path.join(jobpath, "_CANCELLED__"))
            logger.info('============== JOB KILLED ===============')

        finally:
            self.remove_job_from_lists(job_data)
            logger.debug('Reinitialize plugin manager...') # Reinitialize to get live changes
            self.pmanager = ModuleManager(self.threads, self.kill_list, self.kill_list_lock, self.job_list, self.binpath, self.modulebin)

        self.metadata.update_job(uid, 'status', status)
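
For reference, compute() expects the message body to be a JSON document carrying the fields it looks up; the sketch below shows that shape with placeholder values (only the key names come from the params lookups above, everything else is illustrative).

# Hypothetical job message; key names are taken from the code above,
# all values are placeholders.
import json

example_body = json.dumps({
    '_id': 'mongo-object-id',
    'job_id': 42,
    'data_id': 7,
    'ARASTUSER': 'someuser',
    'oauth_token': 'bearer-token',
    'pipeline': [['velvet']],   # optional; 'recipe' or 'wasp' may be used instead
})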
Example #4
    def compute(self, body):
        self.job_list_lock.acquire()
        try:
            job_data = self.prepare_job_data(body)
            self.job_list.append(job_data)
        except Exception:
            logger.error("Error in adding new job to job_list")
            raise
        finally:
            self.job_list_lock.release()

        status = ''
        logger.debug('job_data = {}'.format(job_data))

        params = json.loads(body)
        job_id = params['job_id']
        data_id = params['data_id']
        uid = params['_id']
        user = params['ARASTUSER']
        token = params['oauth_token']
        pipelines = params.get('pipeline')
        recipe = params.get('recipe')
        wasp_in = params.get('wasp')
        jobpath = os.path.join(self.datapath, user, str(data_id), str(job_id))

        url = shock.verify_shock_url(self.shockurl)

        self.start_time = time.time()

        timer_thread = UpdateTimer(self.metadata, 29, time.time(), uid,
                                   self.done_flag)
        timer_thread.start()

        #### Parse pipeline to wasp exp
        reload(recipes)
        if recipe:
            try:
                wasp_exp = recipes.get(recipe[0], job_id)
            except AttributeError:
                raise Exception('"{}" recipe not found.'.format(recipe[0]))
        elif wasp_in:
            wasp_exp = wasp_in[0]
        elif not pipelines:
            wasp_exp = recipes.get('auto', job_id)
        elif pipelines:
            ## Legacy client
            if pipelines[0] == 'auto':
                wasp_exp = recipes.get('auto', job_id)
            ##########
            else:
                if type(pipelines[0]) is not list:  # --assemblers
                    pipelines = [pipelines]
                all_pipes = []
                for p in pipelines:
                    all_pipes += self.pmanager.parse_input(p)
                logger.debug("pipelines = {}".format(all_pipes))
                wasp_exp = wasp.pipelines_to_exp(all_pipes, params['job_id'])
        else:
            raise asmtypes.ArastClientRequestError('Malformed job request.')
        logger.debug('Wasp Expression: {}'.format(wasp_exp))
        w_engine = wasp.WaspEngine(self.pmanager, job_data, self.metadata)

        ###### Run Job
        try:
            w_engine.run_expression(wasp_exp, job_data)
            ###### Upload all result files and place them into appropriate tags
            uploaded_fsets = job_data.upload_results(url, token)

            # Format report
            new_report = open('{}.tmp'.format(self.out_report_name), 'w')

            ### Log errors
            if len(job_data['errors']) > 0:
                new_report.write('PIPELINE ERRORS\n')
                for i, e in enumerate(job_data['errors']):
                    new_report.write('{}: {}\n'.format(i, e))
            try:  ## Get Quast output
                quast_report = job_data['wasp_chain'].find_module(
                    'quast')['data'].find_type('report')[0].files[0]
                with open(quast_report) as q:
                    new_report.write(q.read())
            except Exception:
                new_report.write('No Summary File Generated!\n\n\n')
            self.out_report.close()
            with open(self.out_report_name) as old:
                new_report.write(old.read())

            for log in job_data['logfiles']:
                new_report.write('\n{1} {0} {1}\n'.format(
                    os.path.basename(log), '=' * 20))
                with open(log) as l:
                    new_report.write(l.read())

            ### Log tracebacks
            if len(job_data['tracebacks']) > 0:
                new_report.write('EXCEPTION TRACEBACKS\n')
                for i, e in enumerate(job_data['tracebacks']):
                    new_report.write('{}: {}\n'.format(i, e))

            new_report.close()
            os.remove(self.out_report_name)
            shutil.move(new_report.name, self.out_report_name)
            res = self.upload(url, user, token, self.out_report_name)
            report_info = asmtypes.FileInfo(self.out_report_name,
                                            shock_url=url,
                                            shock_id=res['data']['id'])

            self.metadata.update_job(
                uid, 'report', [asmtypes.set_factory('report', [report_info])])
            status = 'Complete with errors' if job_data.get(
                'errors') else 'Complete'

            ## Make compatible with JSON dumps()
            del job_data['out_report']
            del job_data['initial_reads']
            del job_data['raw_reads']
            self.metadata.update_job(uid, 'data', job_data)
            self.metadata.update_job(uid, 'result_data', uploaded_fsets)
            ###### Legacy Support #######
            # list.append() returns None, so keep only the in-place mutation
            uploaded_fsets.append(
                asmtypes.set_factory('report', [report_info]))
            contigsets = [
                fset for fset in uploaded_fsets
                if fset.type == 'contigs' or fset.type == 'scaffolds'
            ]
            download_ids = {
                fi['filename']: fi['shock_id']
                for fset in uploaded_fsets for fi in fset['file_infos']
            }
            contig_ids = {
                fi['filename']: fi['shock_id']
                for fset in contigsets for fi in fset['file_infos']
            }
            self.metadata.update_job(uid, 'result_data_legacy', [download_ids])
            self.metadata.update_job(uid, 'contig_ids', [contig_ids])
            ###################

            sys.stdout.flush()
            touch(os.path.join(jobpath, "_DONE_"))
            logger.info('============== JOB COMPLETE ===============')

        except asmtypes.ArastUserInterrupt:
            status = 'Terminated by user'
            sys.stdout.flush()
            touch(os.path.join(jobpath, "_CANCELLED__"))
            logger.info('============== JOB KILLED ===============')

        finally:
            self.remove_job_from_lists(job_data)
            logger.debug('Reinitialize plugin manager...')  # Reinitialize to get live changes
            self.pmanager = ModuleManager(self.threads, self.kill_list,
                                          self.kill_list_lock, self.job_list,
                                          self.binpath, self.modulebin)

        self.metadata.update_job(uid, 'status', status)
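
The report handling in both compute() variants follows a write-to-temp-then-replace pattern: the new report is built in a .tmp file, the old file is removed, and the temp file is moved into place. Here is a standalone sketch of the same idea with illustrative names.

# Standalone sketch of the .tmp-then-move rewrite used above;
# function and argument names are illustrative.
import os
import shutil

def rewrite_report(report_path, header_lines):
    tmp_path = '{}.tmp'.format(report_path)
    with open(tmp_path, 'w') as new_report:
        for line in header_lines:
            new_report.write(line + '\n')
        with open(report_path) as old:
            new_report.write(old.read())
    os.remove(report_path)
    shutil.move(tmp_path, report_path)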
Example #5
def start(config_file, shock_url=None,
          mongo_host=None, mongo_port=None,
          rabbit_host=None, rabbit_port=None):

    global parser, metadata, rjobmon
    # logging.basicConfig(level=logging.DEBUG)

    parser = SafeConfigParser()
    parser.read(config_file)
    collections = {'jobs': parser.get('meta', 'mongo.collection'),
                   'auth': parser.get('meta', 'mongo.collection.auth'),
                   'data': parser.get('meta', 'mongo.collection.data'),
                   'running': parser.get('meta', 'mongo.collection.running')}

    # Config precedence: args > config file

    if shock_url:
        parser.set('shock', 'host', shock.verify_shock_url(shock_url))
    if mongo_host:
        parser.set('assembly', 'mongo_host', mongo_host)
    if mongo_port:
        parser.set('assembly', 'mongo_port', str(mongo_port))
    if rabbit_host:
        parser.set('assembly', 'rabbitmq_host', rabbit_host)
    if rabbit_port:
        parser.set('assembly', 'rabbitmq_port', str(rabbit_port))

    metadata = meta.MetadataConnection(parser.get('assembly', 'mongo_host'),
                                       int(parser.get('assembly', 'mongo_port')),
                                       parser.get('meta', 'mongo.db'),
                                       collections)

    ##### Running Job Monitor #####
    rjobmon = RunningJobsMonitor(metadata)
    cherrypy.process.plugins.Monitor(cherrypy.engine, rjobmon.purge,
                                     frequency=int(parser.get('monitor', 'running_job_freq'))).subscribe()

    ##### CherryPy ######
    conf = {
        'global': {
            'server.socket_host': '0.0.0.0',
            'server.socket_port': int(parser.get('assembly', 'cherrypy_port')),
            'log.screen': True,
            'ar_shock_url': parser.get('shock', 'host'),
            'environment': 'production'
            },
    }

    static_root = parser.get('web_serve', 'root')

    root = Root()
    root.user = UserResource()
    root.module = ModuleResource()
    root.recipe = RecipeResource()
    root.shock = ShockResource({"shockurl": get_upload_url()})
    root.static = StaticResource(static_root)

    #### Admin Routes ####
    rmq_host = parser.get('assembly', 'rabbitmq_host')
    rmq_mp = parser.get('rabbitmq', 'management_port')
    rmq_user = parser.get('rabbitmq', 'management_user')
    rmq_pass = parser.get('rabbitmq', 'management_pass')
    root.admin = SystemResource(rmq_host, rmq_mp, rmq_user, rmq_pass)

    cherrypy.request.hooks.attach('before_finalize', CORS)
    cherrypy.quickstart(root, '/', conf)
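
The RunningJobsMonitor wiring above relies on CherryPy's Monitor plugin, which calls a function on a fixed interval inside the running engine. A minimal self-contained sketch of that pattern follows; the 60-second interval, callback, and route are illustrative, not the project's configuration.

# Minimal CherryPy Monitor sketch; names and interval are assumptions.
import cherrypy

class HealthRoot(object):
    @cherrypy.expose
    def index(self):
        return 'ok'

def purge_stale_jobs():
    cherrypy.log('purging stale running-job records')

if __name__ == '__main__':
    cherrypy.process.plugins.Monitor(cherrypy.engine, purge_stale_jobs,
                                     frequency=60).subscribe()
    cherrypy.quickstart(HealthRoot(), '/')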
Example #6
def start(config_file, shock_url=None, mongo_host=None, mongo_port=None, rabbit_host=None, rabbit_port=None):

    global parser, metadata, rjobmon
    logging.basicConfig(level=logging.DEBUG)

    parser = SafeConfigParser()
    parser.read(config_file)
    collections = {
        "jobs": parser.get("meta", "mongo.collection"),
        "auth": parser.get("meta", "mongo.collection.auth"),
        "data": parser.get("meta", "mongo.collection.data"),
        "running": parser.get("meta", "mongo.collection.running"),
    }

    # Config precedence: args > config file

    if shock_url:
        parser.set("shock", "host", shock.verify_shock_url(shock_url))
    if mongo_host:
        parser.set("assembly", "mongo_host", mongo_host)
    if mongo_port:
        parser.set("assembly", "mongo_port", str(mongo_port))
    if rabbit_host:
        parser.set("assembly", "rabbitmq_host", rabbit_host)
    if rabbit_port:
        parser.set("assembly", "rabbitmq_port", str(rabbit_port))

    metadata = meta.MetadataConnection(
        parser.get("assembly", "mongo_host"),
        int(parser.get("assembly", "mongo_port")),
        parser.get("meta", "mongo.db"),
        collections,
    )

    ##### Running Job Monitor #####
    rjobmon = RunningJobsMonitor(metadata)
    cherrypy.process.plugins.Monitor(
        cherrypy.engine, rjobmon.purge, frequency=int(parser.get("monitor", "running_job_freq"))
    ).subscribe()

    ##### CherryPy ######
    conf = {
        "global": {
            "server.socket_host": "0.0.0.0",
            "server.socket_port": int(parser.get("assembly", "cherrypy_port")),
            "log.screen": True,
            "ar_shock_url": parser.get("shock", "host"),
        }
    }

    static_root = parser.get("web_serve", "root")

    root = Root()
    root.user = UserResource()
    root.module = ModuleResource()
    root.recipe = RecipeResource()
    root.shock = ShockResource({"shockurl": get_upload_url()})
    root.static = StaticResource(static_root)

    #### Admin Routes ####
    rmq_host = parser.get("assembly", "rabbitmq_host")
    rmq_mp = parser.get("rabbitmq", "management_port")
    rmq_user = parser.get("rabbitmq", "management_user")
    rmq_pass = parser.get("rabbitmq", "management_pass")
    root.admin = SystemResource(rmq_host, rmq_mp, rmq_user, rmq_pass)

    cherrypy.request.hooks.attach("before_finalize", CORS)
    cherrypy.quickstart(root, "/", conf)
Example #7
def start(config_file,
          shock_url=None,
          mongo_host=None,
          mongo_port=None,
          rabbit_host=None,
          rabbit_port=None):

    global parser, metadata, rjobmon
    # logging.basicConfig(level=logging.DEBUG)

    parser = SafeConfigParser()
    parser.read(config_file)
    collections = {
        'jobs': parser.get('meta', 'mongo.collection'),
        'auth': parser.get('meta', 'mongo.collection.auth'),
        'data': parser.get('meta', 'mongo.collection.data'),
        'running': parser.get('meta', 'mongo.collection.running')
    }

    # Config precedence: args > config file

    if shock_url:
        parser.set('shock', 'host', shock.verify_shock_url(shock_url))
    if mongo_host:
        parser.set('assembly', 'mongo_host', mongo_host)
    if mongo_port:
        parser.set('assembly', 'mongo_port', str(mongo_port))
    if rabbit_host:
        parser.set('assembly', 'rabbitmq_host', rabbit_host)
    if rabbit_port:
        parser.set('assembly', 'rabbitmq_port', str(rabbit_port))

    metadata = meta.MetadataConnection(
        parser.get('assembly', 'mongo_host'),
        int(parser.get('assembly', 'mongo_port')),
        parser.get('meta', 'mongo.db'), collections)

    ##### Running Job Monitor #####
    rjobmon = RunningJobsMonitor(metadata)
    cherrypy.process.plugins.Monitor(
        cherrypy.engine,
        rjobmon.purge,
        frequency=int(parser.get('monitor', 'running_job_freq'))).subscribe()

    ##### CherryPy ######
    conf = {
        'global': {
            'server.socket_host': '0.0.0.0',
            'server.socket_port': int(parser.get('assembly', 'cherrypy_port')),
            'log.screen': True,
            'ar_shock_url': parser.get('shock', 'host'),
            'environment': 'production'
        },
    }

    static_root = parser.get('web_serve', 'root')

    root = Root()
    root.user = UserResource()
    root.module = ModuleResource()
    root.recipe = RecipeResource()
    root.shock = ShockResource({"shockurl": get_upload_url()})
    root.static = StaticResource(static_root)

    #### Admin Routes ####
    rmq_host = parser.get('assembly', 'rabbitmq_host')
    rmq_mp = parser.get('rabbitmq', 'management_port')
    rmq_user = parser.get('rabbitmq', 'management_user')
    rmq_pass = parser.get('rabbitmq', 'management_pass')
    root.admin = SystemResource(rmq_host, rmq_mp, rmq_user, rmq_pass)

    cherrypy.request.hooks.attach('before_finalize', CORS)
    cherrypy.quickstart(root, '/', conf)
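
The CORS hook attached with 'before_finalize' is referenced but not defined in any of these snippets; a typical implementation sets the allow-origin response header, as in the sketch below (the exact headers the project sets are an assumption).

# Sketch of a before_finalize CORS hook; the header value is an assumption.
import cherrypy

def CORS():
    cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'

# attached the same way as in the snippets above:
# cherrypy.request.hooks.attach('before_finalize', CORS)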