def xmlrpc_launchPlugins(self, result_pk, plugins, net_location, username,
                         runlevel=RunLevel.DEFAULT, params={}):
    """
    Launch multiple plugins with dependencies
    For multi-runlevel plugins the input 'plugins' is common for all runlevels
    """
    msg = ''
    logger.debug("[launchPlugins] result %s requested plugins: %s" % (result_pk, ','.join(plugins.keys())))

    try:
        # get plugins to run for this runlevel
        plugins, plugins_to_run, satisfied_dependencies = get_plugins_to_run(plugins, result_pk, runlevel)

        if len(plugins_to_run) > 0:
            logger.debug("[launchPlugins] runlevel: %s, depsolved launch order: %s" % (runlevel, ','.join(plugins_to_run)))
        else:
            logger.debug("[launchPlugins] no plugins to run at runlevel: %s" % runlevel)
            return plugins, msg

        result = Results.objects.get(pk=result_pk)
        report_dir = result.get_report_dir()
        url_root = result.reportWebLink()

        # get pluginresult owner - must be a valid TS user
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            user = User.objects.get(pk=1)
            logger.error("Invalid user specified for plugin launch: %s, will use %s" % (username, user.username))

        for name in plugins_to_run:
            try:
                p = plugins[name]
                # get params for this plugin, make empty json value if doesn't exist
                plugin_params = params.setdefault('plugins', {}).setdefault(name, {})

                # Get pluginresult for multi-runlevel plugins or if specified to be reused by manual launch
                pr = None
                pluginresult_pk = p.get('pluginresult') or plugin_params.get('pluginresult')

                if pluginresult_pk:
                    logger.debug("Searching for PluginResult: %s", pluginresult_pk)
                    try:
                        pr = result.pluginresult_set.get(pk=pluginresult_pk)
                    except Exception:
                        logger.error("Failed to find pluginresult for plugin %s, result %s: %s" % (name, result.resultsName, pluginresult_pk))
                        pr = None
                elif Feature.EXPORT in p.get('features', []):
                    # Export plugins rerun in place to enable resuming upload
                    pr = result.pluginresult_set.filter(plugin=p['id'])
                    if pr.count() > 0:
                        pr = pr[0]
                        pr.owner = user

                # Create new pluginresult - this is the most common path
                if not pr:
                    pr = PluginResult.objects.create(result_id=result_pk, plugin_id=p['id'], owner=user)
                    logger.debug("New pluginresult id=%s created for plugin %s and result %s." % (pr.pk, name, result.resultsName))
                    # Always create new, unique output folder.
                    # Never fallback to old *_out format.
                    plugin_output = pr.path(create=True, fallback=False)
                else:
                    # Use existing output folder
                    plugin_output = pr.path(create=False)

                p['results_dir'] = plugin_output
                p['pluginresult'] = pr.pk

                p = add_hold_jid(p, plugins, runlevel)

                start_json = make_plugin_json(result_pk, report_dir, p, plugin_output, net_location, url_root,
                                              username, runlevel, params.get('blockId', ''),
                                              params.get('block_dirs', ["."]),
                                              plugin_params.get('instance_config', {}))

                # Pass on run_mode (launch source - manual/instance, pipeline)
                start_json['runplugin']['run_mode'] = params.get('run_mode', '')

                # add dependency info to startplugin json
                if p.get('depends') and isinstance(p['depends'], list):
                    start_json['depends'] = {}
                    for depends_name in p['depends']:
                        if depends_name in satisfied_dependencies:
                            start_json['depends'][depends_name] = satisfied_dependencies[depends_name]
                        elif depends_name in plugins and plugins[depends_name].get('pluginresult'):
                            start_json['depends'][depends_name] = {
                                'pluginresult': plugins[depends_name]['pluginresult'],
                                'version': plugins[depends_name].get('version', ''),
                                'pluginresult_path': plugins[depends_name].get('results_dir')
                            }

                # prepare for launch: updates config, sets pluginresults status, generates api key
                pr.prepare(config=start_json['pluginconfig'])
                pr.save()

                # update startplugin json with pluginresult info
                start_json['runinfo']['pluginresult'] = pr.pk
                start_json['runinfo']['api_key'] = pr.apikey

                # NOTE: Job is held on start, and subsequently released
                # to avoid any race condition on updating job queue status

                # launch plugin
                jid = SGEPluginJob(start_json, hold=True)

                if jid:
                    # Update pluginresult status
                    PluginResult.objects.filter(pk=pr.pk).update(state='Queued', jobid=jid)
                    # Release now that jobid and queued state are set.
                    _session.control(jid, drmaa.JobControlAction.RELEASE)  # no return value

                msg += 'Launched plugin %s: jid %s, depends %s, hold_jid %s \n' % \
                    (p['name'], jid, p['depends'], p['hold_jid'])

                if runlevel != RunLevel.BLOCK:
                    p['jid'] = jid
                else:
                    p.setdefault('block_jid', []).append(jid)

            except Exception:
                logger.error(traceback.format_exc())
                msg += 'ERROR: Plugin %s failed to launch.\n' % p['name']
                pr = PluginResult.objects.get(pk=pr.pk)
                pr.complete('Error')
                pr.save()

    except Exception:
        logger.error(traceback.format_exc())
        msg += 'ERROR: Failed to launch requested plugins.'

    return plugins, msg
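# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original module): how a caller
# might invoke the method above over XML-RPC.  Assumptions, not taken from
# this file: the daemon exposes xmlrpc_launchPlugins as "launchPlugins"
# (the usual Twisted xmlrpc_ prefix convention), it listens on
# localhost:10191, and every identifier below (plugin id, result pk,
# username, URL) is a placeholder.
def _example_launch_over_xmlrpc():
    try:
        from xmlrpc.client import ServerProxy  # Python 3
    except ImportError:
        from xmlrpclib import ServerProxy      # Python 2

    proxy = ServerProxy("http://localhost:10191", allow_none=True)

    # Shape of the 'plugins' argument as read by the launcher: keyed by plugin
    # name, each entry carrying at least the Plugin pk under 'id'.
    plugins = {
        "examplePlugin": {
            "id": 42,                 # hypothetical Plugin pk
            "name": "examplePlugin",
            "version": "1.0.0",
            "features": [],
            "depends": [],
        }
    }

    # runlevel and params are omitted here so the server-side defaults apply.
    response = proxy.launchPlugins(1234, plugins, "http://localhost", "ionadmin")
    plugins_out, msg = response  # the (plugins, msg) tuple arrives as a two-item list
    print(msg)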
def launchPlugins(
    self,
    result_pk,
    plugins,
    net_location,
    username,
    runlevel=RunLevel.DEFAULT,
    params={},
):
    """
    Launch multiple plugins with dependencies
    For multi-runlevel plugins the input 'plugins' is common for all runlevels
    """
    msg = ""
    logger.debug("[launchPlugins] result %s requested plugins: %s"
                 % (result_pk, ",".join(list(plugins.keys()))))

    try:
        # get plugins to run for this runlevel
        plugins, plugins_to_run, satisfied_dependencies = get_plugins_to_run(
            plugins, result_pk, runlevel)

        if len(plugins_to_run) > 0:
            logger.debug("[launchPlugins] runlevel: %s, depsolved launch order: %s"
                         % (runlevel, ",".join(plugins_to_run)))
        else:
            logger.debug("[launchPlugins] no plugins to run at runlevel: %s" % runlevel)
            return plugins, msg

        result = Results.objects.get(pk=result_pk)
        report_dir = result.get_report_dir()
        url_root = result.reportWebLink()

        # get pluginresult owner - must be a valid TS user
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            user = User.objects.get(pk=1)
            logger.error("Invalid user specified for plugin launch: %s, will use %s"
                         % (username, user.username))

        for name in plugins_to_run:
            try:
                p = plugins[name]
                # get params for this plugin, make empty json value if doesn't exist
                plugin_params = params.setdefault("plugins", {}).setdefault(name, {})

                # Get pluginresult for multi-runlevel plugins or if specified to be reused by manual launch
                pr = None
                pluginresult_pk = p.get("pluginresult") or plugin_params.get("pluginresult")

                if pluginresult_pk:
                    logger.debug("Searching for PluginResult: %s", pluginresult_pk)
                    try:
                        pr = result.pluginresult_set.get(pk=pluginresult_pk)
                    except Exception:
                        logger.error("Failed to find pluginresult for plugin %s, result %s: %s"
                                     % (name, result.resultsName, pluginresult_pk))
                        pr = None

                # Create new pluginresult - this is the most common path
                if not pr:
                    pr = PluginResult.objects.create(result_id=result_pk, plugin_id=p["id"], owner=user)
                    logger.debug("New pluginresult id=%s created for plugin %s and result %s."
                                 % (pr.pk, name, result.resultsName))
                    # Always create new, unique output folder.
                    # Never fallback to old *_out format.
                    plugin_output = pr.path(create=True, fallback=False)
                else:
                    # Use existing output folder
                    plugin_output = pr.path(create=False)

                p["results_dir"] = plugin_output
                p["pluginresult"] = pr.pk

                p, holding_for = add_hold_jid(p, plugins, runlevel, satisfied_dependencies)

                start_json = make_plugin_json(
                    result_pk,
                    report_dir,
                    p,
                    plugin_output,
                    net_location,
                    url_root,
                    username,
                    runlevel,
                    params.get("blockId", ""),
                    params.get("block_dirs", ["."]),
                    plugin_params.get("instance_config", {}),
                )

                # Pass on run_mode (launch source - manual/instance, pipeline)
                run_mode = params.get("run_mode", "")
                start_json["runplugin"]["run_mode"] = run_mode

                # add dependency info to startplugin json
                if p.get("depends") and isinstance(p["depends"], list):
                    start_json["depends"] = {}
                    for depends_name in p["depends"]:
                        if depends_name in satisfied_dependencies:
                            start_json["depends"][depends_name] = satisfied_dependencies[depends_name]
                        elif depends_name in plugins and plugins[depends_name].get("pluginresult"):
                            start_json["depends"][depends_name] = {
                                "pluginresult": plugins[depends_name]["pluginresult"],
                                "version": plugins[depends_name].get("version", ""),
                                "pluginresult_path": plugins[depends_name].get("results_dir"),
                            }

                # prepare for launch: updates config, sets pluginresults status, generates api key
                pr.prepare()

                # perform the validation of the plugin configuration here
                pr.validation_errors = {
                    "validation_errors": Plugin.validate(p["id"], start_json["pluginconfig"], run_mode == "manual")
                }
                pr.save()
                if pr.validation_errors.get("validation_errors", list()):
                    continue

                # update startplugin json with pluginresult info
                start_json["runinfo"]["pluginresult"] = pr.pk
                start_json["runinfo"]["api_key"] = pr.apikey

                # NOTE: Job is held on start, and subsequently released
                # to avoid any race condition on updating job queue status

                # launch plugin
                jid = SGEPluginJob(start_json, hold=True)

                if jid:
                    # Update pluginresult status
                    prj, created = PluginResultJob.objects.get_or_create(
                        plugin_result=pr,
                        run_level=runlevel,
                        grid_engine_jobid=jid,
                        state="Queued",
                        config=start_json["pluginconfig"],
                    )
                    prj.save()
                    # Release now that jobid and queued state are set.
                    _session.control(jid, drmaa.JobControlAction.RELEASE)  # no return value

                msg += "Plugin: %s result: %s, jid %s, depends %s, holding for %s \n" % (
                    p.get("name", ""),
                    result.resultsName,
                    jid,
                    p.get("depends", []),
                    holding_for,
                )

                if runlevel != RunLevel.BLOCK:
                    p["jid"] = jid
                else:
                    p.setdefault("block_jid", []).append(jid)

            except Exception as exc:
                logger.error(traceback.format_exc())
                msg += "ERROR: Plugin %s failed to launch.\n" % p["name"]
                pr = PluginResult.objects.get(pk=pr.pk)

    except Exception:
        logger.error(traceback.format_exc())
        msg += "ERROR: Failed to launch requested plugins."

    return plugins, msg
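# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original module): the
# hold-then-release pattern described in the NOTE above, written against the
# drmaa Python bindings directly.  SGEPluginJob presumably wraps something
# similar; the command, arguments, and the "-h" (submit-on-hold) native
# specification below are illustrative assumptions, not taken from this
# codebase.
def _example_hold_then_release():
    import drmaa

    with drmaa.Session() as session:
        jt = session.createJobTemplate()
        jt.remoteCommand = "/bin/sleep"   # placeholder for the plugin launch script
        jt.args = ["60"]
        jt.nativeSpecification = "-h"     # SGE: submit the job in user-hold state

        jid = session.runJob(jt)
        # While the job is held it cannot start, so its id and "Queued" state
        # can be recorded without racing the queue-status updates.
        session.control(jid, drmaa.JobControlAction.RELEASE)

        session.deleteJobTemplate(jt)
        return jid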