import json
import time

from flask import (current_app, g, jsonify, make_response, redirect,
                   render_template, request, url_for)

# NOTE: the project-specific names used below (jobs_, sites_, files_, fc,
# File, JobResendForm, JobKillForm, extractLog, find, getScope, getGUID,
# ddm_getlocalabspath, ddm_localextractfile, async_kill_job, client_config,
# _logger, and the PanDA client names JobSpec, FileSpec and submitJobs) are
# imported from elsewhere in the application; their import paths are not
# shown in this excerpt and are left as-is.


def jobLog(id):
    """
    Returns job stdout & stderr
    :param id: Job id
    :return: json data
    """
    job = jobs_.get(id=id)
    extractLog(id)
    locdir = '/%s/.sys/%s' % (getScope(job.owner.username), job.container.guid)
    absdir = ddm_getlocalabspath(locdir)
    fout = find('payload_stdout.txt', absdir)
    ferr = find('payload_stderr.txt', absdir)
    out = ''
    err = ''
    if len(fout) > 0:
        with open(fout[0]) as f:
            out = f.read()
    if len(ferr) > 0:
        with open(ferr[0]) as f:
            err = f.read()
    data = {'id': id, 'out': out, 'err': err}
    return make_response(jsonify({'data': data}), 200)

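# The find() helper used above is imported elsewhere in the project. What
# follows is a minimal sketch of the contract its callers rely on (return
# every path under `path` whose basename equals `name`), assuming the
# standard os.walk recipe; the actual project helper may differ. It is named
# _find_sketch to avoid shadowing the real one.
import os


def _find_sketch(name, path):
    """Collect the paths of all files called `name` anywhere under `path`."""
    matches = []
    for root, dirs, files in os.walk(path):
        if name in files:
            matches.append(os.path.join(root, name))
    return matches
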
def job_info(id):
    """
    Job info view
    :param id: Job id
    :return: Response obj
    """
    job = jobs_.get(id)
    container = job.container
    resend_form = JobResendForm()
    kill_form = JobKillForm()
    return render_template("dashboard/jobs/job.html",
                           job=job,
                           files=container.files,
                           ftp=current_app.config['FTP'],
                           resend_form=resend_form,
                           kill_form=kill_form)

def job_resend():
    """
    Initiate job resend
    :return: Response obj
    """
    form = JobResendForm()
    if request.method == 'POST':
        id_ = int(form.id_.data)
        job = jobs_.get(id_)
        pandaid = job.pandaid
        return redirect(url_for('jobs.jobs'))
    return make_response(jsonify({'status': 'Page not found'}), 404)

def extractOutputs(id):
    """
    Finds local output archives and extracts them
    :param id: Job id
    :return:
    """
    job = jobs_.get(id)
    files = job.container.files
    for f in files:
        if f.type == 'output':
            for r in f.replicas:
                if (r.se == current_app.config['DEFAULT_SE']
                        and r.status == 'ready'
                        and r.lfn.endswith('.tgz')):
                    ddm_localextractfile(r.lfn)

def job_kill():
    """
    Initiate job kill
    :return: Response obj
    """
    form = JobKillForm()
    if request.method == 'POST':
        id_ = int(form.id_.data)
        job = jobs_.get(id=id_)
        pandaid = job.pandaid
        if pandaid is not None:
            out = async_kill_job(pandaid)
            return make_response(jsonify({'data': out}), 200)
        return redirect(url_for('jobs.jobs'))
    return make_response(jsonify({'status': 'Page not found'}), 404)

def jobLogAPI(id):
    """
    Returns job stdout & stderr
    :param id: Job id
    :return: dict with job id, stdout and stderr
    """
    g.user = request.oauth.user
    job = jobs_.get(id)
    extractLog(id)
    locdir = '/%s/.sys/%s' % (getScope(job.owner.username), job.container.guid)
    absdir = ddm_getlocalabspath(locdir)
    fout = find('payload.stdout', absdir)
    ferr = find('payload.stderr', absdir)
    out = ''
    err = ''
    if len(fout) > 0:
        with open(fout[0]) as f:
            out = f.read()
    if len(ferr) > 0:
        with open(ferr[0]) as f:
            err = f.read()
    return {'id': id, 'out': out, 'err': err}

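# jobLogAPI mirrors jobLog above but returns a plain dict instead of a Flask
# response object, leaving serialization to the OAuth-protected API layer.
# Note that the two views also look for differently named log files
# ('payload.stdout' here vs 'payload_stdout.txt' in jobLog).
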
def send_job(jobid, siteid):
    """
    Builds a PanDA job from the local Job record and submits it
    :param jobid: Job id
    :param siteid: Site id
    :return: 0
    """
    _logger.debug('Jobid: ' + str(jobid))

    site = sites_.get(siteid)
    job = jobs_.get(int(jobid))
    cont = job.container
    files_catalog = cont.files

    fscope = getScope(job.owner.username)
    datasetName = '{}:{}'.format(fscope, cont.guid)

    distributive = job.distr.name
    release = job.distr.release

    # Prepare runScript
    parameters = job.distr.command
    parameters = parameters.replace("$COMMAND$", job.params)
    parameters = parameters.replace("$USERNAME$", job.owner.username)
    parameters = parameters.replace("$WORKINGGROUP$", job.owner.working_group)

    # Prepare metadata
    metadata = dict(user=job.owner.username)

    # Prepare PanDA Object
    pandajob = JobSpec()
    pandajob.jobDefinitionID = int(time.time()) % 10000
    pandajob.jobName = cont.guid
    pandajob.transformation = client_config.DEFAULT_TRF
    pandajob.destinationDBlock = datasetName
    pandajob.destinationSE = site.se
    pandajob.currentPriority = 1000
    pandajob.prodSourceLabel = 'user'
    pandajob.computingSite = site.ce
    pandajob.cloud = 'RU'
    pandajob.VO = 'atlas'
    pandajob.prodDBlock = "%s:%s" % (fscope, pandajob.jobName)
    pandajob.coreCount = job.corecount
    pandajob.metadata = json.dumps(metadata)
    # pandajob.workingGroup = job.owner.working_group

    if site.encode_commands:
        # It requires script wrapper on cluster side
        pandajob.jobParameters = '%s %s %s "%s"' % (cont.guid, release,
                                                    distributive, parameters)
    else:
        pandajob.jobParameters = parameters

    has_input = False
    for fcc in files_catalog:
        if fcc.type == 'input':
            f = fcc.file
            guid = f.guid
            fileIT = FileSpec()
            fileIT.lfn = f.lfn
            fileIT.dataset = pandajob.prodDBlock
            fileIT.prodDBlock = pandajob.prodDBlock
            fileIT.type = 'input'
            fileIT.scope = fscope
            fileIT.status = 'ready'
            fileIT.GUID = guid
            pandajob.addFile(fileIT)
            has_input = True
        if fcc.type == 'output':
            f = fcc.file
            fileOT = FileSpec()
            fileOT.lfn = f.lfn
            fileOT.destinationDBlock = pandajob.prodDBlock
            fileOT.destinationSE = pandajob.destinationSE
            fileOT.dataset = pandajob.prodDBlock
            fileOT.type = 'output'
            fileOT.scope = fscope
            fileOT.GUID = f.guid
            pandajob.addFile(fileOT)

            # Save replica meta
            fc.new_replica(f, site)

    if not has_input:
        # Add fake input
        fileIT = FileSpec()
        fileIT.lfn = "fake.input"
        fileIT.dataset = pandajob.prodDBlock
        fileIT.prodDBlock = pandajob.prodDBlock
        fileIT.type = 'input'
        fileIT.scope = fscope
        fileIT.status = 'ready'
        fileIT.GUID = "fake.guid"
        pandajob.addFile(fileIT)

    # Prepare log file
    fileOL = FileSpec()
    fileOL.lfn = "%s.log.tgz" % pandajob.jobName
    fileOL.destinationDBlock = pandajob.destinationDBlock
    fileOL.destinationSE = pandajob.destinationSE
    fileOL.dataset = '{}:logs'.format(fscope)
    fileOL.type = 'log'
    fileOL.scope = 'panda'
    pandajob.addFile(fileOL)

    # Save log meta
    log = File()
    log.scope = fscope
    log.lfn = fileOL.lfn
    log.guid = getGUID(log.scope, log.lfn)
    log.type = 'log'
    log.status = 'defined'
    files_.save(log)

    # Save replica meta
    fc.new_replica(log, site)

    # Register file in container
    fc.reg_file_in_cont(log, cont, 'log')

    # Submit job
    o = submitJobs([pandajob])
    x = o[0]
    try:
        # Update PandaID
        PandaID = int(x[0])
        job.pandaid = PandaID
        job.ce = site.ce
    except Exception:
        job.status = 'submit_error'
    jobs_.save(job)
    return 0

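# Usage sketch for send_job (the ids are hypothetical and must reference
# existing Job and Site records served by jobs_ and sites_):
#
#     send_job(jobid=42, siteid=1)
#
# On a successful PanDA submission the job's pandaid and ce fields are filled
# in; otherwise job.status is set to 'submit_error'. The function returns 0
# either way.
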
def show(id_):
    """
    Returns job by id
    :param id_: Job id
    :return: Job obj
    """
    return jobs_.get(id_)