Example #1
def get_log_files(job_id):
    """Get a dictionary of recognised logs for a given job.

    Scans the log directory of the given job looking for log
    files which match one of the patterns in the log_types
    dictionary.

    Returns a dictionary where the keys are the log types
    for which logs were found, and the values are lists of
    logs of that type.
    """

    log_files = {}
    log_dir = get_log_dir(job_id)
    if os.path.isdir(log_dir):
        for file in sorted(os.listdir(log_dir), reverse=True):
            for (type_, pattern) in log_types.items():
                if pattern.match(file):
                    if file.endswith('.html'):
                        url = url_for('job_log_html', job_id=job_id, log=file)
                    else:
                        url = url_for('job_log_text', job_id=job_id, log=file)

                    if type_ in log_files:
                        log_files[type_].append(LogInfo(file, url, None))
                    else:
                        log_files[type_] = [LogInfo(file, url, None)]

    return log_files
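
For context, a minimal sketch of the log_types dictionary and LogInfo tuple assumed by the function above; the real definitions live elsewhere in the codebase, so the type names and patterns here are illustrative only:

import re
from collections import namedtuple

# Hypothetical log type patterns; the real log_types is defined elsewhere.
log_types = {
    'oracdr': re.compile(r'^oracdr_.*\.html$'),
    'jsawrapdr': re.compile(r'^jsawrapdr_.*\.log$'),
}

# LogInfo as used above appears to carry (filename, url, mtime).
LogInfo = namedtuple('LogInfo', ('name', 'url', 'mtime'))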
Example #2
def get_log_files(job_id):
    """Get a dictionary of recognised logs for a given job.

    Scans the log directory of the given job looking for log
    files which match one of the patterns in the log_types
    dictionary.

    Returns a dictionary where the keys are the log types
    for which logs were found, and the values are lists of
    logs of that type.
    """

    log_files = {}
    log_dir = get_log_dir(job_id)
    if os.path.isdir(log_dir):
        for file in sorted(os.listdir(log_dir), reverse=True):
            for (type_, pattern) in log_types.items():
                if pattern.match(file):
                    if file.endswith('.html'):
                        url = url_for('job_log_html', job_id=job_id, log=file)
                    else:
                        url = url_for('job_log_text', job_id=job_id, log=file)

                    if type_ in log_files:
                        log_files[type_].append(LogInfo(file, url))
                    else:
                        log_files[type_] = [LogInfo(file, url)]

    return log_files
Example #3
def prepare_job_list(db, page, **kwargs):
    # Generate query objects based on the parameters.
    (query, job_query) = job_search(**kwargs)

    # Identify number of jobs.
    count = db.find_jobs(count=True, **job_query)
    (number, page, pagination) = calculate_pagination(count, 24, page,
                                                      'job_list', query)

    # If no number is given in the job query, add the default.
    if 'number' not in job_query:
        job_query['number'] = number

    jobs = []

    for job in db.find_jobs(outputs='%_64.png',
                            offset=(number * page),
                            **job_query):
        if job.outputs:
            preview = url_for('job_preview',
                              job_id=job.id,
                              preview=job.outputs[0])
        else:
            preview = None
        jobs.append({
            'url': url_for('job_info', job_id=job.id),
            'qaurl': url_for('job_qa', job_id=job.id),
            'id': job.id,
            'state': job.state,
            'tag': job.tag,
            'location': job.location,
            'preview': preview,
            'qa_state': job.qa_state
        })

    return {
        'title': 'Job List',
        'jobs': jobs,
        'locations': ('JAC', 'CADC'),
        'states': JSAProcState.STATE_ALL,
        'qa_states': JSAQAState.STATE_ALL,
        'tasks': db.get_tasks(),
        'number': number,
        'pagination': pagination,
        'obsqueries': ObsQueryDict,
        'query': query,
        'mode': query['mode'],
        'count': count,
    }
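
A sketch of the paging arithmetic assumed above (calculate_pagination itself is defined elsewhere; the 24 passed to it is the page size): with number jobs per page, page p covers rows number * p through number * p + number - 1, which is why find_jobs is given offset=(number * page).

# Illustrative only: how a page size of 24 maps to find_jobs offsets.
number = 24
for page in range(3):
    offset = number * page
    print('page', page, 'covers rows', offset, 'to', offset + number - 1)
# page 0 covers rows 0 to 23
# page 1 covers rows 24 to 47
# page 2 covers rows 48 to 71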
Example #4
    def job_add_note(job_id):
        message = request.form['message']
        username = request.authorization['username']

        try:
            # Add the note.
            prepare_add_note(db, job_id, message, username)

            # Redirect back to the job info page.
            flash('The note has been saved.')
            raise HTTPRedirect(url_for('job_info', job_id=job_id))

        except ErrorPage as err:
            return error_page_response(err)
Example #5
    def job_add_note(job_id):
        message = request.form['message']
        username = request.authorization['username']

        try:
            # Add the note.
            prepare_add_note(db, job_id, message, username)

            # Redirect back to the job info page.
            flash('The note has been saved.')
            raise HTTPRedirect(url_for('job_info', job_id=job_id))

        except ErrorPage as err:
            return error_page_response(err)
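
Both versions above raise HTTPRedirect rather than returning a response, so a single handler can turn it into a 302. A minimal sketch of that dispatch pattern, assuming HTTPRedirect simply carries a target URL (the real class is defined elsewhere):

class HTTPRedirect(Exception):
    """Hypothetical redirect exception carrying a target URL."""

    def __init__(self, url):
        super(HTTPRedirect, self).__init__(url)
        self.url = url


def dispatch(view, *args, **kwargs):
    # Run the view; convert a raised HTTPRedirect into a 302 response.
    try:
        return view(*args, **kwargs)
    except HTTPRedirect as redirect:
        return ('302 Found', {'Location': redirect.url})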
Example #6
def get_orac_log_files(job_id):
    """
    Get a list of ORAC-DR (log.*) files for a job.

    Scans the log directory to get all log.* files.

    Records each file's modification time alongside its name and URL.

    """
    # Escape the dot so that only literal "log.*" files match (not "login").
    pattern = re.compile(r'log\..*')
    log_dir = get_log_dir(job_id)

    log_files = []
    if os.path.isdir(log_dir):
        files = os.listdir(log_dir)
        for f in sorted(files):
            if pattern.match(f):
                mtime = time.ctime(os.path.getmtime(os.path.join(log_dir, f)))
                url = url_for('job_log_text', job_id=job_id, log=f)
                log_files.append(LogInfo(f, url, mtime))
    return log_files
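
A quick check of why the dot is escaped in the pattern above: the unescaped 'log.*' would also match names such as 'login', since '.' matches any character.

import re

unescaped = re.compile('log.*')   # '.' matches any character
escaped = re.compile(r'log\..*')  # '\.' matches a literal dot

for name in ('log.group', 'login', 'logfile'):
    print(name, bool(unescaped.match(name)), bool(escaped.match(name)))
# log.group True True
# login True False
# logfile True False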
Example #7
def fop_summary_getres():
    userid = request.form['userid']
    semester = request.form['semester']
    raise HTTPRedirect(
        url_for('fop_summary', userid=userid, semester=semester))
Example #8
def home_page():
    raise HTTPRedirect(url_for('task_summary'))
Example #9
def fop_summary_getres():
    userid = request.form['userid']
    semester = request.form['semester']
    raise HTTPRedirect(url_for(
        'fop_summary', userid=userid, semester=semester))
Example #10
def home_page():
    raise HTTPRedirect(url_for('task_summary'))
Example #11
def make_output_file_list(db, job_id, preview_filter=None):
    """Prepare output file lists for job information pages.
    """

    output_files = []
    previews1024 = []
    previews256 = []

    try:
        for i in sorted(db.get_output_files(job_id)):
            url = None
            mtype = None

            if i.endswith('.png'):
                url = url_for('job_preview', job_id=job_id, preview=i)

                if preview_filter is None or any(
                        f in i for f in preview_filter):
                    caption = i
                    caption = re.sub('^jcmt_', '', caption)
                    caption = re.sub(r'_(preview_)?\d+\.png', '', caption)

                    if '_256.png' in i:
                        previews256.append(PreviewInfo(url, caption))

                    if '_1024.png' in i:
                        previews1024.append(PreviewInfo(url, caption))

            elif i.endswith('.pdf'):
                url = url_for('job_preview_pdf', job_id=job_id, preview=i)

            elif i.endswith('.txt'):
                url = url_for('job_text_file', job_id=job_id, text_file=i)

            elif i.endswith('.fits'):
                url = 'file://{0}/{1}'.format(get_output_dir(job_id), i)

                if re.search('-cat[0-9]{6}', i):
                    mtype = 'table.load.fits'

                elif re.search('-moc[0-9]{6}', i):
                    # This should be "coverage.load.moc.fits" but neither GAIA
                    # nor Aladin appear to subscribe to that mtype yet.
                    # mtype = 'coverage.load.moc.fits'
                    mtype = 'image.load.fits'

                elif '_rsp_' in i:
                    # Prevent a broadcast button being shown for spectra
                    # for now.
                    mtype = None

                else:
                    mtype = 'image.load.fits'

                # Remove URL for types we can't broadcast.
                if mtype is None:
                    url = None

            output_files.append(FileInfo(i, url, mtype))

    except NoRowsError:
        pass

    return (output_files, previews1024, previews256)
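
For example, the two substitutions above reduce a preview file name to its caption (the file name here is hypothetical):

import re

name = 'jcmt_20140321_00042_850um_preview_256.png'
caption = re.sub('^jcmt_', '', name)
caption = re.sub(r'_(preview_)?\d+\.png', '', caption)
print(caption)  # 20140321_00042_850um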
Example #12
def prepare_job_info(db, job_id, query):
    # Fetch job information from the database.
    try:
        job = db.get_job(job_id)
    except NoRowsError:
        raise HTTPNotFound()

    # Convert the information to a dictionary so that we can augment it.
    info = job._asdict()
    if info['foreign_id'] is not None:
        if info['location'] == 'CADC':
            info['foreign_url'] = \
                'http://beta.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/' \
                'dp/recipe/{0}'.format(info['foreign_id'])
        else:
            info['foreign_url'] = None

    # Try to get tiles.
    try:
        tiles = db.get_tilelist(job_id)
    except NoRowsError:
        tiles = None
    if tiles == []:
        tiles = None

    # Try to get input files (if any)
    try:
        input_files = db.get_input_files(job_id)
    except NoRowsError:
        input_files = None

    # Try to get parent jobs (if any).
    # Dictionary with parent as key and filter as item.
    try:
        parents = db.get_parents(job_id)
        parents = dict(parents)
        parent_obs = OrderedDict()
        pjobs = sorted(parents.keys())
        for i in pjobs:
            parent_obs[i] = [o._asdict() for o in db.get_obs_info(i)]
    except NoRowsError:
        parents = None
        parent_obs = None

    # See if there are any child jobs.
    try:
        children = db.get_children(job_id)
    except NoRowsError:
        children = None

    (output_files, previews1024, previews256) = \
        make_output_file_list(db, job.id)

    obs_info = db.get_obs_info(job.id)

    if obs_info:
        obs_info = [o._asdict() for o in obs_info]

    else:
        obs_info = None

    # Logged entries in the database (newest first).
    log = db.get_logs(job_id)
    log.reverse()

    # Get the log files on disk (if any)
    log_files = get_log_files(job_id)

    # Get the ORAC-DR log.* files from the database
    try:
        orac_log_files = db.get_log_files(job_id)
        orac_log_files = [(i, url_for('job_log_text', job_id=job_id, log=i))
                          for i in orac_log_files]
    except NoRowsError:
        orac_log_files = None

    # Get notes.
    notes = db.get_notes(job_id)

    # If we know what the user's job query was (from the session information)
    # then set up pagination based on the previous and next job identifiers.
    if query is not None:

        (url_query, job_query) = job_search(**query)

        # Need to remove the 'number' option from job_query too.
        pnquery = job_query.copy()
        if 'number' in pnquery:
            del pnquery['number']
        (prev, next) = db.job_prev_next(job_id, **pnquery)
        count = db.find_jobs(count=True, **job_query)
        pagination = Pagination(
            None,
            None if prev is None else url_for('job_info', job_id=prev),
            None if next is None else url_for('job_info', job_id=next),
            None,
            url_for('job_list', **url_query),
            count,)
    else:
        pagination = None

    return {
        'title': 'Job {}'.format(job_id),
        'info': info,
        'tiles': tiles,
        'log': log,
        'notes': notes,
        'input_files': input_files,
        'parents': parents,
        'children': children,
        'output_files': output_files,
        'log_files': log_files,
        'orac_log_files': orac_log_files,
        'previews': list(zip(previews256, previews1024)),
        'states': JSAProcState.STATE_ALL,
        'obsinfo': obs_info,
        'parent_obs': parent_obs,
        'pagination': pagination,
    }
Example #13
def prepare_job_qa_info(db, job_id, query):
    # Fetch job and qa information from the database.
    try:
        job = db.get_job(job_id)
    except NoRowsError:
        raise HTTPNotFound()

    # Convert the information to a dictionary so that we can augment it.
    info = job._asdict()
    if info['foreign_id'] is not None:
        if info['location'] == 'CADC':
            info['foreign_url'] = \
                'http://beta.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/' \
                'dp/recipe/{0}'.format(info['foreign_id'])
        else:
            info['foreign_url'] = None

    try:
        input_files = db.get_input_files(job_id)
    except NoRowsError:
        input_files = None

    # Try to get parent jobs (if any).
    # Dictionary with parent as key and filter as item.
    try:
        parents = db.get_parents(job_id)
        parents = dict(parents)
        parent_obs = OrderedDict()
        pjobs = list(parents.keys())
        pjobs.sort()
        for i in pjobs:
            obsinfo = db.get_obs_info(i)
            if obsinfo:
                obsinfo = [o._asdict() for o in obsinfo]
                qa_state = db.get_job(i).qa_state
                for o in obsinfo:
                    o['qa_state'] = qa_state

                parent_obs[i] = obsinfo
        if not parent_obs:
            parent_obs = None
    except NoRowsError:
        parents = None
        parent_obs = None

    # See if there are any child jobs.
    try:
        children = db.get_children(job_id)
    except NoRowsError:
        children = None

    (output_files, previews1024, _) = \
        make_output_file_list(db, job.id)

    obs_info = db.get_obs_info(job.id)

    if obs_info:
        obs_info = [o._asdict() for o in obs_info]

    else:
        obs_info = None

    # Get the log files on disk (if any)
    log_files = get_log_files(job_id)

    # QA log (if any).
    qalog = db.get_qas(job_id)
    qalog.reverse()

    # If we know what the user's job query was (from the session information)
    # then set up pagination based on the previous and next job identifiers.
    if query is not None:
        (url_query, job_query) = job_search(**query)

        # The prev/next query should not contain the kwarg 'number'.
        pnquery = job_query.copy()
        if 'number' in pnquery:
            pnquery.pop('number')

        (prev, next) = db.job_prev_next(job_id, **pnquery)
        count = db.find_jobs(count=True, **job_query)
        pagination = Pagination(
            None,
            None if prev is None else url_for('job_qa', job_id=prev),
            None if next is None else url_for('job_qa', job_id=next),
            None,
            url_for('job_list', **url_query),
            count,
        )
    else:
        pagination = None

    # In the case of task='*-cat' and there are no output preview
    # images, show the preview image from the 1st parent job.
    if '-cat' in info['task'] and previews1024 == []:
        (_, previews1024, _) = make_output_file_list(db, next(iter(parents)))
        nopreview = True
    else:
        nopreview = False

    # Get parent output .fits files.
    parent_fits = []
    if parents:
        for i in parents.keys():
            (parent_outputs, _, _) = make_output_file_list(db, i)
            # Keep only the .fits files from the parent's output list.
            parent_fits.extend(
                f for f in parent_outputs if '.fits' in f.name)

    return {
        'title': 'Job {}'.format(job_id),
        'info': info,
        'qalog': qalog,
        'output_files': output_files,
        'parents': parents,
        'children': children,
        'log_files': log_files,
        'previews': list(zip(previews1024, previews1024)),
        'states': JSAProcState.STATE_ALL,
        'obsinfo': obs_info,
        'parent_obs': parent_obs,
        'qa_states': JSAQAState.STATE_ALL,
        'pagination': pagination,
        'nopreview': nopreview,
        'parent_fits': parent_fits,
    }
Example #14
def make_output_file_list(db, job_id, preview_filter=None):
    """Prepare output file lists for job information pages.
    """

    output_files = []
    previews1024 = []
    previews256 = []

    try:
        for i in sorted(db.get_output_files(job_id)):
            url = None
            mtype = None

            if i.endswith('.png'):
                url = url_for('job_preview', job_id=job_id, preview=i)

                if preview_filter is None or any(
                        f in i for f in preview_filter):
                    caption = i
                    caption = re.sub('^jcmt_', '', caption)
                    caption = re.sub(r'_(preview_)?\d+\.png', '', caption)

                    if '_256.png' in i:
                        previews256.append(PreviewInfo(url, caption))

                    if '_1024.png' in i:
                        previews1024.append(PreviewInfo(url, caption))

            elif i.endswith('.pdf'):
                url = url_for('job_preview_pdf', job_id=job_id, preview=i)

            elif i.endswith('.txt'):
                url = url_for('job_text_file', job_id=job_id, text_file=i)

            elif i.endswith('.fits'):
                url = 'file://{0}/{1}'.format(get_output_dir(job_id), i)

                if re.search('-cat[0-9]{6}', i):
                    mtype = 'table.load.fits'

                elif re.search('-moc[0-9]{6}', i):
                    # This should be "coverage.load.moc.fits" but neither GAIA
                    # nor Aladin appear to subscribe to that mtype yet.
                    # mtype = 'coverage.load.moc.fits'
                    mtype = 'image.load.fits'

                elif '_rsp_' in i:
                    # Prevent a broadcast button being shown for spectra
                    # for now.
                    mtype = None

                else:
                    mtype = 'image.load.fits'

                # Remove URL for types we can't broadcast.
                if mtype is None:
                    url = None

            output_files.append(FileInfo(i, url, mtype))

    except NoRowsError:
        pass

    return (output_files, previews1024, previews256)
Example #15
def prepare_job_info(db, job_id, query):
    # Fetch job information from the database.
    try:
        job = db.get_job(job_id)
    except NoRowsError:
        raise HTTPNotFound()

    # Convert the information to a dictionary so that we can augment it.
    info = job._asdict()
    if info['foreign_id'] is not None:
        if info['location'] == 'CADC':
            info['foreign_url'] = \
                'http://beta.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/' \
                'dp/recipe/{0}'.format(info['foreign_id'])
        else:
            info['foreign_url'] = None

    # Try to get tiles.
    try:
        tiles = db.get_tilelist(job_id)
    except NoRowsError:
        tiles = None
    if tiles == []:
        tiles = None

    # Try to get input files (if any)
    try:
        input_files = db.get_input_files(job_id)
    except NoRowsError:
        input_files = None

    # Try to get parent jobs (if any).
    # Dictionary with parent as key and filter as item.
    try:
        parents = db.get_parents(job_id)
        parents = dict(parents)
        parent_obs = OrderedDict()
        pjobs = sorted(parents.keys())
        for i in pjobs:
            parent_obs[i] = [o._asdict() for o in db.get_obs_info(i)]
    except NoRowsError:
        parents = None
        parent_obs = None

    # See if there are any child jobs.
    try:
        children = db.get_children(job_id)
    except NoRowsError:
        children = None

    (output_files, previews1024, previews256) = \
        make_output_file_list(db, job.id)

    obs_info = db.get_obs_info(job.id)

    if obs_info:
        obs_info = [o._asdict() for o in obs_info]

    else:
        obs_info = None

    # Logged entries in the database (newest first).
    log = db.get_logs(job_id)
    log.reverse()

    # Get the log files on disk (if any)
    log_files = get_log_files(job_id)

    # Get notes.
    notes = db.get_notes(job_id)

    # If we know what the user's job query was (from the session information)
    # then set up pagination based on the previous and next job identifiers.
    if query is not None:

        (url_query, job_query) = job_search(**query)

        # Need to remove the 'number' option from job_query too.
        pnquery = job_query.copy()
        if 'number' in pnquery:
            del pnquery['number']
        (prev, next) = db.job_prev_next(job_id, **pnquery)
        count = db.find_jobs(count=True, **job_query)
        pagination = Pagination(
            None,
            None if prev is None else url_for('job_info', job_id=prev),
            None if next is None else url_for('job_info', job_id=next),
            None,
            url_for('job_list', **url_query),
            count,)
    else:
        pagination = None

    return {
        'title': 'Job {}'.format(job_id),
        'info': info,
        'tiles': tiles,
        'log': log,
        'notes': notes,
        'input_files': input_files,
        'parents': parents,
        'children': children,
        'output_files': output_files,
        'log_files': log_files,
        'previews': list(zip(previews256, previews1024)),
        'states': JSAProcState.STATE_ALL,
        'obsinfo': obs_info,
        'parent_obs': parent_obs,
        'pagination': pagination,
    }
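
Note on the 'previews' entry: in Python 3, zip returns a one-shot iterator, so it is materialised with list() before being handed to the template; otherwise a template iterating over it twice would see nothing the second time.

pairs = zip([1, 2], ['a', 'b'])
print(list(pairs))  # [(1, 'a'), (2, 'b')]
print(list(pairs))  # [] -- the iterator is already exhausted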