Example No. 1
def _open_logs(id_, no_detach):
    """Open Cylc log handlers for a flow run."""
    if not no_detach:
        # Detaching from the terminal: close and remove any existing
        # (e.g. console) handlers so nothing writes to the old streams.
        while LOG.handlers:
            LOG.handlers[0].close()
            LOG.removeHandler(LOG.handlers[0])
    log_path = get_workflow_run_log_name(id_)
    LOG.addHandler(TimestampRotatingFileHandler(log_path, no_detach))
    # Add file installation log
    file_install_log_path = get_workflow_file_install_log_name(id_)
    RSYNC_LOG.addHandler(
        TimestampRotatingFileHandler(file_install_log_path, no_detach))
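The pattern above is generic: when the scheduler detaches from the terminal, any existing console handlers are closed and removed before the file handlers are attached. A minimal self-contained sketch of the same pattern using only the standard library, where logging.handlers.TimedRotatingFileHandler stands in for Cylc's TimestampRotatingFileHandler, and the logger name and rotation policy are illustrative assumptions:

import logging
from logging.handlers import TimedRotatingFileHandler

LOG = logging.getLogger("workflow")  # illustrative name, not Cylc's

def open_logs(log_path: str, no_detach: bool) -> None:
    """Swap any console handlers for a rotating file handler."""
    if not no_detach:
        # Detaching: nothing should keep writing to the terminal streams.
        while LOG.handlers:
            LOG.handlers[0].close()
            LOG.removeHandler(LOG.handlers[0])
    # Rotate at midnight, keeping five old logs (illustrative policy).
    LOG.addHandler(
        TimedRotatingFileHandler(log_path, when="midnight", backupCount=5))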
Example No. 2
def daemonize(schd):
    """Turn a cylc scheduler into a Unix daemon.

    Do the UNIX double-fork magic; see Stevens' "Advanced Programming in the
    UNIX Environment" for details (ISBN 0201563177).

    ATTRIBUTION: based on a public-domain code recipe by Jurgen Hermann:
    http://code.activestate.com/recipes/66012-fork-a-daemon-process-on-unix/

    """
    logfname = get_workflow_run_log_name(schd.workflow)
    try:
        old_log_mtime = os.stat(logfname).st_mtime
    except OSError:
        old_log_mtime = None
    # fork 1
    try:
        pid = os.fork()
        if pid > 0:
            # Poll for workflow log to be populated
            workflow_pid = None
            workflow_url = None
            pub_url = None
            timeout = time() + _TIMEOUT
            while time() <= timeout and (workflow_pid is None
                                         or workflow_url is None
                                         or pub_url is None):
                sleep(0.1)
                try:
                    # The first INFO line of the workflow log should contain
                    # the start-up message, URL and PID. Format:
                    #  LOG-PREFIX Workflow schd program: url=URL, pid=PID
                    # Otherwise something is wrong: print the workflow log
                    # and exit with an error.
                    log_stat = os.stat(logfname)
                    if (log_stat.st_mtime == old_log_mtime
                            or log_stat.st_size == 0):
                        continue
                    with open(logfname) as log_file:
                        for line in log_file:
                            if schd.START_MESSAGE_PREFIX in line:
                                workflow_url, workflow_pid = (
                                    item.rsplit("=", 1)[-1]
                                    for item in line.rsplit()[-2:])
                            if schd.START_PUB_MESSAGE_PREFIX in line:
                                pub_url = line.rsplit("=", 1)[-1].rstrip()
                            if workflow_url and pub_url:
                                break
                            elif ' ERROR -' in line or ' CRITICAL -' in line:
                                # ERROR or CRITICAL logged before the workflow
                                # started: dump the log and exit with an error.
                                try:
                                    with open(logfname) as err_log:
                                        sys.stderr.write(err_log.read())
                                    sys.exit(1)
                                except IOError:
                                    sys.exit("Workflow schd program exited")
                except (IOError, OSError, ValueError):
                    pass
            if workflow_pid is None or workflow_url is None:
                sys.exit("Workflow not started after %ds" % _TIMEOUT)
            # Print workflow information
            info = {
                "workflow": schd.workflow,
                "host": schd.host,
                "url": workflow_url,
                "pub_url": pub_url,
                "ps_opts": PS_OPTS,
                "pid": workflow_pid
            }
            if schd.options.format == 'json':
                sys.stdout.write(json.dumps(info, indent=4))
            else:
                sys.stdout.write(WORKFLOW_INFO_TMPL % info)
            # exit parent 1
            sys.exit(0)
    except OSError as exc:
        sys.exit("fork #1 failed: %d (%s)\n" % (exc.errno, exc.strerror))

    # decouple from parent environment
    os.chdir("/")
    os.setsid()
    os.umask(0)

    # fork 2
    try:
        pid = os.fork()
        if pid > 0:
            # exit parent 2
            sys.exit(0)
    except OSError as exc:
        sys.exit("fork #2 failed: %d (%s)\n" % (exc.errno, exc.strerror))

    # reset umask, octal
    os.umask(0o22)

    # Attach stdin to /dev/null.
    # Note that simply reassigning the sys streams is not sufficient
    # if we import modules that read stdin or write stdout from C
    # code - evidently the subprocess module is in this category!
    # The dup2 copy keeps fd 0 open, so the original handle can be
    # closed again immediately.
    with open(os.devnull, 'r') as dvnl:
        os.dup2(dvnl.fileno(), sys.stdin.fileno())
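Stripped of the log polling and start-up reporting, the double-fork recipe above reduces to a few lines. A minimal sketch of the same shape, assuming a POSIX system (daemonize_simple is an illustrative name, not part of Cylc's API):

import os
import sys

def daemonize_simple() -> None:
    """Minimal UNIX double-fork daemonization."""
    # fork 1: the original parent returns to the shell immediately.
    if os.fork() > 0:
        sys.exit(0)
    # Decouple from the parent environment.
    os.chdir("/")
    os.setsid()
    os.umask(0)
    # fork 2: the session leader exits, so the daemon can never
    # reacquire a controlling terminal.
    if os.fork() > 0:
        sys.exit(0)
    os.umask(0o22)
    # Attach stdin to /dev/null; the dup2 copy keeps fd 0 open after
    # the original handle is closed by the with block.
    with open(os.devnull, 'r') as devnull:
        os.dup2(devnull.fileno(), sys.stdin.fileno())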
Example No. 3
def main(parser: COP,
         options: 'Values',
         reg: str,
         task_id: Optional[str] = None,
         color: bool = False) -> None:
    """Implement cylc cat-log CLI.

    Determine log path, user@host, batchview_cmd, and action (print, dir-list,
    cat, edit, or tail), and then if the log path is:
      a) local: perform action on log path, or
      b) remote: re-invoke cylc cat-log as a) on the remote account

    """
    if options.remote_args:
        # Invoked on job hosts for job logs only, as a wrapper to view_log().
        # Tail and batchview commands come from the global config on the
        # workflow host.
        logpath, mode, tail_tmpl = options.remote_args[0:3]
        logpath = expand_path(logpath)
        tail_tmpl = expand_path(tail_tmpl)
        try:
            batchview_cmd = options.remote_args[3]
        except IndexError:
            batchview_cmd = None
        res = view_log(logpath,
                       mode,
                       tail_tmpl,
                       batchview_cmd,
                       remote=True,
                       color=color)
        if res == 1:
            sys.exit(res)
        return

    workflow_name, _ = parse_reg(reg)
    # Get long-format mode.
    try:
        mode = MODES[options.mode]
    except KeyError:
        mode = options.mode

    if not task_id:
        # Cat workflow logs, local only.
        if options.filename is not None:
            raise UserInputError("The '-f' option is for job logs only.")

        logpath = get_workflow_run_log_name(workflow_name)
        if options.rotation_num:
            logs = glob('%s.*' % logpath)
            logs.sort(key=os.path.getmtime, reverse=True)
            try:
                logpath = logs[int(options.rotation_num)]
            except IndexError:
                raise UserInputError("max rotation %d" % (len(logs) - 1))
        tail_tmpl = os.path.expandvars(get_platform()["tail command template"])
        out = view_log(logpath, mode, tail_tmpl, color=color)
        if out == 1:
            sys.exit(1)
        if mode == 'edit':
            tmpfile_edit(out, options.geditor)
        return

    if task_id:
        # Cat task job logs, may be on workflow or job host.
        if options.rotation_num is not None:
            raise UserInputError("only workflow (not job) logs get rotated")
        try:
            task, point = TaskID.split(task_id)
        except ValueError:
            parser.error("Illegal task ID: %s" % task_id)
        if options.submit_num != NN:
            try:
                options.submit_num = "%02d" % int(options.submit_num)
            except ValueError:
                parser.error("Illegal submit number: %s" % options.submit_num)
        if options.filename is None:
            options.filename = JOB_LOG_OUT
        else:
            # Convert short filename args to long (e.g. 'o' to 'job.out').
            with suppress(KeyError):
                options.filename = JOB_LOG_OPTS[options.filename]
                # KeyError: Is already long form (standard log, or custom).
        platform_name, job_runner_name, live_job_id = get_task_job_attrs(
            workflow_name, point, task, options.submit_num)
        platform = get_platform(platform_name)
        batchview_cmd = None
        if live_job_id is not None:
            # Job is currently running. Get special job runner log view
            # command (e.g. qcat) if one exists, and the log is out or err.
            conf_key = None
            if options.filename == JOB_LOG_OUT:
                if mode == 'cat':
                    conf_key = "out viewer"
                elif mode == 'tail':
                    conf_key = "out tailer"
            elif options.filename == JOB_LOG_ERR:
                if mode == 'cat':
                    conf_key = "err viewer"
                elif mode == 'tail':
                    conf_key = "err tailer"
            if conf_key is not None:
                batchview_cmd_tmpl = None
                with suppress(KeyError):
                    batchview_cmd_tmpl = platform[conf_key]
                if batchview_cmd_tmpl is not None:
                    batchview_cmd = batchview_cmd_tmpl % {
                        "job_id": str(live_job_id)
                    }

        log_is_remote = (is_remote_platform(platform)
                         and (options.filename != JOB_LOG_ACTIVITY))
        log_is_retrieved = (platform['retrieve job logs']
                            and live_job_id is None)
        if log_is_remote and (not log_is_retrieved or options.force_remote):
            logpath = os.path.normpath(
                get_remote_workflow_run_job_dir(workflow_name, point, task,
                                                options.submit_num,
                                                options.filename))
            tail_tmpl = platform["tail command template"]
            # Reinvoke the cat-log command on the remote account.
            cmd = ['cat-log', *verbosity_to_opts(cylc.flow.flags.verbosity)]
            for item in [logpath, mode, tail_tmpl]:
                cmd.append('--remote-arg=%s' % shlex.quote(item))
            if batchview_cmd:
                cmd.append('--remote-arg=%s' % shlex.quote(batchview_cmd))
            cmd.append(workflow_name)
            is_edit_mode = (mode == 'edit')
            # TODO: Add Intelligent Host selection to this
            try:
                proc = remote_cylc_cmd(cmd,
                                       platform,
                                       capture_process=is_edit_mode,
                                       manage=(mode == 'tail'))
            except KeyboardInterrupt:
                # Ctrl-C while tailing.
                pass
            else:
                if is_edit_mode:
                    # Write remote stdout to a temp file for viewing in editor,
                    # reading BUFSIZE bytes at a time in case of huge output.
                    out = NamedTemporaryFile()
                    data = proc.stdout.read(BUFSIZE)
                    while data:
                        out.write(data)
                        data = proc.stdout.read(BUFSIZE)
                    os.chmod(out.name, S_IRUSR)
                    out.seek(0, 0)
        else:
            # Local task job or local job log.
            logpath = os.path.normpath(
                get_workflow_run_job_dir(workflow_name, point, task,
                                         options.submit_num, options.filename))
            tail_tmpl = os.path.expandvars(platform["tail command template"])
            out = view_log(logpath,
                           mode,
                           tail_tmpl,
                           batchview_cmd,
                           color=color)
            if mode != 'edit':
                sys.exit(out)
        if mode == 'edit':
            tmpfile_edit(out, options.geditor)
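In the edit-mode branch above, remote stdout is streamed into a read-only temporary file in BUFSIZE chunks rather than read in one call. A standalone sketch of that chunked-copy pattern (the helper name and BUFSIZE value are illustrative assumptions, not Cylc's API):

import os
import subprocess
from stat import S_IRUSR
from tempfile import NamedTemporaryFile
from typing import IO, List

BUFSIZE = 1024 * 1024  # copy 1 MiB at a time to bound memory use

def capture_to_tempfile(cmd: List[str]) -> IO[bytes]:
    """Stream a command's stdout into a read-only temp file."""
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    out = NamedTemporaryFile()
    data = proc.stdout.read(BUFSIZE)
    while data:
        out.write(data)
        data = proc.stdout.read(BUFSIZE)
    proc.wait()
    os.chmod(out.name, S_IRUSR)  # owner read-only, as in the code above
    out.seek(0)                  # rewind so the caller reads from the top
    return out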