Example no. 1
0
def print_last_process_state_change(process_type=None):
    """
    Print the last time that a process of the specified type has changed its state.
    This function will also print a warning if the daemon is not running.

    :param process_type: optional process type for which to get the latest state change timestamp.
        Valid process types are either 'calculation' or 'work'.
    """
    from aiida.cmdline.utils.echo import echo_info, echo_warning
    from aiida.common import timezone
    from aiida.common.utils import str_timedelta
    from aiida.engine.daemon.client import get_daemon_client
    from aiida.engine.utils import get_process_state_change_timestamp

    daemon_client = get_daemon_client()
    most_recent = get_process_state_change_timestamp(process_type)

    if most_recent is None:
        echo_info('last time an entry changed state: never')
    else:
        # Report both a relative age (e.g. "3h ago") and an absolute local time.
        delta = timezone.delta(most_recent, timezone.now())
        absolute = format_local_time(most_recent,
                                     format_str='at %H:%M:%S on %Y-%m-%d')
        relative = str_timedelta(delta,
                                 negative_to_zero=True,
                                 max_num_fields=1)
        echo_info('last time an entry changed state: {} ({})'.format(
            relative, absolute))

    if not daemon_client.is_daemon_running:
        echo_warning('the daemon is not running', bold=True)
Example no. 2
0
def do_list(past_days, limit):
    """
    Return a list of running workflows on screen
    """
    from aiida.common.utils import str_timedelta
    from aiida.backends.utils import load_dbenv, is_dbenv_loaded
    if not is_dbenv_loaded():
        load_dbenv()
    import aiida.utils.timezone as timezone
    from aiida.orm.mixins import SealableMixin
    # Attribute key under which the "sealed" flag is stored on each node.
    sealed_key = 'attributes.{}'.format(SealableMixin.SEALED_KEY)

    current_time = timezone.now()

    rows = []
    for entry in _build_query(limit=limit, past_days=past_days):
        calculation = entry['calculation']
        # Relative age of the node (e.g. "3h"), clamped at zero.
        age = str_timedelta(timezone.delta(calculation['ctime'], current_time),
                            negative_to_zero=True,
                            max_num_fields=1)
        rows.append([calculation['id'], age, calculation['type'],
                     str(calculation[sealed_key])])

    print(tabulate(rows, headers=["PID", "Creation time", "Type", "Sealed"]))
Example no. 3
0
    def daemon_status(self, *args):
        """
        Print the status of the daemon.

        Reports the most recent daemon timestamp, then the daemon PID and the
        status and start time of every daemon worker (child) process.

        NOTE(review): this is legacy Python 2 code (bare ``print`` statements
        mixed with ``print()`` calls) — kept byte-identical.

        :param args: must be empty; any extra argument aborts with exit code 1.
        """
        # Lazily load the database environment before touching backend modules.
        if not is_dbenv_loaded():
            from aiida.backends.utils import load_dbenv
            load_dbenv(process='daemon')

        # This command takes no arguments: reject anything passed in.
        if args:
            print >> sys.stderr, (
                "No arguments allowed for the '{}' command.".format(
                    self.get_full_command_name()))
            sys.exit(1)

        from aiida.utils import timezone

        from aiida.daemon.timestamps import get_most_recent_daemon_timestamp
        from aiida.common.utils import str_timedelta
        from pytz import UTC

        most_recent_timestamp = get_most_recent_daemon_timestamp()

        # How long ago the daemon last left a timestamp (aware UTC arithmetic).
        if most_recent_timestamp is not None:
            timestamp_delta = (timezone.datetime.now(tz=UTC) -
                               most_recent_timestamp)
            print("# Most recent daemon timestamp:{}".format(
                str_timedelta(timestamp_delta)))
        else:
            print("# Most recent daemon timestamp: [Never]")

        pid = self.get_daemon_pid()
        if pid is None:
            print "Daemon not running (cannot find the PID for it)"
            return

        import psutil

        # Convert a psutil process creation epoch into a datetime object.
        def create_time(p):
            return datetime.fromtimestamp(p.create_time())

        try:
            daemon_process = psutil.Process(self.get_daemon_pid())
        except psutil.NoSuchProcess:
            # Stale PID file: the recorded process no longer exists.
            print "Daemon process can not be found"
            return

        print "Daemon is running as pid {pid} since {time}, child processes:".format(
            pid=daemon_process.pid, time=create_time(daemon_process))
        # Daemon workers are children (recursively) of the main daemon process.
        workers = daemon_process.children(recursive=True)

        if workers:
            for worker in workers:
                print "   * {name}[{pid}] {status:>10}, started at {time:%Y-%m-%d %H:%M:%S}".format(
                    name=worker.name(),
                    pid=worker.pid,
                    status=worker.status(),
                    time=create_time(worker))
        else:
            # A healthy daemon always has worker children.
            print "... but it does not have any child processes, which is wrong"
Example no. 4
0
def format_relative_time(datetime):
    """
    Return a string formatted timedelta of the given datetime with respect to the current datetime

    :param datetime: the datetime to format
    :return: string representation of the relative time since the given datetime
    """
    from aiida.common import timezone
    from aiida.common.utils import str_timedelta

    # NOTE: the parameter name shadows the stdlib ``datetime`` module, but it
    # is part of the public signature and therefore kept unchanged.
    delta = timezone.delta(datetime, timezone.now())
    return str_timedelta(delta, negative_to_zero=True, max_num_fields=1)
Example no. 5
0
def group_show(group, raw, limit, uuid):
    """Show information for a given group."""
    from tabulate import tabulate

    from aiida.common.utils import str_timedelta
    from aiida.common import timezone

    # Optionally restrict the node listing to the first ``limit`` nodes.
    node_iterator = group.nodes[:limit] if limit else group.nodes

    if raw:
        # Raw mode: print only a space-separated list of identifiers.
        attribute = 'uuid' if uuid else 'pk'
        echo.echo(' '.join(str(getattr(node, attribute)) for node in node_iterator))
        return

    type_string = group.type_string
    desc = group.description
    now = timezone.now()

    # First table: metadata about the group itself.
    summary = [
        ['Group label', group.label],
        ['Group type_string', type_string],
        ['Group description', desc if desc else '<no description>'],
    ]
    echo.echo(tabulate(summary))

    # Second table: one row per member node, optionally with a UUID column.
    header = ['UUID'] if uuid else []
    header.extend(['PK', 'Type', 'Created'])
    rows = []
    echo.echo('# Nodes:')
    for node in node_iterator:
        row = [node.uuid] if uuid else []
        row.append(node.pk)
        row.append(node.node_type.rsplit('.', 2)[1])
        row.append(str_timedelta(now - node.ctime,
                                 short=True,
                                 negative_to_zero=True))
        rows.append(row)
    echo.echo(tabulate(rows, headers=header))
Example no. 6
0
def group_show(group, raw, uuid):
    """
    Show information on a given group. Pass the GROUP as a parameter.
    """

    from aiida.common.utils import str_timedelta
    from aiida.utils import timezone
    from aiida.plugins.loader import get_plugin_type_from_type_string
    from tabulate import tabulate

    if raw:
        # Raw mode: print only a space-separated list of identifiers.
        attribute = 'uuid' if uuid else 'pk'
        echo.echo(" ".join(str(getattr(node, attribute)) for node in group.nodes))
        return

    type_string = group.type_string
    desc = group.description
    now = timezone.now()

    # First table: metadata about the group itself.
    summary = [
        ["Group name", group.name],
        ["Group type", type_string if type_string else "<user-defined>"],
        ["Group description", desc if desc else "<no description>"],
    ]
    echo.echo(tabulate(summary))

    # Second table: one row per member node, optionally with a UUID column.
    header = ['UUID'] if uuid else []
    header.extend(['PK', 'Type', 'Created'])
    rows = []
    echo.echo("# Nodes:")
    for node in group.nodes:
        row = [node.uuid] if uuid else []
        row.append(node.pk)
        # Show only the final class name of the node's plugin type.
        row.append(get_plugin_type_from_type_string(node.type).rsplit(".", 1)[1])
        row.append(str_timedelta(now - node.ctime,
                                 short=True,
                                 negative_to_zero=True))
        rows.append(row)
    echo.echo(tabulate(rows, headers=header))
Example no. 7
0
def do_list(past_days, all_nodes, limit):
    """
    Return a list of running workflows on screen
    """
    from aiida.common.utils import str_timedelta
    from aiida.backends.utils import load_dbenv, is_dbenv_loaded
    if not is_dbenv_loaded():
        load_dbenv()
    import aiida.utils.timezone as timezone
    from aiida.orm.mixins import Sealable
    # Attribute key under which the "sealed" flag is stored on each node.
    sealed_key = 'attributes.{}'.format(Sealable.SEALED_KEY)

    current_time = timezone.now()

    # "--all" overrides any past-days restriction.
    if all_nodes:
        past_days = None

    rows = []
    for entry in _build_query(limit=limit,
                              past_days=past_days,
                              order_by={'ctime': 'desc'}):
        calculation = entry['calculation']
        # Relative age of the node (e.g. "3h"), clamped at zero.
        age = str_timedelta(timezone.delta(calculation['ctime'], current_time),
                            negative_to_zero=True,
                            max_num_fields=1)
        rows.append([calculation['id'], age,
                     calculation['attributes._process_label'],
                     str(calculation[sealed_key])])

    # The query orders by 'ctime' descending so that ``limit`` keeps the most
    # recent entries; reverse afterwards so the newest is printed last, the
    # same way 'verdi calculation list' does.
    rows.reverse()
    print(
        tabulate(rows,
                 headers=["PID", "Creation time", "ProcessLabel", "Sealed"]))
Example no. 8
0
    def group_show(self, *args):
        """
        Show information on a given group. Pass the PK as a parameter.

        NOTE(review): legacy Python 2 code (bare ``print`` statements mixed
        with ``print()`` calls) — kept byte-identical.

        :param args: raw command-line arguments, parsed with argparse below.
        """
        if not is_dbenv_loaded():
            load_dbenv()

        import argparse
        from aiida.common.exceptions import NotExistent
        from aiida.orm import Group as G
        from aiida.common.utils import str_timedelta
        from aiida.utils import timezone
        from aiida.common.pluginloader import from_type_to_pluginclassname
        from tabulate import tabulate

        parser = argparse.ArgumentParser(
            prog=self.get_full_command_name(),
            description='Information on a given AiiDA group.')
        parser.add_argument('-r', '--raw',
                            dest='raw', action='store_true',
                            help="Show only a space-separated list of PKs of "
                                 "the calculations in the group")
        parser.add_argument('-u', '--uuid',
                            dest='uuid', action='store_true',
                            help="Show UUIDs together with PKs. Note: if the "
                                 "--raw option is also passed, PKs are not "
                                 "printed, but oly UUIDs.")
        parser.add_argument('GROUP', help="The PK of the group to show")
        parser.set_defaults(raw=False)
        parser.set_defaults(uuid=False)

        args = list(args)
        parsed_args = parser.parse_args(args)

        # The GROUP argument may be either an integer PK or a group name:
        # try to interpret it as a PK first, fall back to a name lookup.
        group = parsed_args.GROUP
        try:
            group_pk = int(group)
        except ValueError:
            group_pk = None
            group_name = group

        if group_pk is not None:
            try:
                group = G(dbgroup=group_pk)
            except NotExistent as e:
                print >> sys.stderr, "Error: {}.".format(e.message)
                sys.exit(1)
        else:
            try:
                group = G.get_from_string(group_name)
            except NotExistent as e:
                print >> sys.stderr, "Error: {}.".format(e.message)
                sys.exit(1)

        group_pk = group.pk
        group_name = group.name

        if parsed_args.raw:
            # Raw mode: only a space-separated list of identifiers.
            if parsed_args.uuid:
                print " ".join(str(_.uuid) for _ in group.nodes)
            else:
                print " ".join(str(_.pk) for _ in group.nodes)
        else:
            type_string = group.type_string
            desc = group.description
            now = timezone.now()

            # First table: metadata about the group itself.
            table = []
            table.append(["Group name", group.name])
            table.append(["Group type",
                          type_string if type_string else "<user-defined>"])
            table.append(["Group description",
                          desc if desc else "<no description>"])
            print(tabulate(table))

            # Second table: one row per member node, optionally with UUIDs.
            table = []
            header = []
            if parsed_args.uuid:
                header.append('UUID')
            header.extend(['PK', 'Type', 'Created'])
            print "# Nodes:"
            for n in group.nodes:
                row = []
                if parsed_args.uuid:
                    row.append(n.uuid)
                row.append(n.pk)
                # Only the final class name of the node's plugin type.
                row.append(from_type_to_pluginclassname(n.dbnode.type).
                           rsplit(".", 1)[1])

                row.append(str_timedelta(now - n.ctime, short=True,
                                         negative_to_zero=True))
                table.append(row)
            print(tabulate(table, headers=header))
Example no. 9
0
def get_workflow_info(w, tab_size=2, short=False, pre_string="", depth=16):
    """
    Return a string with all the information regarding the given workflow and
    all its calculations and subworkflows.
    This is a recursive function (to print all subworkflows info as well).

    :param w: a DbWorkflow instance
    :param tab_size: number of spaces to use for the indentation
    :param short: if True, provide a shorter output (only total number of
        calculations, rather than the state of each calculation)
    :param pre_string: string appended at the beginning of each line
    :param depth: the maximum depth level the recursion on sub-workflows will
                  try to reach (0 means we stay at the step level and don't go
                  into sub-workflows, 1 means we go down to one step level of
                  the sub-workflows, etc.)

    :return lines: list of lines to be outputed
    """
    # Note: pre_string becomes larger at each call of get_workflow_info on the
    #       subworkflows: pre_string -> pre_string + "|" + " "*(tab_size-1))
    # TODO SP: abstract the dependence on DbWorkflow

    from aiida.backends.djsite.db.models import DbWorkflow

    if tab_size < 2:
        raise ValueError("tab_size must be > 2")

    now = timezone.now()

    lines = []

    if w.label:
        wf_labelstring = "'{}', ".format(w.label)
    else:
        wf_labelstring = ""

    lines.append(pre_string)  # put an empty line before any workflow
    lines.append(pre_string + "+ Workflow {} ({}pk: {}) is {} [{}]".format(
        w.module_class, wf_labelstring, w.pk, w.state,
        str_timedelta(now - w.ctime, negative_to_zero=True)))

    # print information on the steps only if depth is higher than 0
    if depth > 0:

        # order all steps by time and  get all the needed values
        steps_and_subwf_pks = w.steps.all().order_by(
            'time', 'sub_workflows__ctime',
            'calculations__ctime').values_list('pk', 'sub_workflows__pk',
                                               'calculations', 'name',
                                               'nextcall', 'state')
        # get the list of step pks (distinct), preserving the order
        steps_pk = []
        for item in steps_and_subwf_pks:
            if item[0] not in steps_pk:
                steps_pk.append(item[0])

        # build a dictionary with all the infos for each step pk
        # (the values_list above yields one row per step/subwf/calc
        # combination, so rows with the same step pk are merged here)
        subwfs_of_steps = {}
        for step_pk, subwf_pk, calc_pk, name, nextcall, state in steps_and_subwf_pks:
            if step_pk not in subwfs_of_steps.keys():
                subwfs_of_steps[step_pk] = {
                    'name': name,
                    'nextcall': nextcall,
                    'state': state,
                    'subwf_pks': [],
                    'calc_pks': [],
                }
            if subwf_pk:
                subwfs_of_steps[step_pk]['subwf_pks'].append(subwf_pk)
            if calc_pk:
                subwfs_of_steps[step_pk]['calc_pks'].append(calc_pk)

        # TODO SP: abstract this
        # get all subworkflows for all steps
        wflows = DbWorkflow.objects.filter(
            parent_workflow_step__in=steps_pk)  # .order_by('ctime')
        # dictionary mapping pks into workflows
        workflow_mapping = {_.pk: _ for _ in wflows}

        # get all calculations for all steps
        calcs = JobCalculation.query(
            workflow_step__in=steps_pk)  # .order_by('ctime')
        # dictionary mapping pks into calculations
        calc_mapping = {_.pk: _ for _ in calcs}

        for step_pk in steps_pk:
            lines.append(
                pre_string + "|" + '-' * (tab_size - 1) +
                "* Step: {0} [->{1}] is {2}".format(
                    subwfs_of_steps[step_pk]['name'], subwfs_of_steps[step_pk]
                    ['nextcall'], subwfs_of_steps[step_pk]['state']))

            calc_pks = subwfs_of_steps[step_pk]['calc_pks']

            # print calculations only if it is not short
            if short:
                lines.append(pre_string + "|" + " " * (tab_size - 1) +
                             "| [{0} calculations]".format(len(calc_pks)))
            else:
                for calc_pk in calc_pks:
                    c = calc_mapping[calc_pk]
                    calc_state = c.get_state()
                    if c.label:
                        labelstring = "'{}', ".format(c.label)
                    else:
                        labelstring = ""

                    # For calculations queued with the scheduler, also report
                    # the remote scheduler state and when it was last checked.
                    if calc_state == calc_states.WITHSCHEDULER:
                        sched_state = c.get_scheduler_state()
                        if sched_state is None:
                            remote_state = "(remote state still unknown)"
                        else:
                            last_check = c._get_scheduler_lastchecktime()
                            if last_check is not None:
                                when_string = " {}".format(
                                    str_timedelta(now - last_check,
                                                  short=True,
                                                  negative_to_zero=True))
                                verb_string = "was "
                            else:
                                when_string = ""
                                verb_string = ""
                            remote_state = " ({}{}{})".format(
                                verb_string, sched_state, when_string)
                    else:
                        remote_state = ""
                    lines.append(
                        pre_string + "|" + " " * (tab_size - 1) +
                        "| Calculation ({}pk: {}) is {}{}".format(
                            labelstring, calc_pk, calc_state, remote_state))

            ## SubWorkflows
            # Recurse with an increased prefix and a decreased depth budget.
            for subwf_pk in subwfs_of_steps[step_pk]['subwf_pks']:
                subwf = workflow_mapping[subwf_pk]
                lines.extend(
                    get_workflow_info(subwf,
                                      short=short,
                                      tab_size=tab_size,
                                      pre_string=pre_string + "|" + " " *
                                      (tab_size - 1),
                                      depth=depth - 1))

            lines.append(pre_string + "|")

    return lines
Example no. 10
0
    def _list_calculations_old(cls, states=None, past_days=None, group=None,
                               group_pk=None, all_users=False, pks=[],
                               relative_ctime=True):
        """
        Return a string with a description of the AiiDA calculations.

        .. todo:: does not support the query for the IMPORTED state (since it
          checks the state in the Attributes, not in the DbCalcState table).
          Decide which is the correct logic and implement the correct query.

        :param states: a list of string with states. If set, print only the
            calculations in the states "states", otherwise shows all.
            Default = None.
        :param past_days: If specified, show only calculations that were
            created in the given number of past days.
        :param group: If specified, show only calculations belonging to a
            user-defined group with the given name.
            Can use colons to separate the group name from the type,
            as specified in :py:meth:`aiida.orm.group.Group.get_from_string`
            method.
        :param group_pk: If specified, show only calculations belonging to a
            user-defined group with the given PK.
        :param pks: if specified, must be a list of integers, and only
            calculations within that list are shown. Otherwise, all
            calculations are shown.
            If specified, sets state to None and ignores the
            value of the ``past_days`` option.")
        :param relative_ctime: if true, prints the creation time relative from now.
                               (like 2days ago). Default = True
        :param all_users: if True, list calculation belonging to all users.
                           Default = False

        :return: a string with description of calculations.
        """
        # I assume that calc_states are strings. If this changes in the future,
        # update the filter below from dbattributes__tval to the correct field.
        from aiida.backends.djsite.db.models import DbAuthInfo, DbAttribute
        from aiida.daemon.timestamps import get_last_daemon_timestamp

        # Validate the requested states before running any query.
        if states:
            for state in states:
                if state not in calc_states:
                    return "Invalid state provided: {}.".format(state)

        warnings_list = []

        now = timezone.now()

        # When explicit pks are given, they take precedence and all other
        # filters (group, user, state, past_days) are ignored.
        if pks:
            q_object = Q(pk__in=pks)
        else:
            q_object = Q()

            if group is not None:
                g_pk = Group.get_from_string(group).pk
                q_object.add(Q(dbgroups__pk=g_pk), Q.AND)

            if group_pk is not None:
                q_object.add(Q(dbgroups__pk=group_pk), Q.AND)

            if not all_users:
                q_object.add(Q(user=get_automatic_user()), Q.AND)

            if states is not None:
                q_object.add(Q(dbattributes__key='state',
                               dbattributes__tval__in=states, ), Q.AND)
            if past_days is not None:
                now = timezone.now()
                n_days_ago = now - datetime.timedelta(days=past_days)
                q_object.add(Q(ctime__gte=n_days_ago), Q.AND)

        calc_list_pk = list(
            cls.query(q_object).distinct().values_list('pk', flat=True))

        calc_list = cls.query(pk__in=calc_list_pk).order_by('ctime')

        # Map calculation pk -> scheduler state string, fetched in one query.
        scheduler_states = dict(
            DbAttribute.objects.filter(dbnode__pk__in=calc_list_pk,
                                       key='scheduler_state').values_list(
                'dbnode__pk', 'tval'))

        # I do the query now, so that the list of pks gets cached
        calc_list_data = list(
            calc_list.filter(
                # dbcomputer__dbauthinfo__aiidauser=F('user')
            ).distinct().order_by('ctime').values(
                'pk', 'dbcomputer__name', 'ctime',
                'type', 'dbcomputer__enabled',
                'dbcomputer__pk',
                'user__pk'))
        # Collect the (computer, user) pairs involved so the per-user
        # enabled/disabled flags can be fetched in a single query.
        list_comp_pk = [i['dbcomputer__pk'] for i in calc_list_data]
        list_aiduser_pk = [i['user__pk']
                           for i in calc_list_data]
        enabled_data = DbAuthInfo.objects.filter(
            dbcomputer__pk__in=list_comp_pk, aiidauser__pk__in=list_aiduser_pk
        ).values_list('dbcomputer__pk', 'aiidauser__pk', 'enabled')

        # Map (computer pk, user pk) -> enabled flag.
        enabled_auth_dict = {(i[0], i[1]): i[2] for i in enabled_data}

        states = {c.pk: c._get_state_string() for c in calc_list}

        # Map calculation pk -> last time the scheduler was polled for it.
        scheduler_lastcheck = dict(DbAttribute.objects.filter(
            dbnode__in=calc_list,
            key='scheduler_lastchecktime').values_list('dbnode__pk', 'dval'))

        ## Get the last daemon check
        try:
            last_daemon_check = get_last_daemon_timestamp('updater',
                                                          when='stop')
        except ValueError:
            last_check_string = ("# Last daemon state_updater check: "
                                 "(Error while retrieving the information)")
        else:
            if last_daemon_check is None:
                last_check_string = "# Last daemon state_updater check: (Never)"
            else:
                last_check_string = ("# Last daemon state_updater check: "
                                     "{} ({})".format(
                    str_timedelta(now - last_daemon_check,
                                  negative_to_zero=True),
                    timezone.localtime(last_daemon_check).strftime(
                        "at %H:%M:%S on %Y-%m-%d")))

        # States for which the "[Disabled]" computer marker is not shown.
        disabled_ignorant_states = [
            None, calc_states.FINISHED, calc_states.SUBMISSIONFAILED,
            calc_states.RETRIEVALFAILED, calc_states.PARSINGFAILED,
            calc_states.FAILED
        ]

        if not calc_list:
            return last_check_string
        else:
            # first save a matrix of results to be printed
            res_str_list = [last_check_string]
            str_matrix = []
            title = ['# Pk', 'State', 'Creation',
                     'Sched. state', 'Computer', 'Type']
            str_matrix.append(title)
            len_title = [len(i) for i in title]

            for calcdata in calc_list_data:
                remote_state = "None"

                calc_state = states[calcdata['pk']]
                remote_computer = calcdata['dbcomputer__name']
                try:
                    sched_state = scheduler_states.get(calcdata['pk'], None)
                    if sched_state is None:
                        remote_state = "(unknown)"
                    else:
                        remote_state = '{}'.format(sched_state)
                        # Only calculations still with the scheduler report
                        # when the remote state was last checked.
                        if calc_state == calc_states.WITHSCHEDULER:
                            last_check = scheduler_lastcheck.get(calcdata['pk'],
                                                                 None)
                            if last_check is not None:
                                when_string = " {}".format(
                                    str_timedelta(now - last_check, short=True,
                                                  negative_to_zero=True))
                                verb_string = "was "
                            else:
                                when_string = ""
                                verb_string = ""
                            remote_state = "{}{}{}".format(verb_string,
                                                           sched_state,
                                                           when_string)
                except ValueError:
                    raise

                # Shorten the plugin module path, dropping the common
                # 'calculation.job.' prefix when present.
                calc_module = \
                from_type_to_pluginclassname(calcdata['type']).rsplit(".", 1)[0]
                prefix = 'calculation.job.'
                prefix_len = len(prefix)
                if calc_module.startswith(prefix):
                    calc_module = calc_module[prefix_len:].strip()

                if relative_ctime:
                    calc_ctime = str_timedelta(now - calcdata['ctime'],
                                               negative_to_zero=True,
                                               max_num_fields=1)
                else:
                    # Absolute local time "YYYY-MM-DD HH:MM" (seconds dropped).
                    calc_ctime = " ".join([timezone.localtime(
                        calcdata['ctime']).isoformat().split('T')[0],
                                           timezone.localtime(calcdata[
                                                                  'ctime']).isoformat().split(
                                               'T')[1].split('.')[
                                               0].rsplit(":", 1)[0]])

                the_state = states[calcdata['pk']]

                # decide if it is needed to print enabled/disabled information
                # By default, if the computer is not configured for the
                # given user, assume it is user_enabled
                user_enabled = enabled_auth_dict.get(
                    (calcdata['dbcomputer__pk'],
                     calcdata['user__pk']), True)
                global_enabled = calcdata["dbcomputer__enabled"]

                enabled = "" if (user_enabled and global_enabled or
                                 the_state in disabled_ignorant_states) else " [Disabled]"

                str_matrix.append([calcdata['pk'],
                                   the_state,
                                   calc_ctime,
                                   remote_state,
                                   remote_computer + "{}".format(enabled),
                                   calc_module
                                   ])

            # prepare a formatted text of minimal row length (to fit in terminals!)
            rows = []
            for j in range(len(str_matrix[0])):
                rows.append([len(str(i[j])) for i in str_matrix])
            line_lengths = [str(max(max(rows[i]), len_title[i])) for i in
                            range(len(rows))]
            fmt_string = "{:<" + "}|{:<".join(line_lengths) + "}"
            for row in str_matrix:
                res_str_list.append(fmt_string.format(*[str(i) for i in row]))

            res_str_list += ["# {}".format(_) for _ in warnings_list]
            return "\n".join(res_str_list)
Example no. 11
0
def get_workflow_info(w, tab_size=2, short=False, pre_string="", depth=16):
    """
    Return a string with all the information regarding the given workflow and
    all its calculations and subworkflows.
    This is a recursive function (to print all subworkflows info as well).

    :param w: a DbWorkflow instance
    :param tab_size: number of spaces to use for the indentation
    :param short: if True, provide a shorter output (only total number of
        calculations, rather than the state of each calculation)
    :param pre_string: string appended at the beginning of each line
    :param depth: the maximum depth level the recursion on sub-workflows will
                  try to reach (0 means we stay at the step level and don't go
                  into sub-workflows, 1 means we go down to one step level of
                  the sub-workflows, etc.)

    :return lines: list of lines to be outputed
    :raise ValueError: if tab_size is smaller than 2
    """
    from aiida.orm import load_node
    from aiida.common.datastructures import calc_states
    # Note: pre_string becomes larger at each call of get_workflow_info on the
    #       subworkflows: pre_string -> pre_string + "|" + " "*(tab_size-1))

    if tab_size < 2:
        # BUGFIX: the message used to read "> 2" although the guard accepts
        # tab_size == 2; the actual requirement is >= 2.
        raise ValueError("tab_size must be >= 2")

    # TODO SP: abstract this
    now = timezone.now()

    lines = []

    if w.label:
        wf_labelstring = "'{}', ".format(w.label)
    else:
        wf_labelstring = ""

    lines.append(pre_string)  # put an empty line before any workflow
    lines.append(pre_string + "+ Workflow {} ({}pk: {}) is {} [{}]".format(
        w.module_class, wf_labelstring, w.id, w.state,
        str_timedelta(now - w.ctime, negative_to_zero=True)))

    # print information on the steps only if depth is higher than 0
    if depth > 0:

        # order all steps by time and  get all the needed values
        step_list = sorted([[_.time, _] for _ in w.steps])
        step_list = [_[1] for _ in step_list]

        # Flatten each step into one row per calculation and one row per
        # sub-workflow; a step with neither gets a single placeholder row.
        steps_and_subwf_pks = []
        for step in step_list:
            wf_id = None
            calc_id = None
            if step.calculations:
                for calc in step.calculations:
                    steps_and_subwf_pks.append([
                        step.id, wf_id, calc.id, step.name, step.nextcall,
                        step.state
                    ])
            if step.sub_workflows:
                for www in step.sub_workflows:
                    steps_and_subwf_pks.append([
                        step.id, www.id, calc_id, step.name, step.nextcall,
                        step.state
                    ])
            if (not step.calculations) and (not step.sub_workflows):
                steps_and_subwf_pks.append([
                    step.id, wf_id, calc_id, step.name, step.nextcall,
                    step.state
                ])

        # get the list of step pks (distinct), preserving the order
        steps_pk = []
        for item in steps_and_subwf_pks:
            if item[0] not in steps_pk:
                steps_pk.append(item[0])

        # build a dictionary with all the infos for each step pk
        subwfs_of_steps = {}
        for step_pk, subwf_pk, calc_pk, name, nextcall, state in steps_and_subwf_pks:
            if step_pk not in subwfs_of_steps.keys():
                subwfs_of_steps[step_pk] = {
                    'name': name,
                    'nextcall': nextcall,
                    'state': state,
                    'subwf_pks': [],
                    'calc_pks': [],
                }
            if subwf_pk:
                subwfs_of_steps[step_pk]['subwf_pks'].append(subwf_pk)
            if calc_pk:
                subwfs_of_steps[step_pk]['calc_pks'].append(calc_pk)

        # TODO: replace the database access using SQLAlchemy

        # get all subworkflows for all steps
        # wflows = DbWorkflow.query.filter_by(DbWorkflow.parent_workflow_step.in_(steps_pk))
        # although the line above is equivalent to the following, has a bug of sqlalchemy.
        #  import warnings
        # from sqlalchemy import exc as sa_exc
        # with warnings.catch_warnings():
        #     warnings.simplefilter("ignore", category=sa_exc.SAWarning)
        #     wflows = DbWorkflow.parent_workflow_step.any(DbWorkflowStep.id.in_(steps_pk))

        wflows = DbWorkflow.query.join(DbWorkflow.parent_workflow_step).filter(
            DbWorkflowStep.id.in_(steps_pk)).all()

        # dictionary mapping pks into workflows
        workflow_mapping = {_.id: _ for _ in wflows}

        # get all calculations for all steps
        # calcs = JobCalculation.query(workflow_step__in=steps_pk)  #.order_by('ctime')
        calcs_ids = [_[2] for _ in steps_and_subwf_pks
                     if _[2] is not None]  # extremely inefficient!
        calcs = [load_node(_) for _ in calcs_ids]
        # dictionary mapping pks into calculations
        calc_mapping = {_.id: _ for _ in calcs}

        for step_pk in steps_pk:
            lines.append(
                pre_string + "|" + '-' * (tab_size - 1) +
                "* Step: {0} [->{1}] is {2}".format(
                    subwfs_of_steps[step_pk]['name'], subwfs_of_steps[step_pk]
                    ['nextcall'], subwfs_of_steps[step_pk]['state']))

            calc_pks = subwfs_of_steps[step_pk]['calc_pks']

            # print calculations only if it is not short
            if short:
                lines.append(pre_string + "|" + " " * (tab_size - 1) +
                             "| [{0} calculations]".format(len(calc_pks)))
            else:
                for calc_pk in calc_pks:
                    c = calc_mapping[calc_pk]
                    calc_state = c.get_state()
                    if c.label:
                        labelstring = "'{}', ".format(c.label)
                    else:
                        labelstring = ""

                    # For calculations waiting on the scheduler, also show the
                    # remote scheduler state and how long ago it was checked.
                    if calc_state == calc_states.WITHSCHEDULER:
                        sched_state = c.get_scheduler_state()
                        if sched_state is None:
                            remote_state = "(remote state still unknown)"
                        else:
                            last_check = c._get_scheduler_lastchecktime()
                            if last_check is not None:
                                when_string = " {}".format(
                                    str_timedelta(now - last_check,
                                                  short=True,
                                                  negative_to_zero=True))
                                verb_string = "was "
                            else:
                                when_string = ""
                                verb_string = ""
                            remote_state = " ({}{}{})".format(
                                verb_string, sched_state, when_string)
                    else:
                        remote_state = ""
                    lines.append(
                        pre_string + "|" + " " * (tab_size - 1) +
                        "| Calculation ({}pk: {}) is {}{}".format(
                            labelstring, calc_pk, calc_state, remote_state))

            ## SubWorkflows
            # Recurse with an extended pre_string and a decremented depth.
            for subwf_pk in subwfs_of_steps[step_pk]['subwf_pks']:
                subwf = workflow_mapping[subwf_pk]
                lines.extend(
                    get_workflow_info(subwf,
                                      short=short,
                                      tab_size=tab_size,
                                      pre_string=pre_string + "|" + " " *
                                      (tab_size - 1),
                                      depth=depth - 1))

            lines.append(pre_string + "|")

    return lines
Esempio n. 12
0
def do_list(past_days, all_states, limit, project):
    """
    Return a list of running workflows on screen.

    :param past_days: only query calculations created within this many days
        (passed through to ``_build_query``)
    :param all_states: if True, also show sealed (terminated) entries;
        by default only unsealed entries are listed
    :param limit: maximum number of calculations to query for
    :param project: iterable of projection names used as table columns;
        a default set is used when empty
    """
    from aiida.backends.utils import load_dbenv, is_dbenv_loaded
    if not is_dbenv_loaded():
        load_dbenv()

    from aiida.common.utils import str_timedelta
    from aiida.utils import timezone
    from aiida.orm.mixins import Sealable
    from aiida.orm.calculation.work import WorkCalculation

    _SEALED_ATTRIBUTE_KEY = 'attributes.{}'.format(Sealable.SEALED_KEY)
    _ABORTED_ATTRIBUTE_KEY = 'attributes.{}'.format(
        WorkCalculation.ABORTED_KEY)
    _FAILED_ATTRIBUTE_KEY = 'attributes.{}'.format(WorkCalculation.FAILED_KEY)
    _FINISHED_ATTRIBUTE_KEY = 'attributes.{}'.format(
        WorkCalculation.FINISHED_KEY)

    # Reference time for the relative 'ctime'/'mtime' columns.
    # BUGFIX: 'now' was used by the formatter lambdas below but never defined;
    # the resulting NameError was silently swallowed by the bare 'except' in
    # map_result, so timestamps were printed raw instead of as time deltas.
    now = timezone.now()

    if not project:
        project = ('id', 'ctime', 'label', 'state', 'sealed'
                   )  # default projections

    # Mapping of projections to list table headers.
    hmap_dict = {
        'id': 'PID',
        'ctime': 'Creation time',
        'label': 'Process Label',
        'uuid': 'UUID',
        'descr': 'Description',
        'mtime': 'Modification time'
    }

    def map_header(p):
        """Return the table header for projection *p*."""
        try:
            return hmap_dict[p]
        except KeyError:
            return p.capitalize()

    # Mapping of querybuilder keys that differ from projections.
    pmap_dict = {
        'label': 'attributes._process_label',
        'sealed': _SEALED_ATTRIBUTE_KEY,
        'failed': _FAILED_ATTRIBUTE_KEY,
        'aborted': _ABORTED_ATTRIBUTE_KEY,
        'finished': _FINISHED_ATTRIBUTE_KEY,
        'descr': 'description',
    }

    def map_projection(p):
        """Return the querybuilder key for projection *p*."""
        try:
            return pmap_dict[p]
        except KeyError:
            return p

    def calculation_state(calculation):
        """Derive a human-readable state from the terminal-state flags."""
        if calculation[_FAILED_ATTRIBUTE_KEY]:
            return 'FAILED'
        elif calculation[_ABORTED_ATTRIBUTE_KEY]:
            return 'ABORTED'
        elif calculation[_FINISHED_ATTRIBUTE_KEY]:
            return 'FINISHED'
        else:
            return 'RUNNING'

    # Mapping of to-string formatting of projections that do need it.
    rmap_dict = {
        'ctime':
        lambda calc: str_timedelta(timezone.delta(
            calc[map_projection('ctime')], now),
                                   negative_to_zero=True,
                                   max_num_fields=1),
        'mtime':
        lambda calc: str_timedelta(timezone.delta(
            calc[map_projection('mtime')], now),
                                   negative_to_zero=True,
                                   max_num_fields=1),
        'sealed':
        lambda calc: str(calc[map_projection('sealed')]),
        'state':
        lambda calc: calculation_state(calc),
    }

    def map_result(p, obj):
        """Format the value of projection *p* for result row *obj*."""
        # BUGFIX: fall back only on KeyError (no formatter registered for this
        # projection); the previous bare 'except' also hid genuine errors
        # raised inside the formatting lambdas.
        try:
            return rmap_dict[p](obj)
        except KeyError:
            return obj[map_projection(p)]

    mapped_projections = list(map(lambda p: map_projection(p), project))
    # Terminal-state flags are always queried so 'state' can be derived.
    mapped_projections.extend([
        _FAILED_ATTRIBUTE_KEY, _ABORTED_ATTRIBUTE_KEY, _FINISHED_ATTRIBUTE_KEY
    ])
    table = []

    for res in _build_query(limit=limit,
                            projections=mapped_projections,
                            past_days=past_days,
                            order_by={'ctime': 'desc'}):
        calc = res['calculation']
        # By default only unsealed (still running) entries are shown.
        if calc[_SEALED_ATTRIBUTE_KEY] and not all_states:
            continue
        table.append(list(map(lambda p: map_result(p, calc), project)))

    # Since we sorted by descending creation time, we revert the list to print the most
    # recent entries last
    table = table[::-1]

    print(
        tabulate(table, headers=(list(map(lambda p: map_header(p), project)))))
Esempio n. 13
0
def do_list(past_days, all_states, limit):
    """
    Return a list of running workflows on screen

    Queries work calculations (newest first, up to ``limit`` entries created
    within the last ``past_days`` days) and prints a table with their pk,
    relative creation time, derived state, sealed flag and process label.
    Sealed entries are skipped unless ``all_states`` is True.
    """
    from aiida.backends.utils import load_dbenv, is_dbenv_loaded
    if not is_dbenv_loaded():
        load_dbenv()

    from aiida.common.utils import str_timedelta
    from aiida.utils import timezone
    from aiida.orm.mixins import Sealable
    from aiida.orm.calculation.work import WorkCalculation

    # Attribute keys under which a work calculation stores its state flags.
    sealed_key = 'attributes.{}'.format(Sealable.SEALED_KEY)
    aborted_key = 'attributes.{}'.format(WorkCalculation.ABORTED_KEY)
    failed_key = 'attributes.{}'.format(WorkCalculation.FAILED_KEY)
    finished_key = 'attributes.{}'.format(WorkCalculation.FINISHED_KEY)

    def has_flag(entry, key):
        """True when attribute *key* is present on *entry* and truthy."""
        return bool(entry.get(key))

    rows = []
    for result in _build_query(limit=limit,
                               past_days=past_days,
                               order_by={'ctime': 'desc'}):

        entry = result['calculation']

        # Human-readable age of the calculation (single field, never negative).
        age = str_timedelta(timezone.delta(entry['ctime'], timezone.now()),
                            negative_to_zero=True,
                            max_num_fields=1)

        sealed = has_flag(entry, sealed_key)

        # Terminal flags take precedence; a sealed entry with none of them
        # set is in an inconsistent state.
        if has_flag(entry, finished_key):
            state = 'Finished'
        elif has_flag(entry, failed_key):
            state = 'Failed'
        elif has_flag(entry, aborted_key):
            state = 'Aborted'
        elif sealed:
            state = 'Unknown'
        else:
            state = 'Running'

        # By default we only display unsealed entries, unless all_states flag is set
        if sealed and not all_states:
            continue

        rows.append([entry['id'], age, state,
                     str(sealed), entry['attributes._process_label']])

    # The query returned newest-first; flip so the most recent prints last.
    rows.reverse()

    print(
        tabulate(rows,
                 headers=['PK', 'Creation', 'State', 'Sealed',
                          'ProcessLabel']))
Esempio n. 14
0
def get_workflow_info(w, tab_size=2, short=False, pre_string="",
                      depth=16):
    """
    Return a string with all the information regarding the given workflow and
    all its calculations and subworkflows.
    This is a recursive function (to print all subworkflows info as well).

    :param w: a DbWorkflow instance
    :param tab_size: number of spaces to use for the indentation
    :param short: if True, provide a shorter output (only total number of
        calculations, rather than the state of each calculation)
    :param pre_string: string appended at the beginning of each line
    :param depth: the maximum depth level the recursion on sub-workflows will
                  try to reach (0 means we stay at the step level and don't go
                  into sub-workflows, 1 means we go down to one step level of
                  the sub-workflows, etc.)

    :return lines: list of lines to be outputed
    """
    # Note: pre_string becomes larger at each call of get_workflow_info on the
    #       subworkflows: pre_string -> pre_string + "|" + " "*(tab_size-1))

    if tab_size < 2:
        # NOTE(review): the guard rejects only values below 2, so the message
        # should read ">= 2" rather than "> 2".
        raise ValueError("tab_size must be > 2")

    # TODO SP: abstract this
    # Reference time for the relative "age" shown in the workflow header.
    now = timezone.now()

    lines = []

    # Optional workflow label, rendered as "'label', " inside the header.
    if w.label:
        wf_labelstring = "'{}', ".format(w.label)
    else:
        wf_labelstring = ""

    lines.append(pre_string)  # put an empty line before any workflow
    lines.append(pre_string + "+ Workflow {} ({}pk: {}) is {} [{}]".format(
        w.module_class, wf_labelstring, w.pk, w.state, str_timedelta(
            now - w.ctime, negative_to_zero=True)))

    # print information on the steps only if depth is higher than 0
    if depth > 0:

        # order all steps by time and  get all the needed values
        # NOTE(review): DbWorkflowStep/DbWorkflow/DbNode are SQLAlchemy models
        # defined elsewhere in the module; with_entities selects the DbNode
        # entity itself as third column (unpacked below as calc_pk) -- confirm
        # whether the pk or the full node object is intended downstream.
        steps_and_subwf_pks = w.steps.\
            join(DbWorkflowStep.sub_workflows, DbWorkflowStep.calculations).\
            order_by(DbWorkflowStep.time, DbWorkflow.ctime, DbNode.ctime).\
            with_entities(
                DbWorkflowStep.id, DbWorkflow.id, DbNode, DbWorkflowStep.name,
                DbWorkflowStep.nextcall, DbWorkflowStep.state
            )

        # get the list of step pks (distinct), preserving the order
        steps_pk = []
        for item in steps_and_subwf_pks:
            if item[0] not in steps_pk:
                steps_pk.append(item[0])

        # build a dictionary with all the infos for each step pk
        subwfs_of_steps = {}
        for step_pk, subwf_pk, calc_pk, name, nextcall, state in steps_and_subwf_pks:
            if step_pk not in subwfs_of_steps.keys():
                subwfs_of_steps[step_pk] = {'name': name,
                                            'nextcall': nextcall,
                                            'state': state,
                                            'subwf_pks': [],
                                            'calc_pks': [],
                                            }
            if subwf_pk:
                subwfs_of_steps[step_pk]['subwf_pks'].append(subwf_pk)
            if calc_pk:
                subwfs_of_steps[step_pk]['calc_pks'].append(calc_pk)
            # NOTE(review): the snippet appears truncated here -- the collected
            # step information is never rendered, 'short' is unused, and no
            # value is returned; compare with the fuller get_workflow_info
            # variant earlier in this file.
Esempio n. 15
0
    def configure_user(self, *args):
        """
        Configure the user that can run the daemon.

        Interactively shows the current default user and the currently
        configured daemon user, warns the operator, asks for confirmation,
        then stores the newly entered user email via ``set_daemon_user``.
        Exits via ``sys.exit`` when extra arguments are passed, when the
        daemon is running for the current user, or when the operator declines.

        Python 2 code: uses print statements alongside print() calls.
        """
        # Load the backend environment lazily, tagged as the daemon process.
        if not is_dbenv_loaded():
            from aiida.backends.utils import load_dbenv
            load_dbenv(process='daemon')

        # This command takes no positional arguments.
        if args:
            print >> sys.stderr, (
                "No arguments allowed for the '{}' command.".format(
                    self.get_full_command_name()))
            sys.exit(1)

        from aiida.utils import timezone
        from aiida.backends.utils import get_daemon_user, set_daemon_user
        from aiida.common.utils import (get_configured_user_email,
                                        query_yes_no, query_string)
        from aiida.daemon.timestamps import get_most_recent_daemon_timestamp
        from aiida.common.utils import str_timedelta
        from aiida.orm.user import User

        old_daemon_user = get_daemon_user()
        this_user = get_configured_user_email()

        print("> Current default user: {}".format(this_user))
        print("> Currently configured user who can run the daemon: {}".format(
            old_daemon_user))
        if old_daemon_user == this_user:
            print(
                "  (therefore, at the moment you are the user who can run "
                "the daemon)")
            # Refuse to change the daemon user while the daemon is running.
            pid = self.get_daemon_pid()
            if pid is not None:
                print("The daemon is running! I will not proceed.")
                sys.exit(1)
        else:
            print("  (therefore, you cannot run the daemon, at the moment)")

        most_recent_timestamp = get_most_recent_daemon_timestamp()

        # Boxed warning banner (76 characters wide).
        print "*" * 76
        print "* {:72s} *".format("WARNING! Change this setting only if you "
                                  "are sure of what you are doing.")
        print "* {:72s} *".format("Moreover, make sure that the "
                                  "daemon is stopped.")

        if most_recent_timestamp is not None:
            timestamp_delta = timezone.now() - most_recent_timestamp
            last_check_string = (
                "[The most recent timestamp from the daemon was {}]".format(
                    str_timedelta(timestamp_delta)))
            print "* {:72s} *".format(last_check_string)

        print "*" * 76

        answer = query_yes_no(
            "Are you really sure that you want to change "
            "the daemon user?",
            default="no")
        if not answer:
            sys.exit(0)

        print ""
        print "Enter below the email of the new user who can run the daemon."
        # NOTE(review): the next lines are corrupted in this source -- a
        # '******' masking artifact replaced the code between the query_string
        # prompt and the error print, including the lookup that must define
        # 'found_users' (used below); restore from the original before use.
        new_daemon_user_email = query_string("New daemon user: "******"ERROR! The user you specified ({}) does "
                "not exist in the database!!".format(new_daemon_user_email))
            print("The available users are {}".format(
                [_.email for _ in User.search_for_users()]))
            sys.exit(1)

        set_daemon_user(new_daemon_user_email)

        print "The new user that can run the daemon is now {} {}.".format(
            found_users[0].first_name, found_users[0].last_name)
Esempio n. 16
0
    def daemon_status(self, *args):
        """
        Print the status of the daemon

        Reports the most recent daemon timestamp (relative to now), then
        queries the supervisord XML-RPC interface for the list of daemon
        processes and prints their states. Python 2 code (print statements,
        ``xmlrpclib``).
        """
        # Load the backend environment lazily, tagged as the daemon process.
        if not is_dbenv_loaded():
            from aiida.backends.utils import load_dbenv
            load_dbenv(process='daemon')

        # This command takes no positional arguments.
        if args:
            print >> sys.stderr, (
                "No arguments allowed for the '{}' command.".format(
                    self.get_full_command_name()))
            sys.exit(1)

        import supervisor
        import supervisor.supervisorctl
        import xmlrpclib

        from aiida.utils import timezone

        from aiida.daemon.timestamps import get_most_recent_daemon_timestamp
        from aiida.common.utils import str_timedelta
        from pytz import UTC

        most_recent_timestamp = get_most_recent_daemon_timestamp()

        if most_recent_timestamp is not None:
            # Compare against an aware UTC "now" (the stored timestamp is
            # presumably timezone-aware as well -- hence the pytz UTC import).
            timestamp_delta = (timezone.datetime.now(tz=UTC) -
                               most_recent_timestamp)
            print("# Most recent daemon timestamp:{}".format(
                str_timedelta(timestamp_delta)))
        else:
            print("# Most recent daemon timestamp: [Never]")

        # Without a PID file there is nothing more to query.
        pid = self.get_daemon_pid()
        if pid is None:
            print "Daemon not running (cannot find the PID for it)"
            return

        # Connect to supervisord over XML-RPC, reusing the credentials and
        # server URL from the daemon's supervisor configuration file.
        c = supervisor.supervisorctl.ClientOptions()
        s = c.read_config(self.conffile_full_path)
        proxy = xmlrpclib.ServerProxy(
            'http://127.0.0.1',
            transport=supervisor.xmlrpc.SupervisorTransport(
                s.username, s.password, s.serverurl))
        try:
            running_processes = proxy.supervisor.getAllProcessInfo()
        except xmlrpclib.Fault as e:
            # Supervisor refuses requests while shutting down.
            if e.faultString == "SHUTDOWN_STATE":
                print "The daemon is shutting down..."
                return
            else:
                raise
        except Exception as e:
            # Connection-level failures: report and bail out gracefully.
            import socket
            if isinstance(e, socket.error):
                print "Could not reach the daemon, I got a socket.error: "
                print "  -> [Errno {}] {}".format(e.errno, e.strerror)
            else:
                print "Could not reach the daemon, I got a {}: {}".format(
                    e.__class__.__name__, e.message)
            print "You can try to stop the daemon and start it again."
            return

        if running_processes:
            print "## Found {} process{} running:".format(
                len(running_processes),
                '' if len(running_processes) == 1 else 'es')
            for process in running_processes:
                print "   * {:<22} {:<10} {}".format(
                    "{}[{}]".format(process['group'], process['name']),
                    process['statename'], process['description'])
        else:
            print "I was able to connect to the daemon, but I did not find any process..."