Example #1
def main():
    def show_examples_and_exit(err_msg=None, error_code=1):
        """Display the usage of the script."""
        sys.stderr.write(get_epilog())
        if err_msg:
            sys.stderr.write("Fatal Error\n" + err_msg + "\n")
        sys.exit(error_code)

    parser = get_parser(with_epilog=True)

    # Parse the command line.
    try:
        options = parser.parse_args()
    except Exception:
        # NB: argparse reports bad arguments via SystemExit, which is not an
        # Exception subclass, so this handler only covers unexpected errors.
        show_examples_and_exit(error_code=1)

    # loglevel is bound to the string value obtained from the command line argument.
    # Convert to upper case to allow the user to specify --loglevel=DEBUG or --loglevel=debug
    import logging
    numeric_level = getattr(logging, options.loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % options.loglevel)
    logging.basicConfig(level=numeric_level)

    if options.no_colors:
        # Disable colors
        termcolor.enable(False)

    if options.show_managers:
        # Show table with manager configuration files.
        return show_managers(options)

    if options.create_config:
        # Install Yaml configuration files for manager and scheduler.
        abilab.install_config_files(workdir=None,
                                    force_reinstall=options.force_reinstall)

    errmsg = abilab.abicheck(verbose=options.verbose)
    if errmsg:
        cprint(errmsg, "red")
        cprint(
            "TIP: Use `--show-managers` to print the manager files provided by AbiPy.\n"
            "If abicheck.py is failing because it cannot find the manager.yml configuration file, "
            "copy one of these manager files to ~/.abinit/abipy/manager.yml and adapt it to your machine.",
            "yellow")
        return 2
    else:
        cprint("\nAbipy requirements are properly configured\n", "green")

    if not options.with_flow:
        return 0

    retcode = run_flow(options)
    if retcode == 0:
        cprint("\nTest flow completed successfully\n", "green")

    return retcode
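
Note: the loglevel-handling block above follows the standard recipe from the Python logging HOWTO. A minimal standalone sketch of the same idiom (the helper name set_loglevel is ours, not AbiPy's):

import logging

def set_loglevel(loglevel):
    """Configure the root logger from a level name such as "DEBUG" or "debug"."""
    # getattr maps e.g. "DEBUG" to logging.DEBUG and returns None for unknown names.
    numeric_level = getattr(logging, loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % loglevel)
    logging.basicConfig(level=numeric_level)

set_loglevel("debug")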
Example #2
    def test_remove_non_ascii(self):
        enable(True)
        print('Current terminal type: %s' % os.getenv('TERM'))
        print('Test basic colors:')
        cprint('Grey color', 'grey')
        cprint('Red color', 'red')
        cprint('Green color', 'green')
        cprint('Yellow color', 'yellow')
        cprint('Blue color', 'blue')
        cprint('Magenta color', 'magenta')
        cprint('Cyan color', 'cyan')
        cprint('White color', 'white')
        print('-' * 78)

        print('Test highlights:')
        cprint('On grey color', on_color='on_grey')
        cprint('On red color', on_color='on_red')
        cprint('On green color', on_color='on_green')
        cprint('On yellow color', on_color='on_yellow')
        cprint('On blue color', on_color='on_blue')
        cprint('On magenta color', on_color='on_magenta')
        cprint('On cyan color', on_color='on_cyan')
        cprint('On white color', color='grey', on_color='on_white')
        print('-' * 78)

        print('Test attributes:')
        cprint('Bold grey color', 'grey', attrs=['bold'])
        cprint('Dark red color', 'red', attrs=['dark'])
        cprint('Underline green color', 'green', attrs=['underline'])
        cprint('Blink yellow color', 'yellow', attrs=['blink'])
        cprint('Reversed blue color', 'blue', attrs=['reverse'])
        cprint('Concealed Magenta color', 'magenta', attrs=['concealed'])
        cprint('Bold underline reverse cyan color',
               'cyan',
               attrs=['bold', 'underline', 'reverse'])
        cprint('Dark blink concealed white color',
               'white',
               attrs=['dark', 'blink', 'concealed'])
        print('-' * 78)

        print('Test mixing:')
        cprint('Underline red on grey color', 'red', 'on_grey', ['underline'])
        cprint('Reversed green on red color', 'green', 'on_red', ['reverse'])

        # Test cprint_map
        cprint_map("Hello world", {"Hello": "red"})
        cprint_map("Hello world",
                   {"Hello": {
                       "color": "blue",
                       "on_color": "on_red"
                   }})

        # Test terminal size.
        print("terminal size: %s", get_terminal_size())
        enable(False)
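
Note: cprint_map is not part of the standard termcolor package (which only exposes colored and cprint); it appears to come from the termcolor extensions used by this test suite. Based on how the test calls it, a plausible re-implementation on top of termcolor.colored could look like this (a sketch for illustration, not the library's actual code):

from termcolor import colored

def cprint_map(text, cmap, **kwargs):
    # Colorize each substring listed in cmap. A value can be a color name
    # such as "red", or a dict of colored() keyword arguments such as
    # {"color": "blue", "on_color": "on_red"}.
    for key, value in cmap.items():
        if isinstance(value, dict):
            text = text.replace(key, colored(key, **value))
        else:
            text = text.replace(key, colored(key, color=value))
    print(text, **kwargs)

cprint_map("Hello world", {"Hello": "red"})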
Example #3
def main():
    def str_examples():
        return """\
Usage example:
    dojodata.py plot H.psp8                ==> Plot dojo data for pseudo H.psp8
    dojodata.py compare H.psp8 H-low.psp8  ==> Plot and compare dojo data for pseudos H.psp8 and H-low.psp8
    dojodata.py nbcompare H.psp8 H-low.psp8 ==> Plot and compare dojo data in ipython notebooks.
    dojodata.py trials H.psp8 -r 1
    dojodata.py table .                    ==> Build table (find all psp8 files within current directory)
    dojodata.py figures .                  ==> Plot periodic table figures
    dojodata.py notebook H.psp8            ==> Generate ipython notebook and open it in the browser
    dojodata.py check table/*/*.psp8 -v --check-trials=gbrv_fcc,gbrv_bcc
    dojodata.py raren .
"""

    def show_examples_and_exit(err_msg=None, error_code=1):
        """Display the usage of the script."""
        sys.stderr.write(str_examples())
        if err_msg: sys.stderr.write("Fatal Error\n" + err_msg + "\n")
        sys.exit(error_code)

    def parse_rows(s):
        if not s: return []
        tokens = s.split(",")
        return list(map(int, tokens)) if tokens else []

    def parse_symbols(s):
        if not s: return []
        return s.split(",")

    # Parent parser for commands that need to know on which subset of pseudos we have to operate.
    copts_parser = argparse.ArgumentParser(add_help=False)
    copts_parser.add_argument('pseudos', nargs="+", help="Pseudopotential file or directory containing pseudos")
    copts_parser.add_argument('-s', "--symbols", type=parse_symbols,
        help=("List of chemical symbols to include or exclude. "
              "Example: --symbols=He,Li to include He and Li, --symbols=-He to exclude He"))
    copts_parser.add_argument('-v', '--verbose', default=0, action='count', # -vv --> verbose=2
                         help='Verbose, can be supplied multiple times to increase verbosity')

    copts_parser.add_argument('--loglevel', default="ERROR", type=str,
                        help="set the loglevel. Possible values: CRITICAL, ERROR (default), WARNING, INFO, DEBUG")
    copts_parser.add_argument('--no-colors', default=False, action="store_true", help='Disable ASCII colors')
    copts_parser.add_argument('--seaborn', action="store_true", help="Use seaborn settings")

    # Options for pseudo selection.
    group = copts_parser.add_mutually_exclusive_group()
    group.add_argument("-r", '--rows', default="", type=parse_rows, help="Select these rows of the periodic table.")
    group.add_argument("-f", '--family', type=str, default="", help="Select this family of the periodic table.")

    # Build the main parser.
    parser = argparse.ArgumentParser(epilog=str_examples(), formatter_class=argparse.RawDescriptionHelpFormatter)

    # Create the parsers for the sub-commands
    subparsers = parser.add_subparsers(dest='command', help='sub-command help', description="Valid subcommands")

    plot_options_parser = argparse.ArgumentParser(add_help=False)
    plot_options_parser.add_argument("-w", "--what-plot", type=str, default="all",
                                      help="Quantity to plot e.g df for deltafactor, gbrv for GBRV tests")
    plot_options_parser.add_argument("-e", "--eos", action="store_true", help="Plot EOS curve")

    # Subparser for plot command.
    p_plot = subparsers.add_parser('plot', parents=[copts_parser, plot_options_parser],
                                   help=dojo_plot.__doc__)

    # Subparser for notebook command.
    p_notebook = subparsers.add_parser('notebook', parents=[copts_parser],
                                       help=dojo_notebook.__doc__)
    p_notebook.add_argument('--foreground', action='store_true', default=False,
                            help="Run jupyter notebook in the foreground.")
    p_notebook.add_argument('--no-validation', action='store_true', default=False,
                             help="Don't add the validation cell.")
    p_notebook.add_argument('--hide-code', action='store_true', default=False,
                            help="Add a cell that hides the raw code.")
    p_notebook.add_argument('--no-tmp', action='store_true', default=False,
                            help="Don't use temporary file for notebook.")

    # Subparser for compare.
    p_compare = subparsers.add_parser('compare', parents=[copts_parser, plot_options_parser],
                                      help=dojo_compare.__doc__)

    # Subparser for nbcompare.
    p_nbcompare = subparsers.add_parser('nbcompare', parents=[copts_parser, plot_options_parser],
                                        help=dojo_nbcompare.__doc__)

    # Subparser for figures
    p_figures = subparsers.add_parser('figures', parents=[copts_parser], help=dojo_figures.__doc__)

    # Subparser for table command.
    p_table = subparsers.add_parser('table', parents=[copts_parser], help=dojo_table.__doc__)
    p_table.add_argument("-j", '--json', default=False, action="store_true",
                         help="Dump table in json format to file table.json")
    p_table.add_argument("-b", '--best', default=False, action="store_true",
                         help="Select best pseudos according to deltafactor")

    p_nbtable = subparsers.add_parser('nbtable', parents=[copts_parser], help=dojo_nbtable.__doc__)

    # Subparser for dist command.
    p_dist = subparsers.add_parser('dist', parents=[copts_parser], help=dojo_dist.__doc__)

    # Subparser for trials command.
    p_trials = subparsers.add_parser('trials', parents=[copts_parser], help=dojo_trials.__doc__)
    p_trials.add_argument("--savefig", type=str, default="", help="Save plot to savefig file")

    # Subparser for check command.
    def parse_trials(s):
        if s is None: return s
        #if s == "all": return DojoReport.ALL_TRIALS
        return s.split(",")

    p_check = subparsers.add_parser('check', parents=[copts_parser], help=dojo_check.__doc__)
    p_check.add_argument("--check-trials", type=parse_trials, default=None, help="List of trials to check")

    # Subparser for validate command.
    #p_validate = subparsers.add_parser('validate', parents=[copts_parser], help=dojo_validate.__doc__)

    # Subparser for raren command.
    p_raren = subparsers.add_parser('raren', parents=[copts_parser], help=dojo_raren.__doc__)

    # Parse command line.
    try:
        options = parser.parse_args()
    except Exception:
        show_examples_and_exit(error_code=1)

    # loglevel is bound to the string value obtained from the command line argument.
    # Convert to upper case to allow the user to specify --loglevel=DEBUG or --loglevel=debug
    import logging
    numeric_level = getattr(logging, options.loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % options.loglevel)
    logging.basicConfig(level=numeric_level)

    if options.no_colors:
        # Disable colors
        termcolor.enable(False)

    def get_pseudos(options):
        """
        Find pseudos in paths, return :class:`DojoTable` object sorted by atomic number Z.
        Accepts filepaths or directory.
        """
        exts = ("psp8", "xml")

        paths = options.pseudos

        if len(paths) == 1:
            # Handle directory argument
            if os.path.isdir(paths[0]):
                top = os.path.abspath(paths[0])
                paths = find_exts(top, exts, exclude_dirs="_*")
            # Handle glob syntax e.g. "./*.psp8"
            elif "*" in paths[0]:
                paths = glob.glob(paths[0])

        if options.verbose > 1: print("Will read pseudos from: %s" % paths)

        pseudos = []
        for p in paths:
            try:
                pseudo = dojopseudo_from_file(p)
                if pseudo is None:
                    cprint("[%s] Pseudo.from_file returned None. Something wrong in file!" % p, "red")
                    continue
                pseudos.append(pseudo)

            except Exception as exc:
                cprint("[%s] Python exception. This pseudo will be ignored" % p, "red")
                if options.verbose: print(exc)

        table = DojoTable(pseudos)

        # Here we select a subset of pseudos according to family or rows
        if options.rows:
            table = table.select_rows(options.rows)
        elif options.family:
            table = table.select_families(options.family)

        # here we select chemical symbols.
        if options.symbols:
            table = table.select_symbols(options.symbols)

        return table.sort_by_z()

    # Build DojoTable from the paths specified by the user.
    options.pseudos = get_pseudos(options)
    if not options.pseudos:
        cprint("Empty pseudopotential list. Returning", "magenta")
        return 1
    if options.verbose: print(options.pseudos)

    if options.seaborn:
        import seaborn as sns
        sns.set(style="dark", palette="Set2")
        #sns.set(style='ticks', palette='Set2')
        #And to remove "chartjunk", do:
        #sns.despine()
        #plt.tight_layout()
        #sns.despine(offset=10, trim=True)

    # Dispatch
    return globals()["dojo_" + options.command](options)
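
Note: the last line dispatches on the subcommand name by looking up a module-level function called dojo_<command>. A self-contained sketch of this idiom (cmd_hello is an illustrative handler, not part of dojodata.py):

import argparse
import sys

def cmd_hello(options):
    print("Hello, %s" % options.name)
    return 0

def main():
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(dest="command")
    p_hello = subparsers.add_parser("hello")
    p_hello.add_argument("name")
    options = parser.parse_args()
    # Same lookup pattern as globals()["dojo_" + options.command](options) above.
    return globals()["cmd_" + options.command](options)

if __name__ == "__main__":
    sys.exit(main())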
Example #4
def main():

    # Decorate argparse classes to add portable support for aliases in add_subparsers
    class MyArgumentParser(argparse.ArgumentParser):
        def add_subparsers(self, **kwargs):
            new = super(MyArgumentParser, self).add_subparsers(**kwargs)
            # Use my class
            new.__class__ = MySubParserAction
            return new

    class MySubParserAction(argparse._SubParsersAction):
        def add_parser(self, name, **kwargs):
            """Allows one to pass the aliases option even if this version of ArgumentParser does not support it."""
            try:
                return super(MySubParserAction, self).add_parser(name, **kwargs)
            except Exception as exc:
                if "aliases" in kwargs: 
                    # Remove aliases and try again.
                    kwargs.pop("aliases")
                    return super(MySubParserAction, self).add_parser(name, **kwargs)
                else:
                    # Wrong call.
                    raise exc

    parser = MyArgumentParser(epilog=str_examples(), formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument('-v', '--verbose', default=0, action='count', # -vv --> verbose=2
                        help='verbose, can be supplied multiple times to increase verbosity')

    parser.add_argument('--no-colors', default=False, action="store_true", help='Disable ASCII colors')

    parser.add_argument('--loglevel', default="ERROR", type=str,
                        help="set the loglevel. Possible values: CRITICAL, ERROR (default), WARNING, INFO, DEBUG")

    parser.add_argument('path', nargs="?", help=("File or directory containing the ABINIT flow\n" +
                                                 "If not given, the first flow in the current workdir is selected"))

    # Create the parsers for the sub-commands
    subparsers = parser.add_subparsers(dest='command', help='sub-command help', description="Valid subcommands")

    # Subparser for single command.
    p_single = subparsers.add_parser('singleshot', aliases=["single"], help="Run single task.")

    # Subparser for rapidfire command.
    p_rapid = subparsers.add_parser('rapidfire', aliases=["rapid"], help="Run all tasks in rapidfire mode")

    # Subparser for scheduler command.
    p_scheduler = subparsers.add_parser('scheduler', aliases=["sched"], help="Run all tasks with a Python scheduler.")

    p_scheduler.add_argument('-w', '--weeks', default=0, type=int, help="number of weeks to wait")

    p_scheduler.add_argument('-d', '--days', default=0, type=int, help="number of days to wait")

    p_scheduler.add_argument('-hs', '--hours', default=0, type=int, help="number of hours to wait")

    p_scheduler.add_argument('-m', '--minutes', default=0, type=int, help="number of minutes to wait")

    p_scheduler.add_argument('-s', '--seconds', default=0, type=int, help="number of seconds to wait")

    # Subparser for status command.
    p_status = subparsers.add_parser('status', help="Show task status.")
    p_status.add_argument('-d', '--delay', default=0, type=int, help=("If 0, exit after the first analysis.\n" + 
                          "If > 0, enter an infinite loop and delay execution for the given number of seconds."))

    # Subparser for cancel command.
    p_cancel = subparsers.add_parser('cancel', help="Cancel the tasks in the queue.")
    p_cancel.add_argument("-r", "--rmtree", action="store_true", default=False, help="Remove flow directory.")

    # Subparser for restart command.
    p_restart = subparsers.add_parser('restart', help="Restart the tasks of the flow that are not converged.")

    # Subparser for restart command.
    p_reset = subparsers.add_parser('reset', help="Reset the tasks of the flow with the specified status.")
    p_reset.add_argument('task_status', nargs="?", default="QueueCritical")

    # Subparser for open command.
    p_open = subparsers.add_parser('open', help="Open files in $EDITOR (type `abirun.py ... open --help` for help).")
    p_open.add_argument('what', default="o",
        help="""\
Specify the files to open. Possible choices:
    i ==> input_file
    o ==> output_file
    f ==> files_file
    j ==> job_file
    l ==> log_file
    e ==> stderr_file
    q ==> qerr_file
""")

    # Subparser for gui command.
    p_gui = subparsers.add_parser('gui', help="Open GUI.")
    p_gui.add_argument("--chroot", default="", type=str, help=("Use chroot as new directory of the flow.\n" +
                       "Mainly used for opening a flow located on a remote filesystem mounted with sshfs.\n" +
                       "In this case chroot is the absolute path to the flow on the **localhost**\n",
                       "Note that it is not possible to change the flow from remote when chroot is used."))

    p_new_manager = subparsers.add_parser('new_manager', help="Change the TaskManager.")
    p_new_manager.add_argument("manager_file", default="", type=str, help="YAML file with the new manager")

    p_tail = subparsers.add_parser('tail', help="Use tail to follow the main output file of the flow.")

    # Parse command line.
    try:
        options = parser.parse_args()
    except Exception:
        show_examples_and_exit(error_code=1)

    # loglevel is bound to the string value obtained from the command line argument. 
    # Convert to upper case to allow the user to specify --loglevel=DEBUG or --loglevel=debug
    import logging
    numeric_level = getattr(logging, options.loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % options.loglevel)
    logging.basicConfig(level=numeric_level)

    if options.no_colors:
        termcolor.enable(False)

    # Read the flow from the pickle database.
    if options.path is None:
        # Will try to figure out the location of the Flow.
        options.path = os.getcwd()

    flow = abilab.AbinitFlow.pickle_load(options.path)
    retcode = 0

    if options.command == "gui":
        if options.chroot:
            # Change the workdir of flow.
            print("Will chroot to %s..." % options.chroot)
            flow.chroot(options.chroot)

        from abipy.gui.flowviewer import wxapp_flow_viewer
        wxapp_flow_viewer(flow).MainLoop()

    elif options.command == "new_manager":
        # Read the new manager from file.
        new_manager = abilab.TaskManager.from_file(options.manager_file)

        # Change the manager of the errored tasks.
        status = "S_QUEUECRITICAL"
        #status = "S_ERROR"
        for task, wi, ti in flow.iflat_tasks_wti(status=status):
            task.reset()
            task.set_manager(new_manager)
            
        # Update the database.
        return flow.build_and_pickle_dump()

    else:
        retcode = treat_flow(flow, options)

    return retcode
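
Note: on Python 3, argparse supports the aliases keyword natively, so the MySubParserAction shim above only matters for older argparse versions. One subtlety worth remembering: parse_args stores the token actually typed, not the canonical subcommand name:

import argparse

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest="command")
subparsers.add_parser("singleshot", aliases=["single"])

options = parser.parse_args(["single"])
# Both spellings reach the same subparser, but options.command keeps the
# alias token: this prints "single", not "singleshot".
print(options.command)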
Example #5
def main():

    def str_examples():
        examples = """\
Usage example:\n

    abirun.py [FLOWDIR] rapid                    => Keep repeating, stop when no task can be executed.
    abirun.py [FLOWDIR] scheduler                => Execute flow with the scheduler
    abirun.py [FLOWDIR] events                   => Print ABINIT events (Warning/Error/Comment)
    abirun.py [FLOWDIR] history                  => Print Task history.
    abirun.py [FLOWDIR] gui                      => Open the GUI.
    abirun.py [FLOWDIR] manager slurm            => Document the TaskManager options available for Slurm.
    abirun.py [FLOWDIR] manager script           => Show the job script that will be produced.
    nohup abirun.py [FLOWDIR] scheduler -s 30 &  => Start the scheduler to schedule task submission.

    If FLOWDIR is not given, abirun.py automatically selects the database located within
    the working directory. An exception is raised if multiple databases are found.

    Options for developers:

        abirun.py prof ABIRUN_ARGS               => to profile abirun.py
        abirun.py tracemalloc ABIRUN_ARGS        => to trace memory blocks allocated by Python
"""
        return examples

    def show_examples_and_exit(err_msg=None, error_code=1):
        """Display the usage of the script."""
        sys.stderr.write(str_examples())
        if err_msg: sys.stderr.write("Fatal Error\n" + err_msg + "\n")
        sys.exit(error_code)

    def parse_nids(s):
        """Parse the nids argument."""
        if s is None: return s
        try:
            if "," in s:
                return [int(t) for t in s.split(",")]
            else:
                # Convert string to slice and return list.
                s = as_slice(s)
                if s.stop is None: raise argparse.ArgumentTypeError("stop must be specified")
                return list(range(s.start, s.stop, s.step))
        except Exception:
            raise argparse.ArgumentTypeError(
                "Invalid nids string %s\nExpecting None, int, comma-separated integers or slice syntax" % s)

    def parse_wslice(s):
        s = as_slice(s)
        if s is None: return s
        if s.stop is None: raise argparse.ArgumentTypeError("stop must be specified")
        return s

    # Parent parser for commands that need to know on which subset of tasks/workflows we have to operate.
    # wslice and nids are mutually exclusive.
    flow_selector_parser = argparse.ArgumentParser(add_help=False)
    group = flow_selector_parser.add_mutually_exclusive_group()
    group.add_argument("-n", '--nids', default=None, type=parse_nids, help=(
        "Node identifier(s) used to select the task. Integer or comma-separated list of integers. Use `status` command to get the node ids."
        "Examples: --nids=12 --nids=12,13,16 --nids=10:12 to select 10 and 11, --nids=2:5:2 to select 2,4"  
        ))

    group.add_argument("-w", '--wslice', default=None, type=parse_wslice, 
                                      help=("Select the list of works to analyze (python syntax for slices):"
                                      "Examples: --wslice=1 to select the second workflow, --wslice=:3 for 0,1,2,"
                                      "--wslice=-1 for the last workflow, --wslice::2 for even indices"))

    group.add_argument("-S", '--task-status', default=None, type=Status.as_status, 
                        help="Select only the tasks with the given status. Default: None i.e. ignored. Possible values: %s" %
                        Status.all_status_strings())
    #group.add_argument("-p", "--task-pos", default=None, type=parse_wslice, help="List of tuples with the position of the tasl in the flow.")

    # Parent parse for common options.
    copts_parser = argparse.ArgumentParser(add_help=False)

    copts_parser.add_argument('-v', '--verbose', default=0, action='count', # -vv --> verbose=2
                              help='verbose, can be supplied multiple times to increase verbosity')
    copts_parser.add_argument('--remove-lock', default=False, action="store_true",
                              help="Remove the lock file of the pickle file storing the flow.")
    copts_parser.add_argument('--no-colors', default=False, action="store_true", help='Disable ASCII colors')
    copts_parser.add_argument('--loglevel', default="ERROR", type=str,
                               help="set the loglevel. Possible values: CRITICAL, ERROR (default), WARNING, INFO, DEBUG")

    # Build the main parser.
    parser = argparse.ArgumentParser(epilog=str_examples(), formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument('flowdir', nargs="?", help=("File or directory containing the ABINIT flow. "
                                                    "If not given, the first flow in the current workdir is selected"))

    # Create the parsers for the sub-commands
    subparsers = parser.add_subparsers(dest='command', help='sub-command help', description="Valid subcommands")

    subparsers.add_parser('version', parents=[copts_parser], help='Show version number and exit')

    # Subparser for single command.
    p_single = subparsers.add_parser('single', parents=[copts_parser], help="Run single task.")

    # Subparser for rapidfire command.
    p_rapid = subparsers.add_parser('rapid', parents=[copts_parser], help="Run all tasks in rapidfire mode")

    # Subparser for scheduler command.
    p_scheduler = subparsers.add_parser('scheduler', parents=[copts_parser], help="Run all tasks with a Python scheduler.")

    p_scheduler.add_argument('-w', '--weeks', default=0, type=int, help="number of weeks to wait")
    p_scheduler.add_argument('-d', '--days', default=0, type=int, help="number of days to wait")
    p_scheduler.add_argument('-hs', '--hours', default=0, type=int, help="number of hours to wait")
    p_scheduler.add_argument('-m', '--minutes', default=0, type=int, help="number of minutes to wait")
    p_scheduler.add_argument('-s', '--seconds', default=0, type=int, help="number of seconds to wait")

    # Subparser for batch command.
    p_batch = subparsers.add_parser('batch', parents=[copts_parser], help="Run scheduler in batch script.")
    p_batch.add_argument("-t", '--timelimit', default=None, help=("Time limit for batch script. "
                         "Accept int with seconds or string with time given in the slurm convention: "
                         "`days-hours:minutes:seconds`. If timelimit is None, the default value specified"
                         " in the `batch_adapter` entry of `manager.yml` is used."))

    # Subparser for status command.
    p_status = subparsers.add_parser('status', parents=[copts_parser, flow_selector_parser], help="Show task status.")
    p_status.add_argument('-d', '--delay', default=0, type=int, help=("If 0, exit after the first analysis.\n" + 
                          "If > 0, enter an infinite loop and delay execution for the given number of seconds."))
    p_status.add_argument('-s', '--summary', default=False, action="store_true", help="Print short version with status counters.")

    # Subparser for set_status command.
    p_set_status = subparsers.add_parser('set_status', parents=[copts_parser, flow_selector_parser], 
        help="Change the status of the task. WARNING: Option for developers!")
    p_set_status.add_argument('new_status', help="New value of status. Possible values: %s" % Status.all_status_strings())

    # Subparser for cancel command.
    p_cancel = subparsers.add_parser('cancel', parents=[copts_parser, flow_selector_parser], help="Cancel the tasks in the queue.")
    p_cancel.add_argument("-r", "--rmtree", action="store_true", default=False, help="Remove flow directory.")

    # Subparser for restart command.
    p_restart = subparsers.add_parser('restart', parents=[copts_parser, flow_selector_parser],
                help="Restart the tasks of the flow. By default, only the tasks with status==Unconverged are restarted. "
                     "Use -S `status` and/or -n node_ids to select particular tasks.")

    # Subparser for reset command.
    p_reset = subparsers.add_parser('reset', parents=[copts_parser, flow_selector_parser], 
                                    help="Reset the tasks of the flow with the specified status.")

    # Subparser for move command.
    p_move = subparsers.add_parser('move', parents=[copts_parser], help="Move the flow to a new directory and change the absolute paths")
    p_move.add_argument('dest')

    # Subparser for open command.
    p_open = subparsers.add_parser('open', parents=[copts_parser, flow_selector_parser],
                                   help="Open files in $EDITOR (type `abirun.py FLOWDIR open --help` for help).")
    p_open.add_argument('what', nargs="?", default="o", 
        help="""\
Specify the files to open. Possible choices:
    i ==> input_file
    o ==> output_file
    f ==> files_file
    j ==> job_file
    l ==> log_file
    e ==> stderr_file
    q ==> qout_file
    all ==> all files.
""")

    p_ncopen = subparsers.add_parser('ncopen', parents=[copts_parser, flow_selector_parser],
                                     help="Open netcdf files in ipython. Use `--help` for more info")
    p_ncopen.add_argument('ncext', nargs="?", default="GSR", help="Select the type of file to open")

    # Subparser for gui command.
    p_gui = subparsers.add_parser('gui', help="Open the GUI (requires wxPython).")
    p_gui.add_argument("--chroot", default="", type=str, help=("Use chroot as new directory of the flow." +
                       "Mainly used for opening a flow located on a remote filesystem mounted with sshfs." +
                       "In this case chroot is the absolute path to the flow on the **localhost**",
                       "Note that it is not possible to change the flow from remote when chroot is used."))

    p_new_manager = subparsers.add_parser('new_manager', parents=[copts_parser, flow_selector_parser], help="Change the TaskManager.")
    p_new_manager.add_argument("manager_file", default="", type=str, help="YAML file with the new manager")

    p_tail = subparsers.add_parser('tail', parents=[copts_parser, flow_selector_parser], help="Use tail to follow the main output files of the flow.")
    p_tail.add_argument('what_tail', nargs="?", type=str, default="o", help="What to follow: o for output (default), l for logfile, e for stderr")

    p_qstat = subparsers.add_parser('qstat', parents=[copts_parser], help="Show additional info on the jobs in the queue.")

    p_deps = subparsers.add_parser('deps', parents=[copts_parser], help="Show dependencies.")

    p_robot = subparsers.add_parser('robot', parents=[copts_parser, flow_selector_parser], 
                                    help="Use a robot to analyze the results of multiple tasks (requires ipython)")
    p_robot.add_argument('robot_ext', nargs="?", type=str, default="GSR", help="The file extension of the netcdf file")

    p_plot = subparsers.add_parser('plot', parents=[copts_parser, flow_selector_parser], help="Plot data. Use --help for more info.")
    p_plot.add_argument("what", nargs="?", type=str, default="ebands", help="Object to plot")

    p_inspect = subparsers.add_parser('inspect', parents=[copts_parser, flow_selector_parser], help="Inspect the tasks")

    p_inputs = subparsers.add_parser('inputs', parents=[copts_parser, flow_selector_parser], help="Show the input files of the tasks")

    p_manager = subparsers.add_parser('manager', parents=[copts_parser], help="Document the TaskManager options")
    p_manager.add_argument("qtype", nargs="?", default=None, help=("Write job script to terminal if qtype='script' else" 
        " document the qparams for the given QueueAdapter qtype e.g. slurm"))

    p_events = subparsers.add_parser('events', parents=[copts_parser, flow_selector_parser], 
                                    help="Show ABINIT events (error messages, warnings, comments)")
    #p_events.add_argument("-t", "event-type", default=)

    p_corrections = subparsers.add_parser('corrections', parents=[copts_parser, flow_selector_parser], help="Show abipy corrections")

    p_history = subparsers.add_parser('history', parents=[copts_parser, flow_selector_parser], help="Show Node history.")
    p_history.add_argument("-m", "--metadata", action="store_true", default=False, help="Print history metadata")
    #p_history.add_argument("-t", "--task-history", action="store_true", default=True, help=)

    p_handlers = subparsers.add_parser('handlers', parents=[copts_parser], help="Show event handlers installed in the flow")
    p_handlers.add_argument("-d", "--doc", action="store_true", default=False, 
                            help="Show documentation about all the handlers that can be installed.")

    p_notebook = subparsers.add_parser('notebook', parents=[copts_parser], help="Create and open an ipython notebook to interact with the flow.")

    p_ipython = subparsers.add_parser('ipython', parents=[copts_parser], help="Embed IPython. Useful for advanced operations or debugging purposes.")
    p_ipython.add_argument('--argv', nargs="?", default="", type=shlex.split, 
                           help="Command-line options passed to ipython. Must be enclosed by quotes. "
                                "Example: --argv='--matplotlib=wx'")

    p_tar = subparsers.add_parser('tar', parents=[copts_parser], help="Create tarball file.")
    p_tar.add_argument("-s", "--max-filesize", default=None, 
                       help="Exclude file whose size > max-filesize bytes. Accept integer or string e.g `1Mb`.")

    def parse_strings(s): return s.split(",") if s is not None else s
    p_tar.add_argument("-e", "--exclude-exts", default=None, type=parse_strings,
                       help="Exclude file extensions. Accept string or comma-separated strings. Ex: -eWFK or --exclude-exts=WFK,GSR")

    p_tar.add_argument("-d", "--exclude-dirs", default=None, type=parse_strings,
                       help="Exclude directories. Accept string or comma-separated strings. Ex: --exlude-dirs=indir,outdir")

    p_tar.add_argument("-l", "--light", default=False, action="store_true",
                       help="Create light-weight version of the tarball for debugging purposes. Other options are ignored.")

    p_debug = subparsers.add_parser('debug', parents=[copts_parser, flow_selector_parser], 
                                     help="Scan error files and log files for possible error messages.")

    p_group = subparsers.add_parser('group', parents=[copts_parser, flow_selector_parser], 
                                     help="Group tasks according to property.")

    p_diff = subparsers.add_parser('diff', parents=[copts_parser, flow_selector_parser], 
                                   help="Compare files produced by two or three nodes.")
    p_diff.add_argument('what_diff', nargs="?", type=str, default="i", 
                        help="What to diff: i for input (default), o for output, l for logfile, e for stderr")

    p_networkx = subparsers.add_parser('networkx', parents=[copts_parser], #, flow_selector_parser],
                                       help="Draw flow and node dependencies with the networkx package.")
    p_networkx.add_argument('--nxmode', default="status",
                            help="Type of network plot. Possible values: `status`, `network`")
    p_networkx.add_argument('--edge-labels', action="store_true", default=False, help="Show edge labels")

    p_listext = subparsers.add_parser('listext', parents=[copts_parser], 
                                     help="List all the output files with the given extension that have been produced by the nodes of the flow.")
    p_listext.add_argument('listexts', nargs="+", help="List of Abinit file extensions, e.g. DDB, GSR, WFK etc.")

    # Parse command line.
    try:
        options = parser.parse_args()
    except Exception:
        show_examples_and_exit(error_code=1)

    # loglevel is bound to the string value obtained from the command line argument. 
    # Convert to upper case to allow the user to specify --loglevel=DEBUG or --loglevel=debug
    import logging
    numeric_level = getattr(logging, options.loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % options.loglevel)
    logging.basicConfig(level=numeric_level)

    if options.command == "version":
        from abipy.core.release import version
        print(version)
        return 0

    if options.no_colors:
        # Disable colors
        termcolor.enable(False)

    if options.command == "manager":
        # Document TaskManager options and qparams.
        qtype = options.qtype

        if qtype == "script":
            manager = abilab.TaskManager.from_user_config()
            script = manager.qadapter.get_script_str(
                job_name="job_name", 
                launch_dir="workdir",
                executable="executable",
                qout_path="qout_file.path",
                qerr_path="qerr_file.path",
                stdin="stdin", 
                stdout="stdout",
                stderr="stderr",
            )
            print(script)

        else:
            print(abilab.TaskManager.autodoc())
            from pymatgen.io.abinitio.qadapters import show_qparams, all_qtypes
            print("qtype supported: %s" % all_qtypes())
            print("Use `abirun.py . manager slurm` to have the list of qparams for slurm.\n")

            if qtype is not None:
                print("QPARAMS for %s" % qtype)
                show_qparams(qtype)

        sys.exit(0)

    # Read the flow from the pickle database.
    if options.flowdir is None:
        # Will try to figure out the location of the Flow.
        options.flowdir = os.getcwd()

    flow = abilab.Flow.pickle_load(options.flowdir, remove_lock=options.remove_lock)
    #flow.set_spectator_mode(False)
    retcode = 0

    if options.command == "gui":
        if options.chroot:
            # Change the workdir of flow.
            print("Will chroot to %s..." % options.chroot)
            flow.chroot(options.chroot)

        from abipy.gui.flowviewer import wxapp_flow_viewer
        wxapp_flow_viewer(flow).MainLoop()

    elif options.command == "new_manager":
        # Read the new manager from file.
        new_manager = abilab.TaskManager.from_file(options.manager_file)

        # Default status is QCritical if not specified by the user.
        if options.task_status is None: options.task_status = Status.as_status("QCritical")

        # Change the manager of the errored tasks.
        print("Resetting tasks with status: %s" % options.task_status)
        for task in flow.iflat_tasks(status=options.task_status, nids=selected_nids(flow, options)):
            task.reset()
            task.set_manager(new_manager)
            
        # Update the database.
        return flow.build_and_pickle_dump()

    elif options.command == "events":
        nrows, ncols = get_terminal_size()

        for task in flow.iflat_tasks(status=options.task_status, nids=selected_nids(flow, options)):
            report = task.get_event_report()
            print(make_banner(str(task), width=ncols, mark="="))
            #report = report.filter_types()
            print(report)

    elif options.command == "corrections":
        nrows, ncols = get_terminal_size()
        count = 0
        for task in flow.iflat_tasks(status=options.task_status, nids=selected_nids(flow, options)):
            if task.num_corrections == 0: continue
            count += 1
            print(make_banner(str(task), width=ncols, mark="="))
            for corr in task.corrections:
                pprint(corr)

        if not count: 
            print("No correction found.")

    elif options.command == "history":
        nrows, ncols = get_terminal_size()

        works_done = []
        # Loop over the tasks and show the history of each work that is not yet in works_done.
        for task in flow.iflat_tasks(status=options.task_status, nids=selected_nids(flow, options)):
            work = task.work
            if work not in works_done:
                works_done.append(work)
                print(make_banner(str(work), width=ncols, mark="="))
                print(work.history.to_string(metadata=options.metadata))

            print(make_banner(str(task), width=ncols, mark="="))
            print(task.history.to_string(metadata=options.metadata))

        # Print the history of the flow.
        print(make_banner(str(flow), width=ncols, mark="="))
        print(flow.history.to_string(metadata=options.metadata))

    elif options.command == "handlers":
        if options.doc:
            autodoc_event_handlers()
        else:
            flow.show_event_handlers()

    elif options.command  == "single":
        nlaunch = flow.single_shot()
        print("Number of tasks launched: %d" % nlaunch)
        if nlaunch: flow.show_status()

    elif options.command == "rapid":
        nlaunch = flow.rapidfire()
        print("Number of tasks launched: %d" % nlaunch)
        if nlaunch: flow.show_status()

    elif options.command == "scheduler":
        # Check that the env on the local machine is properly configured before starting the scheduler.
        abilab.abicheck()

        sched_options = {oname: getattr(options, oname) for oname in 
            ("weeks", "days", "hours", "minutes", "seconds")}

        if all(v == 0 for v in sched_options.values()):
            sched = flow.make_scheduler()
        else:
            sched = flow.make_scheduler(**sched_options)

        print(sched)
        return sched.start()

    elif options.command == "batch":
        return flow.batch(timelimit=options.timelimit)

    elif options.command == "status":
        # Select the method to call.
        show_func = flow.show_status if not options.summary else flow.show_summary

        if options.delay:
            cprint("Entering infinite loop. Press CTRL+C to exit", color="magenta", end="", flush=True)
            try:
                while True:
                    print(2*"\n" + time.asctime() + "\n")
                    flow.check_status()
                    show_func(verbose=options.verbose, nids=selected_nids(flow, options))
                    if flow.all_ok: break
                    time.sleep(options.delay)
            except KeyboardInterrupt:
                pass
        else:
            show_func(verbose=options.verbose, nids=selected_nids(flow, options))
            if options.verbose and flow.manager.has_queue:
                print("Total number of jobs in queue: %s" % flow.manager.get_njobs_in_queue())

    elif options.command == "set_status":
        # Default status for reset is QCritical
        if options.task_status is None: options.task_status = Status.as_status("QCritical")
        new_status = Status.as_status(options.new_status)
        print("Will set all tasks with status: ", options.task_status, " to new_status", new_status)

        count = 0
        for task in flow.iflat_tasks(status=options.task_status, nids=selected_nids(flow, options)):
            task.set_status(new_status, msg="Changed by abirun from %s to %s" % (task.status, new_status))
            count += 1

        print("Number of tasks modified: %s" % count)
        if count:
            # update database
            flow.pickle_dump()

    elif options.command == "open":
        flow.open_files(what=options.what, status=None, op="==", nids=selected_nids(flow, options))

    elif options.command == "ncopen":
        # The name of the method associated to this netcdf file.
        methname = "open_" + options.ncext.lower()
        # List of netcdf file objects.
        ncfiles = [getattr(task, methname)() for task in flow.select_tasks(nids=options.nids, wslice=options.wslice) 
                    if hasattr(task, methname)]
        
        if ncfiles:
            # Start ipython shell with namespace 
            import IPython
            if len(ncfiles) == 1:
                IPython.start_ipython(argv=[], user_ns={"ncfile": ncfiles[0]})
            else:
                IPython.start_ipython(argv=[], user_ns={"ncfiles": ncfiles})
        else:
            cprint("Cannot find any netcdf file with extension %s" % options.ncext, color="magenta")

    elif options.command == "cancel":
        print("Number of jobs cancelled %d" % flow.cancel(nids=selected_nids(flow, options)))
        # Remove directory
        if options.rmtree: flow.rmtree()

    elif options.command == "restart":
        # Default status for reset is Unconverged if no option is provided by the user.
        if options.task_status is None and options.nids is None:
            options.task_status = Status.as_status("Unconverged")

        nlaunch, excs = 0, []
        for task in flow.iflat_tasks(status=options.task_status, nids=selected_nids(flow, options)):
            if options.verbose: 
                print("Will try to restart %s, with status %s" % (task, task.status))
            try:
                fired = task.restart()
                if fired: nlaunch += 1
            except Exception:
                excs.append(straceback())

        cprint("Number of jobs restarted %d" % nlaunch, "blue")
        if nlaunch:
            # update database
            flow.pickle_dump()

        if excs:
            print("Exceptions raised\n")
            pprint(excs)

    elif options.command == "reset":
        # Default status for reset is QCritical if not specified by the user.
        if options.task_status is None: options.task_status = Status.as_status("QCritical")
        print("Will reset tasks with status: %s" % options.task_status)

        count = 0
        for task in flow.iflat_tasks(status=options.task_status, nids=selected_nids(flow, options)):
            print("Resetting task %s" % task)
            failed = task.reset()
            if failed:
                print("Task %s couldn't be reset" % task)
            else:
                count += 1

        cprint("%d tasks have been reset" % count, "blue")
        nlaunch = flow.rapidfire()
        flow.show_status()
        print("Number of tasks launched: %d" % nlaunch)

        if nlaunch == 0:
            g = flow.find_deadlocks()
            #print("deadlocked:", gdeadlocked)
            #print("runnables:", grunnables)
            #print("running:", g.running)
            if g.deadlocked and not (g.runnables or g.running):
                print("*** Flow is deadlocked ***")

        flow.pickle_dump()

    elif options.command == "move":
        print("Will move flow to %s..." % options.dest)
        flow.chroot(options.dest)
        flow.move(options.dest)

    elif options.command == "tail":
        def get_path(task):
            """Helper function used to select the files of a task."""
            choices = {
                "o": task.output_file,
                "l": task.log_file,
                "e": task.stderr_file,
            }
            return choices[options.what_tail].path

        # Default status for tail is Running.
        if options.task_status is None: options.task_status = Status.as_status("Running")

        paths = [get_path(task) for task in flow.iflat_tasks(status=options.task_status, nids=selected_nids(flow, options))]

        if not paths:
            cprint("No job is running. Exiting!", "red")
        else:
            cprint("Press CTRL+C to interrupt. Number of output files %d" % len(paths), color="magenta", end="", flush=True)
            try:
                os.system("tail -f %s" % " ".join(paths))
            except KeyboardInterrupt:
                pass

    elif options.command == "qstat":
        #for task in flow.select_tasks(nids=options.nids, wslice=options.wslice):
        for task in flow.iflat_tasks():
            if not task.qjob: continue
            print("qjob", task.qjob)
            print("info", task.qjob.get_info())
            print("e start-time", task.qjob.estimated_start_time())
            print("qstats", task.qjob.get_stats())

    elif options.command == "deps":
        flow.check_status()
        flow.show_dependencies()

    elif options.command == "robot":
        import IPython
        with abilab.abirobot(flow, options.robot_ext, nids=selected_nids(flow, options)) as robot:
            #IPython.embed(header=str(robot) + "\nType `robot` in the terminal and use <TAB> to list its methods",  robot=robot)
            IPython.start_ipython(argv=[], user_ns={"robot": robot})

    elif options.command == "plot":
        fext = dict(
            ebands="gsr",
        )[options.what]

        open_method = "open_" + fext
        plot_method = "plot_" + options.what

        for task in flow.select_tasks(nids=options.nids, wslice=options.wslice):
            try:
                with getattr(task, open_method)() as ncfile: 
                    getattr(ncfile, plot_method)()
            except Exception as exc:
                print(exc)

    elif options.command == "inspect":
        tasks = flow.select_tasks(nids=options.nids, wslice=options.wslice)

        # Use different thread to inspect the task so that master can catch KeyboardInterrupt and exit.
        # One could use matplotlib non-blocking interface with show(block=False) but this one seems to work well.
        from multiprocessing import Process

        def plot_graphs():
            for task in tasks:
                if hasattr(task, "inspect"):
                    try:
                        task.inspect()
                    except Exception as exc:
                        cprint("%s: inspect method raised %s " % (task, exc), color="blue")
                        
                else:
                    cprint("Task %s does not provide an inspect method" % task, color="blue")

        plot_graphs()

        # This works with py3k but not with py2
        #p = Process(target=plot_graphs)
        #p.start()
        #num_tasks = len(tasks)

        #if num_tasks == 1:
        #    p.join()
        #else:
        #    cprint("Will produce %d matplotlib plots. Press CTRL+C to interrupt..." % num_tasks, color="magenta", end="", flush=True)
        #    try:
        #        p.join()
        #    except KeyboardInterrupt:
        #        print("\nTerminating thread...")
        #        p.terminate()

    elif options.command == "inputs":
        flow.show_inputs(nids=selected_nids(flow, options))

    elif options.command == "notebook":
        write_notebook(flow, options)

    elif options.command == "ipython":
        import IPython
        #IPython.embed(header="")
        #print("options:", options.argv)
        IPython.start_ipython(argv=options.argv, user_ns={"flow": flow})# , header="flow.show_status()")

    elif options.command == "tar":
        if not options.light:
            tarfile = flow.make_tarfile(name=None, 
                                        max_filesize=options.max_filesize, 
                                        exclude_exts=options.exclude_exts, 
                                        exclude_dirs=options.exclude_dirs,
                                        verbose=options.verbose)
            print("Created tarball file %s" % tarfile)
        else:
            tarfile = flow.make_light_tarfile()
            print("Created light tarball file %s" % tarfile)

    elif options.command == "debug":
        nrows, ncols = get_terminal_size()

        # Test for scheduler exceptions first.
        sched_excfile = os.path.join(flow.workdir, "_exceptions")
        if os.path.exists(sched_excfile):
            with open(sched_excfile, "r") as fh:
                cprint(fh.read(), color="red")
                return 0

        if options.task_status is not None: 
            tasks = list(flow.iflat_tasks(status=options.task_status, nids=selected_nids(flow, options)))
        else:
            errors = list(flow.iflat_tasks(status=flow.S_ERROR, nids=selected_nids(flow, options)))
            qcriticals = list(flow.iflat_tasks(status=flow.S_QCRITICAL, nids=selected_nids(flow, options)))
            abicriticals = list(flow.iflat_tasks(status=flow.S_ABICRITICAL, nids=selected_nids(flow, options)))
            tasks = errors + qcriticals + abicriticals

        # For each task selected:
        #
        #     1) Check the error files of the task. If not empty, print the content to stdout and we are done.
        #     2) If error files are empty, look at the master log file for possible errors 
        #     3) If this check also fails, scan all the process log files.
        #        TODO: This check is not needed if we introduce a new __abinit_error__ file 
        #        that is created by the first MPI process that invokes MPI abort!
        #     
        ntasks = 0
        for task in tasks:
            print(make_banner(str(task), width=ncols, mark="="))
            ntasks += 1

            # count > 0 means we found some useful info that could explain the failures.
            count = 0

            # Start with the error files.
            for efname in ["qerr_file", "stderr_file"]:
                err_file = getattr(task, efname)
                if err_file.exists:
                    s = err_file.read()
                    if not s: continue
                    print(make_banner(str(err_file), width=ncols, mark="="))
                    cprint(s, color="red")
                    count += 1

            # Check the main log file.
            try:
                report = task.get_event_report()
                if report and report.num_errors:
                    print(make_banner(os.path.basename(report.filename), width=ncols, mark="="))
                    s = "\n".join(str(e) for e in report.errors)
                else:
                    s = None
            except Exception as exc:
                s = str(exc)

            if s is not None:
                cprint(s, color="red")
                count += 1

            if not count:
                # Inspect all log files produced by the other nodes.
                log_files = task.tmpdir.list_filepaths(wildcard="*LOG_*")
                if not log_files:
                    cprint("No *LOG_* file in tmpdir. This usually happens if you are running with many CPUs", color="magenta")

                for log_file in log_files:
                    try:
                        report = EventsParser().parse(log_file)
                        if report.errors:
                            print(report)
                            count += 1
                            break
                    except Exception as exc:
                        cprint(str(exc), color="red")
                        count += 1
                        break

            if not count:
                cprint("Houston, we could not find any error message that can explain the problem", color="magenta")

        print("Number of tasks analyzed: %d" % ntasks)

    elif options.command == "group":
        d = defaultdict(list)
        for task in flow.iflat_tasks(status=options.task_status, nids=selected_nids(flow, options)):
            key = task.status
            d[key].append(task.node_id)

        print("Mapping status --> List of node identifiers")
        for k, v in d.items():
            print("   ",k, " --> ", v)

    elif options.command == "diff":
        if options.nids is None:
            raise ValueError("nids must be specified when using diff command")

        tasks = list(flow.iflat_tasks(nids=selected_nids(flow, options)))

        if len(tasks) not in (2, 3):
            if len(tasks) == 1:
                cprint("Only one task selected, nothing to diff. Returning\n", color="magenta", end="", flush=True)
                return 0
            else:
                raise ValueError("Don't know how to compare files produced by %d tasks" % len(tasks))

        # Build list of lists. Each sub-list contains the files associated to the i-th task.
        files_for_task = [None] * len(tasks)
        for i, task in enumerate(tasks):
            files_for_task[i] = task.select_files(options.what_diff)

        for diff_files in zip(*files_for_task):
            print("Comparing", ", ".join(os.path.relpath(p) for p in diff_files))
            args = " ".join(os.path.relpath(p) for p in diff_files)
            # TODO: I should have written a Differ object somewhere!
            os.system("vimdiff %s" % args)

    elif options.command == "networkx":
        flow.plot_networkx(mode=options.nxmode, 
                           with_edge_labels=options.edge_labels)

    elif options.command == "listext":
        for ext in options.listexts:
            flow.listext(ext)
            print("")

    else:
        raise RuntimeError("Don't know what to do with command %s!" % options.command)

    return retcode
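
Note on the `plot` branch above: the script never hard-codes the open/plot calls. It builds method names at runtime ("open_" + fext, "plot_" + what) and resolves them with getattr, so any task/file pair that follows the naming convention works. Below is a minimal, self-contained sketch of this duck-typed dispatch; the Fake* classes are illustrative stand-ins, not AbiPy API.

class FakeGsrFile:
    """Stand-in for a netcdf file object exposing plot_* methods."""
    def __enter__(self): return self
    def __exit__(self, *exc): return False
    def plot_ebands(self): print("plotting electron bands ...")

class FakeTask:
    """Stand-in for a task exposing open_* factory methods."""
    def open_gsr(self): return FakeGsrFile()

def plot_tasks(tasks, fext="gsr", what="ebands"):
    open_method, plot_method = "open_" + fext, "plot_" + what
    for task in tasks:
        try:
            with getattr(task, open_method)() as ncfile:
                getattr(ncfile, plot_method)()
        except Exception as exc:
            print(exc)

plot_tasks([FakeTask()])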
Example #8
def main():
    def str_examples():
        return """\
Usage example:
    dojodata.py plot H.psp8                ==> Plot dojo data for pseudo H.psp8
    dojodata.py compare H.psp8 H-low.psp8  ==> Plot and compare dojo data for pseudos H.psp8 and H-low.psp8
    dojodata.py trials H.psp8 -r 1
    dojodata.py table .                    ==> Build table (find all psp8 files within current directory)
    dojodata.py figures .                  ==> Plot periodic table figures
    dojodata.py notebook H.psp8            ==> Generate ipython notebook and open it in the browser
    dojodata.py check table/*/*_r.psp8 -v --check-trials=gbrv_fcc,gbrv_bcc
"""

    def show_examples_and_exit(err_msg=None, error_code=1):
        """Display the usage of the script."""
        sys.stderr.write(str_examples())
        if err_msg: sys.stderr.write("Fatal Error\n" + err_msg + "\n")
        sys.exit(error_code)

    def parse_rows(s):
        if not s: return []
        tokens = s.split(",")
        return list(map(int, tokens)) if tokens else []

    def parse_symbols(s):
        if not s: return []
        return s.split(",")

    # Parent parser for commands that need to know on which subset of pseudos we have to operate.
    copts_parser = argparse.ArgumentParser(add_help=False)
    copts_parser.add_argument('pseudos', nargs="+", help="Pseudopotential file or directory containing pseudos")
    copts_parser.add_argument('-s', "--symbols", type=parse_symbols,
        help=("List of chemical symbols to include or exclude."
              "Example --symbols=He,Li to include He and Li, --symbols=-He to exclude He"))
    copts_parser.add_argument('-v', '--verbose', default=0, action='count', # -vv --> verbose=2
                         help='Verbose, can be supplied multiple times to increase verbosity')

    copts_parser.add_argument('--loglevel', default="ERROR", type=str,
                        help="set the loglevel. Possible values: CRITICAL, ERROR (default), WARNING, INFO, DEBUG")
    copts_parser.add_argument('--no-colors', default=False, action="store_true", help='Disable ASCII colors')
    copts_parser.add_argument('--seaborn', action="store_true", help="Use seaborn settings")

    # Options for pseudo selection.
    group = copts_parser.add_mutually_exclusive_group()
    group.add_argument("-r", '--rows', default="", type=parse_rows, help="Select these rows of the periodic table.")
    group.add_argument("-f", '--family', type=str, default="", help="Select this family of the periodic table.")

    # Build the main parser.
    parser = argparse.ArgumentParser(epilog=str_examples(), formatter_class=argparse.RawDescriptionHelpFormatter)

    # Create the parsers for the sub-commands
    subparsers = parser.add_subparsers(dest='command', help='sub-command help', description="Valid subcommands")

    plot_options_parser = argparse.ArgumentParser(add_help=False)
    plot_options_parser.add_argument("-w", "--what-plot", type=str, default="all",
                                      help="Quantity to plot e.g df for deltafactor, gbrv for GBRV tests")
    plot_options_parser.add_argument("-e", "--eos", action="store_true", help="Plot EOS curve")

    # Subparser for plot command.
    p_plot = subparsers.add_parser('plot', parents=[copts_parser, plot_options_parser],
                                   help=dojo_plot.__doc__)

    # Subparser for notebook command.
    p_notebook = subparsers.add_parser('notebook', parents=[copts_parser],
                                       help=dojo_notebook.__doc__)
    # Subparser for compare.
    p_compare = subparsers.add_parser('compare', parents=[copts_parser, plot_options_parser],
                                      help=dojo_compare.__doc__)
    # Subparser for figures
    p_figures = subparsers.add_parser('figures', parents=[copts_parser], help=dojo_figures.__doc__)

    # Subparser for table command.
    p_table = subparsers.add_parser('table', parents=[copts_parser], help=dojo_table.__doc__)
    p_table.add_argument("-j", '--json', default=False, action="store_true",
                         help="Dump table in json format to file table.json")
    p_table.add_argument("-b", '--best', default=False, action="store_true",
                         help="Select best pseudos according to deltafactor")

    p_nbtable = subparsers.add_parser('nbtable', parents=[copts_parser], help=dojo_nbtable.__doc__)

    # Subparser for dist command.
    p_dist = subparsers.add_parser('dist', parents=[copts_parser], help=dojo_dist.__doc__)

    # Subparser for trials command.
    p_trials = subparsers.add_parser('trials', parents=[copts_parser], help=dojo_trials.__doc__)
    p_trials.add_argument("--savefig", type=str, default="", help="Save plot to savefig file")

    # Subparser for check command.
    def parse_trials(s):
        if s is None: return s
        #if s == "all": return DojoReport.ALL_TRIALS
        return s.split(",")

    p_check = subparsers.add_parser('check', parents=[copts_parser], help=dojo_check.__doc__)
    p_check.add_argument("--check-trials", type=parse_trials, default=None, help="List of trials to check")

    # Subparser for validate command.
    p_validate = subparsers.add_parser('validate', parents=[copts_parser], help=dojo_validate.__doc__)

    # Subparser for make_hints command.
    p_make_hints = subparsers.add_parser('make_hints', parents=[copts_parser],
                                         help=dojo_make_hints.__doc__)

    # Parse command line.
    try:
        options = parser.parse_args()
    except Exception:
        show_examples_and_exit(error_code=1)

    # loglevel is bound to the string value obtained from the command line argument.
    # Convert to upper case to allow the user to specify --loglevel=DEBUG or --loglevel=debug
    import logging
    numeric_level = getattr(logging, options.loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % options.loglevel)
    logging.basicConfig(level=numeric_level)

    if options.no_colors:
        # Disable colors
        termcolor.enable(False)

    def get_pseudos(options):
        """
        Find pseudos in paths, return :class:`DojoTable` object sorted by atomic number Z.
        Accepts filepaths or directory.
        """
        exts = ("psp8", "xml")

        paths = options.pseudos

        if len(paths) == 1:
            # Handle directory argument
            if os.path.isdir(paths[0]):
                top = os.path.abspath(paths[0])
                paths = find_exts(top, exts, exclude_dirs="_*")
            # Handle glob syntax e.g. "./*.psp8"
            elif "*" in paths[0]:
                paths = glob.glob(paths[0])

        if options.verbose > 1: print("Will read pseudo from: %s" % paths)

        pseudos = []
        for p in paths:
            try:
                pseudo = dojopseudo_from_file(p)
                if pseudo is None:
                    cprint("[%s] Pseudo.from_file returned None. Something wrong in file!" % p, "red")
                    continue
                pseudos.append(pseudo)

            except Exception as exc:
                cprint("[%s] Python exception. This pseudo will be ignored" % p, "red")
                if options.verbose: print(exc)

        table = DojoTable(pseudos)

        # Here we select a subset of pseudos according to family or rows
        if options.rows:
            table = table.select_rows(options.rows)
        elif options.family:
            table = table.select_families(options.family)

        # here we select chemical symbols.
        if options.symbols:
            table = table.select_symbols(options.symbols)

        return table.sort_by_z()

    # Build DojoTable from the paths specified by the user.
    options.pseudos = get_pseudos(options)
    if not options.pseudos:
        cprint("Empty pseudopotential list. Returning", "magenta")
        return 1
    if options.verbose: print(options.pseudos)

    if options.seaborn:
        import seaborn as sns
        #sns.set(style='ticks', palette='Set2')
        sns.set(style="dark", palette="Set2")
        #And to remove "chartjunk", do:
        #sns.despine()
        #plt.tight_layout()
        #sns.despine(offset=10, trim=True)

    # Dispatch
    return globals()["dojo_" + options.command](options)
Example #9
def main():

    def str_examples():
        examples = """\
Usage example:\n
    abirun.py [DIRPATH] single                   => Fetch the first available task and run it.
    abirun.py [DIRPATH] rapid                    => Keep repeating, stop when no task can be executed
                                                    due to inter-dependency.
    abirun.py [DIRPATH] gui                      => Open the GUI 
    nohup abirun.py [DIRPATH] scheduler -s 30 &   => Use a scheduler to schedule task submission

    If DIRPATH is not given, abirun.py automatically selects the database located within 
    the working directory. An Exception is raised if multiple databases are found.

    Options for developers:
        abirun.py prof ABIRUN_OPTIONS      to profile abirun.py
        abirun.py tracemalloc ABIRUN_ARGS  to trace memory blocks allocated by Python
"""
        return examples

    def show_examples_and_exit(err_msg=None, error_code=1):
        """Display the usage of the script."""
        sys.stderr.write(str_examples())
        if err_msg: sys.stderr.write("Fatal Error\n" + err_msg + "\n")
        sys.exit(error_code)

    def parse_nids(s):
        """parse nids argument"""
        if s is None: return s
        try:
            if "," in s:
                return [int(t) for t in s.split(",")]
            else:
                # Convert string to slice and return list.
                s = as_slice(s)
                #print(s)
                if s.stop is None: raise argparse.ArgumentTypeError("stop must be specified")
                return list(range(s.start, s.stop, s.step))
        except Exception:
            raise argparse.ArgumentTypeError("Invalid nids string %s\n Expecting None or int or comma-separated integers or slice syntax" % s)

    def parse_wslice(s):
        s = as_slice(s)
        if s is None: return s
        if s.stop is None: raise argparse.ArgumentTypeError("stop must be specified")
        #return list(range(s.start, s.stop, s.step))
        return s

    # Parent parser for commands that need to know on which subset of tasks/workflows we have to operate.
    # wslice and nids are mutually exclusive.
    flow_selector_parser = argparse.ArgumentParser(add_help=False)
    group = flow_selector_parser.add_mutually_exclusive_group()
    group.add_argument("-n", '--nids', default=None, type=parse_nids, help=(
        "Node identifier(s) used to select the task. Integer or comma-separated list of integers. Use `status` command to get the node ids.\n"
        "Examples: --nids=12 --nids=12,13,16 --nids=10:12 to select 10 and 11, --nids=2:5:2 to select 2,4"  
        ))

    group.add_argument('--wslice', default=None, type=parse_wslice,
                                      help=("Select the list of works to analyze (python syntax for slices):\n"
                                      "Examples: --wslice=1 to select the second workflow, --wslice=:3 for 0,1,2, "
                                      "--wslice=-1 for the last workflow, --wslice=::2 for even indices"))

    #flow_selector_parser.add_argument('--wti', default=None, help="Index of workflow:task")

    # Build the main parser.
    parser = argparse.ArgumentParser(epilog=str_examples(), formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument('-v', '--verbose', default=0, action='count', # -vv --> verbose=2
                        help='verbose, can be supplied multiple times to increase verbosity')

    parser.add_argument('--remove-lock', default=False, action="store_true", help="Remove the lock file of the pickle file storing the flow.")

    parser.add_argument('--no-colors', default=False, action="store_true", help='Disable ASCII colors')

    parser.add_argument('--loglevel', default="ERROR", type=str,
                        help="set the loglevel. Possible values: CRITICAL, ERROR (default), WARNING, INFO, DEBUG")

    parser.add_argument('path', nargs="?", help=("File or directory containing the ABINIT flow\n" +
                                                 "If not given, the first flow in the current workdir is selected"))

    # Create the parsers for the sub-commands
    subparsers = parser.add_subparsers(dest='command', help='sub-command help', description="Valid subcommands")

    # Subparser for single command.
    p_single = subparsers.add_parser('single', help="Run single task.")

    # Subparser for rapidfire command.
    p_rapid = subparsers.add_parser('rapid', help="Run all tasks in rapidfire mode")

    # Subparser for scheduler command.
    p_scheduler = subparsers.add_parser('scheduler', help="Run all tasks with a Python scheduler.")

    p_scheduler.add_argument('-w', '--weeks', default=0, type=int, help="number of weeks to wait")
    p_scheduler.add_argument('-d', '--days', default=0, type=int, help="number of days to wait")
    p_scheduler.add_argument('-hs', '--hours', default=0, type=int, help="number of hours to wait")
    p_scheduler.add_argument('-m', '--minutes', default=0, type=int, help="number of minutes to wait")
    p_scheduler.add_argument('-s', '--seconds', default=0, type=int, help="number of seconds to wait")

    # Subparser for status command.
    p_status = subparsers.add_parser('status', parents=[flow_selector_parser], help="Show task status.")
    p_status.add_argument('-d', '--delay', default=0, type=int, help=("If 0, exit after the first analysis.\n" + 
                          "If > 0, enter an infinite loop and delay execution for the given number of seconds."))

    # Subparser for cancel command.
    p_cancel = subparsers.add_parser('cancel', parents=[flow_selector_parser], help="Cancel the tasks in the queue.")
    p_cancel.add_argument("-r", "--rmtree", action="store_true", default=False, help="Remove flow directory.")

    # Subparser for restart command.
    p_restart = subparsers.add_parser('restart', help="Restart the tasks of the flow that are not converged.")

    # Subparser for reset command.
    p_reset = subparsers.add_parser('reset', parents=[flow_selector_parser], help="Reset the tasks of the flow with the specified status.")
    p_reset.add_argument('task_status', default="QCritical") 

    # Subparser for unlock command.
    #p_unlock = subparsers.add_parser('unlock', parents=[flow_selector_parser], help="Reset the tasks of the flow with the specified status.")
    #p_reset.add_argument('task_status', default="QCritical") 

    # Subparser for move command.
    p_move = subparsers.add_parser('move', help="Move the flow to a new directory and change the absolute paths")
    p_move.add_argument('dest', nargs=1) 

    # Subparser for open command.
    p_open = subparsers.add_parser('open', parents=[flow_selector_parser], help="Open files in $EDITOR (type `abirun.py DIRPATH open --help` for help).")
    p_open.add_argument('what', default="o", 
        help="""\
Specify the files to open. Possible choices:
    i ==> input_file
    o ==> output_file
    f ==> files_file
    j ==> job_file
    l ==> log_file
    e ==> stderr_file
    q ==> qerr_file
""")

    p_ncopen = subparsers.add_parser('ncopen', parents=[flow_selector_parser],
                                      help="Open netcdf files in ipython (type `abirun.py DIRPATH ncopen --help` for help).")
    p_ncopen.add_argument('ncext', nargs="?", default="GSR", help="Select the type of file to open")

    # Subparser for gui command.
    p_gui = subparsers.add_parser('gui', help="Open GUI.")
    p_gui.add_argument("--chroot", default="", type=str, help=("Use chroot as new directory of the flow.\n" +
                       "Mainly used for opening a flow located on a remote filesystem mounted with sshfs.\n" +
                       "In this case chroot is the absolute path to the flow on the **localhost**\n",
                       "Note that it is not possible to change the flow from remote when chroot is used."))

    p_new_manager = subparsers.add_parser('new_manager', parents=[flow_selector_parser], help="Change the TaskManager.")
    p_new_manager.add_argument("manager_file", default="", type=str, help="YAML file with the new manager")

    p_tail = subparsers.add_parser('tail', parents=[flow_selector_parser], help="Use tail to follow the main output file of the flow.")
    p_tail.add_argument('what_tail', nargs="?", type=str, default="o", help="What to follow: o for output (default), l for logfile, e for stderr")

    p_qstat = subparsers.add_parser('qstat', help="Show additional info on the jobs in the queue.")
    #p_qstat.add_argument('what_tail', nargs="?", type=str, default="o", help="What to follow: o for output (default), l for logfile, e for stderr")

    p_deps = subparsers.add_parser('deps', help="Show dependencies.")

    p_robot = subparsers.add_parser('robot', parents=[flow_selector_parser], help="Use a robot to analyze the results of multiple tasks (requires ipython)")
    p_robot.add_argument('robot_ext', nargs="?", type=str, default="GSR", help="The file extension of the netcdf file")

    p_plot = subparsers.add_parser('plot', parents=[flow_selector_parser], help="Plot data")
    p_plot.add_argument("what", nargs="?", type=str, default="ebands", help="Object to plot")

    p_inspect = subparsers.add_parser('inspect', parents=[flow_selector_parser], help="Inspect the tasks")

    p_inputs= subparsers.add_parser('inputs', parents=[flow_selector_parser], help="Show the input files of the tasks")

    p_analyze= subparsers.add_parser('analyze', help="Analyze the results produced by the flow (requires a flow with analyze method)")

    p_docmanager = subparsers.add_parser('docmanager', help="Document the TaskManager options")
    p_docmanager.add_argument("qtype", nargs="?", default=None, help="Document qparams section for the given qtype")

    p_embed = subparsers.add_parser('embed', help=( 
        "Embed IPython. Useful for debugging or for performing advanced operations.\n"
        "THIS OPTION IF FOR EXPERT USERS!"))

    # Parse command line.
    try:
        options = parser.parse_args()
    except Exception:
        show_examples_and_exit(error_code=1)

    # loglevel is bound to the string value obtained from the command line argument. 
    # Convert to upper case to allow the user to specify --loglevel=DEBUG or --loglevel=debug
    import logging
    numeric_level = getattr(logging, options.loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % options.loglevel)
    logging.basicConfig(level=numeric_level)

    if options.no_colors:
        # Disable colors
        termcolor.enable(False)

    if options.command == "docmanager":
        print(abilab.TaskManager.autodoc())

        import yaml
        QDICT = yaml.load("""\
priority: 5
queue:
  qtype: slurm
  qname: Oban
  qparams:
      account: user_account
      mail_user: [email protected]
limits:
  timelimit: 10:00
  min_cores: 3
  max_cores: 16
job:
  mpi_runner: mpirun
  # setup: list of commands executed before running abinit
  setup:
      - echo ${SLURM_JOB_NODELIST}
      - ulimit -s unlimited
  modules:
      - intel/compilerpro/13.0.1.117
      - fftw3/intel/3.3
  shell_env:
      PATH: /home/user/bin:$PATH
hardware:
   # Mandatory
   num_nodes: 2
   sockets_per_node: 2
   cores_per_socket: 4
   mem_per_node: 8 Gb
""")
        from pymatgen.io.abinitio.qadapters import make_qadapter
        if options.qtype is not None:
            qad = make_qadapter(**QDICT)
            print(qad.QTEMPLATE)
            #print(qad.supported_qparams)

        sys.exit(0)

    # Read the flow from the pickle database.
    if options.path is None:
        # Will try to figure out the location of the Flow.
        options.path = os.getcwd()

    flow = abilab.Flow.pickle_load(options.path, remove_lock=options.remove_lock)
    retcode = 0

    if options.command == "gui":
        if options.chroot:
            # Change the workdir of flow.
            print("Will chroot to %s..." % options.chroot)
            flow.chroot(options.chroot)

        from abipy.gui.flowviewer import wxapp_flow_viewer
        wxapp_flow_viewer(flow).MainLoop()

    elif options.command == "new_manager":
        # Read the new manager from file.
        new_manager = abilab.TaskManager.from_file(options.manager_file)

        # Change the manager of the errored tasks.
        status = "S_QCRITICAL"
        #status = "S_ERROR"
        #print("Resetting tasks with status: %s" % options.task_status)
        for task in flow.iflat_tasks(status=status, nids=selected_nids(flow, options)):
            task.reset()
            task.set_manager(new_manager)
            
        # Update the database.
        return flow.build_and_pickle_dump()

    elif options.command in ("single", "singleshot"):
        nlaunch = PyLauncher(flow).single_shot()
        flow.show_status()
        print("Number of tasks launched: %d" % nlaunch)

    elif options.command in ("rapid", "rapidfire"):
        nlaunch = PyLauncher(flow).rapidfire()
        flow.show_status()
        print("Number of tasks launched: %d" % nlaunch)

    elif options.command == "scheduler":
        sched_options = {oname: getattr(options, oname) for oname in 
            ("weeks", "days", "hours", "minutes", "seconds")}

        if all(v == 0 for v in sched_options.values()):
            sched = PyFlowScheduler.from_user_config()
        else:
            sched = PyFlowScheduler(**sched_options)

        # Check that the env on the local machine is properly setup before starting the scheduler.
        abilab.abicheck()

        sched.add_flow(flow)
        print(sched)
        try:
            sched.start()
        except KeyboardInterrupt:
            # Save the status of the flow before exiting.
            flow.pickle_dump()

    elif options.command == "status":
        if options.delay:
            cprint("Entering infinite loop. Press CTRL+C to exit", color="magenta", end="", flush=True)
            try:
                while True:
                    print(2*"\n" + time.asctime() + "\n")
                    flow.check_status()
                    flow.show_status(verbose=options.verbose, nids=selected_nids(flow, options))
                    if flow.all_ok: break
                    time.sleep(options.delay)
            except KeyboardInterrupt:
                pass
        else:
            flow.show_status(verbose=options.verbose, nids=selected_nids(flow, options))
            if flow.manager.has_queue:
                print("Total number of jobs in queue: %s" % flow.manager.get_njobs_in_queue())

    elif options.command == "open":
        flow.open_files(what=options.what, status=None, op="==", nids=selected_nids(flow, options))

    elif options.command == "ncopen":
        # The name of the method associated to this netcdf file.
        methname = "open_" + options.ncext.lower()
        # List of netcdf file objects.
        ncfiles = [getattr(task, methname)() for task in selected_tasks(flow, options) if hasattr(task, methname)]
        
        if ncfiles:
            # Start ipython shell with namespace 
            import IPython
            if len(ncfiles) == 1:
                IPython.start_ipython(argv=[], user_ns={"ncfile": ncfiles[0]})
            else:
                IPython.start_ipython(argv=[], user_ns={"ncfiles": ncfiles})
        else:
            cprint("Cannot find any netcdf file with extension %s" % options.ncext, color="magenta")

    elif options.command == "cancel":
        print("Number of jobs cancelled %d" % flow.cancel(nids=selected_nids(flow, options)))
        # Remove directory
        if options.rmtree: flow.rmtree()

    elif options.command == "restart":
        nlaunch, excs = 0, []
        for task in flow.unconverged_tasks:
            try:
                fired = task.restart()
                if fired: nlaunch += 1
            except Exception:
                excs.append(straceback())

        cprint("Number of jobs restarted %d" % nlaunch, "blue")
        if nlaunch:
            # update database
            flow.pickle_dump()

        if excs:
            print("Exceptions raised\n")
            pprint(excs)

    elif options.command == "reset":
        print("Will reset tasks with status: %s" % options.task_status)

        count = 0
        for task in flow.iflat_tasks(status=options.task_status, nids=selected_nids(flow, options)):
            print("Resetting task %s" % task)
            task.reset()
            count += 1	

        cprint("%d tasks have been reset" % count, "blue")
        nlaunch = PyLauncher(flow).rapidfire()
        flow.show_status()
        print("Number of tasks launched: %d" % nlaunch)

        if nlaunch == 0:
            deadlocked, runnables, running = flow.deadlocked_runnables_running()
            print("deadlocked:", deadlocked)
            print("runnables:", runnables)
            print("running:", running)
            if deadlocked and not (runnables or running):
                print("*** Flow is deadlocked ***")

        flow.pickle_dump()

    #elif options.command == "unlock":
    #    self.start_lockfile.remove()

    elif options.command == "move":
        print("Will move flow to %s..." % options.dest)
        flow.chroot(options.dest)
        flow.move(options.dest)

    elif options.command == "tail":
        def get_path(task):
            """Helper function used to select the files of a task."""
            choices = {
                "o": task.output_file,
                "l": task.log_file,
                "e": task.stderr_file,
            }
            return getattr(choices[options.what_tail], "path")

        paths = [get_path(task) for task in flow.iflat_tasks(status="Running", nids=selected_nids(flow, options))]

        if not paths:
            cprint("No job is running. Exiting!", "red")
        else:
            cprint("Press CTRL+C to interrupt. Number of output files %d" % len(paths), color="magenta", end="", flush=True)
            try:
                os.system("tail -f %s" % " ".join(paths))
            except KeyboardInterrupt:
                pass

    elif options.command == "qstat":
        for task in selected_tasks(flow, options):
            if not task.qjob: continue
            print("qjob", task.qjob)
            print("info", task.qjob.get_info())
            print("e start-time", task.qjob.estimated_start_time())
            print("qstats", task.qjob.get_stats())

    elif options.command == "deps":
        flow.check_status()
        flow.show_dependencies()

    elif options.command == "robot":
        import IPython
        with abilab.abirobot(flow, options.robot_ext, nids=selected_nids(flow, options)) as robot:
            #IPython.embed(header=str(robot) + "\nType `robot` in the terminal and use <TAB> to list its methods",  robot=robot)
            IPython.start_ipython(argv=[], user_ns={"robot": robot})

    elif options.command == "plot":
        fext = dict(
            ebands="gsr",
        )[options.what]

        open_method = "open_" + fext
        plot_method = "plot_" + options.what

        for task in selected_tasks(flow, options):
            try:
                with getattr(task, open_method)() as ncfile: 
                    print(ncfile)
                    #print(dir(ncfile))
                    getattr(ncfile, plot_method)()
            except Exception as exc:
                print(exc)

    elif options.command == "inspect":
        tasks = selected_tasks(flow, options)

        # Use different thread to inspect the task so that master can catch KeyboardInterrupt and exit.
        # One could use matplotlib non-blocking interface with show(block=False) but this one seems to work well.
        from multiprocessing import Process

        def plot_graphs():
            for task in tasks:
                if hasattr(task, "inspect"):
                    task.inspect()
                else:
                    cprint("Task %s does not provide an inspect method" % task, color="blue")

        p = Process(target=plot_graphs)
        p.start()
        num_tasks = len(tasks)

        if num_tasks == 1:
            p.join()
        else:
            cprint("Will produce %d matplotlib plots. Press CTRL+C to interrupt..." % num_tasks, color="magenta", end="", flush=True)
            try:
                p.join()
            except KeyboardInterrupt:
                print("\nTerminating thread...")
                p.terminate()

    elif options.command == "inputs":
        flow.show_inputs(nids=selected_nids(flow, options))

    elif options.command == "analyze":
        if not hasattr(flow, "analyze"):
            cprint("Flow does not provide the `analyze` method!", "red")
            return 1
            flow.analyze()

    elif options.command == "embed":
        import IPython
        IPython.embed(header="")

    else:
        raise RuntimeError("Don't know what to do with command %s!" % options.command)

    return retcode
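
The `inspect` command above runs the blocking matplotlib calls in a child process so that the master process keeps receiving CTRL+C. The same pattern in isolation (time.sleep stands in for the blocking task.inspect() calls):

from multiprocessing import Process
import time

def plot_graphs():
    # Stand-in for task.inspect(), which blocks on matplotlib's show().
    time.sleep(3600)

if __name__ == "__main__":
    p = Process(target=plot_graphs)
    p.start()
    try:
        p.join()  # The master blocks here, but CTRL+C still reaches it.
    except KeyboardInterrupt:
        print("\nTerminating process...")
        p.terminate()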
Example #10
def main():

    def str_examples():
        return """\
Usage example:
    abirun.py [FLOWDIR] rapid                 => Keep repeating, stop when no task can be executed.
    abirun.py [FLOWDIR] scheduler             => Execute flow with the scheduler.
    abirun.py [FLOWDIR] events                => Print ABINIT events (Warning/Error/Comment).
    abirun.py [FLOWDIR] history               => Print Task history.
    abirun.py [FLOWDIR] debug                 => Analyze error files and log files for possible error messages.
    abirun.py [FLOWDIR] gui                   => Open the GUI.
    abirun.py [FLOWDIR] doc_manager slurm     => Document the TaskManager options available for Slurm.
    abirun.py . doc_manager script            => Show the job script that will be produced with the current settings.
    abirun.py . doc_scheduler                 => Document the options available in scheduler.yml.

    nohup abirun.py [FLOWDIR] scheduler -s 30 &  => Start the scheduler to schedule task submission.

    If FLOWDIR is not given, abirun.py automatically selects the database located within
    the working directory. An Exception is raised if multiple databases are found.

    Note, moreover, that you can also replace FLOWDIR with the directory of a work/task
    to make the command operate on this node of the flow without having to specify --nids.
    To have the list of events of the task in `FLOWDIR/w0/t1`, for example, use:

        abirun.py FLOWDIR/w0/t1 events

    instead of

        abirun.py FLOWDIR events -n 123

    where 123 is the node identifier associated to w0/t1.

Options for developers:
    abirun.py prof ABIRUN_ARGS               => to profile abirun.py
    abirun.py tracemalloc ABIRUN_ARGS        => to trace memory blocks allocated by Python
"""
    def show_examples_and_exit(err_msg=None, error_code=1):
        """Display the usage of the script."""
        sys.stderr.write(str_examples())
        if err_msg: sys.stderr.write("Fatal Error\n" + err_msg + "\n")
        sys.exit(error_code)

    def parse_nids(s):
        """parse nids argument"""
        if s is None: return s
        try:
            if "," in s:
                return [int(t) for t in s.split(",")]
            else:
                # Convert string to slice and return list.
                s = as_slice(s)
                if s.stop is None: raise argparse.ArgumentTypeError("stop must be specified")
                return list(range(s.start, s.stop, s.step))
        except Exception:
            raise argparse.ArgumentTypeError("Invalid nids string %s\n Expecting None or int or comma-separated integers or slice sintax" % s)

    def parse_wslice(s):
        s = as_slice(s)
        if s is None: return s
        if s.stop is None: raise argparse.ArgumentTypeError("stop must be specified")
        return s
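
    # For example (assuming as_slice implements Python's extended slice syntax,
    # consistent with the --nids/--wslice help strings below):
    #   parse_nids("12,13,16") -> [12, 13, 16]
    #   parse_nids("10:12")    -> [10, 11]
    #   parse_nids("2:5:2")    -> [2, 4]
    #   parse_wslice(":3")     -> slice covering works 0, 1, 2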

    # Parent parser for commands that need to know on which subset of tasks/workflows we have to operate.
    # wslice and nids are mutually exclusive.
    flow_selector_parser = argparse.ArgumentParser(add_help=False)
    group = flow_selector_parser.add_mutually_exclusive_group()
    group.add_argument("-n", '--nids', default=None, type=parse_nids, help=(
        "Node identifier(s) used to select the task. Integer or comma-separated list of integers. "
        "Use `status` command to get the node ids. "
        "Examples: --nids=12 --nids=12,13,16 --nids=10:12 to select 10 and 11, --nids=2:5:2 to select 2,4."
        ))

    group.add_argument("-w", '--wslice', default=None, type=parse_wslice,
                                      help=("Select the list of works to analyze (python syntax for slices): "
                                      "Examples: --wslice=1 to select the second workflow, --wslice=:3 for 0,1,2, "
                                      "--wslice=-1 for the last workflow, --wslice::2 for even indices."))

    group.add_argument("-S", '--task-status', default=None, type=Status.as_status,
                        help="Select only the tasks with the given status. Default: None i.e. ignored. Possible values: %s." %
                        Status.all_status_strings())
    #group.add_argument("-p", "--task-pos", default=None, type=parse_wslice, help="List of tuples with the position of the tasl in the flow.")

    # Parent parser for common options.
    copts_parser = argparse.ArgumentParser(add_help=False)
    copts_parser.add_argument('-v', '--verbose', default=0, action='count', # -vv --> verbose=2
                              help='verbose, can be supplied multiple times to increase verbosity.')

    copts_parser.add_argument('--no-colors', default=False, action="store_true", help='Disable ASCII colors.')
    copts_parser.add_argument('--no-logo', default=False, action="store_true", help='Disable AbiPy logo.')
    copts_parser.add_argument('--loglevel', default="ERROR", type=str,
                        help="set the loglevel. Possible values: CRITICAL, ERROR (default), WARNING, INFO, DEBUG.")
    copts_parser.add_argument('--remove-lock', default=False, action="store_true",
                       help="Remove the lock file of the pickle file storing the flow.")

    # Build the main parser.
    parser = argparse.ArgumentParser(epilog=str_examples(), formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('flowdir', nargs="?", help=("File or directory containing the ABINIT flow/work/task. "
                                                    "If not given, the flow in the current workdir is selected."))
    parser.add_argument('-V', '--version', action='version', version="%(prog)s version " + abilab.__version__)

    # Create the parsers for the sub-commands
    subparsers = parser.add_subparsers(dest='command', help='sub-command help', description="Valid subcommands")

    # Subparser for single command.
    p_single = subparsers.add_parser('single', parents=[copts_parser], help="Run single task and exit.")

    # Subparser for rapidfire command.
    p_rapid = subparsers.add_parser('rapid', parents=[copts_parser], help="Run all tasks in rapidfire mode.")

    # Subparser for scheduler command.
    p_scheduler = subparsers.add_parser('scheduler', parents=[copts_parser],
                                        help="Run all tasks with a Python scheduler. Requires scheduler.yml.")

    p_scheduler.add_argument('-w', '--weeks', default=0, type=int, help="number of weeks to wait.")
    p_scheduler.add_argument('-d', '--days', default=0, type=int, help="number of days to wait.")
    p_scheduler.add_argument('-hs', '--hours', default=0, type=int, help="number of hours to wait.")
    p_scheduler.add_argument('-m', '--minutes', default=0, type=int, help="number of minutes to wait.")
    p_scheduler.add_argument('-s', '--seconds', default=0, type=int, help="number of seconds to wait.")

    # Subparser for batch command.
    p_batch = subparsers.add_parser('batch', parents=[copts_parser], help="Run scheduler in batch script.")
    p_batch.add_argument("-t", '--timelimit', default=None, help=("Time limit for batch script. "
                         "Accept int with seconds or string with time given in the slurm convention: "
                         "`days-hours:minutes:seconds`. If timelimit is None, the default value specified"
                         " in the `batch_adapter` entry of `manager.yml` is used."))

    # Subparser for status command.
    p_status = subparsers.add_parser('status', parents=[copts_parser, flow_selector_parser], help="Show status table.")
    p_status.add_argument('-d', '--delay', nargs="?", const=5, default=0, type=int,
                          help=("Enter an infinite loop and delay execution for the given number of seconds. (default: 5 secs)."))
    p_status.add_argument('-s', '--summary', default=False, action="store_true",
                          help="Print short version with status counters.")

    # Subparser for set_status command.
    p_set_status = subparsers.add_parser('set_status', parents=[copts_parser, flow_selector_parser],
        help="Change the status of the task. WARNING: Option for developers!")
    p_set_status.add_argument('new_status', help="New value of status. Possible values: %s." % Status.all_status_strings())

    # Subparser for cancel command.
    p_cancel = subparsers.add_parser('cancel', parents=[copts_parser, flow_selector_parser],
                                     help="Cancel the tasks in the queue. Not available if qtype==shell.")
    p_cancel.add_argument("-r", "--rmtree", action="store_true", default=False, help="Remove flow directory.")

    # Subparser for restart command.
    p_restart = subparsers.add_parser('restart', parents=[copts_parser, flow_selector_parser],
                help="Restart the tasks of the flow. By default, only the task with status==Unconverged are restarted. "
                     "Use -S `status` and/or -n node_ids to select particular tasks.")

    # Subparser for reset command.
    p_reset = subparsers.add_parser('reset', parents=[copts_parser, flow_selector_parser],
                                    help="Reset the tasks of the flow with the specified status.")
    p_reset.add_argument("--relaunch", action="store_true", default=False,
                         help="Relaunch tasks in rapid mode after reset.")

    # Subparser for move command.
    p_move = subparsers.add_parser('move', parents=[copts_parser],
                                    help="Move the flow to a new directory and change the absolute paths.")
    p_move.add_argument('dest', nargs=1)

    # Subparser for open command.
    p_open = subparsers.add_parser('open', parents=[copts_parser, flow_selector_parser],
                                   help="Open files in $EDITOR (type `abirun.py FLOWDIR open --help` for help).")
    p_open.add_argument('what', nargs="?", default="o",
        help="""\
Specify the files to open. Possible choices:
    i ==> input_file
    o ==> output_file
    f ==> files_file
    j ==> job_file
    l ==> log_file
    e ==> stderr_file
    q ==> qout_file
    all ==> all files.
""")

    # Subparser for ncopen.
    p_ncopen = subparsers.add_parser('ncopen', parents=[copts_parser, flow_selector_parser],
                                      help="Open netcdf files in ipython. Use --help for more info.")
    p_ncopen.add_argument('ncext', nargs="?", default="GSR", help="Select the type of file to open.")

    # Subparser for abibuild
    p_abibuild = subparsers.add_parser('abibuild', parents=[copts_parser, flow_selector_parser],
                                       help="Show Abinit build information and exit.")

    # Subparser for doc_scheduler
    p_docsched = subparsers.add_parser('doc_scheduler', parents=[copts_parser],
                                       help="Document the options available in scheduler.yml.")

    # Subparser for gui command.
    p_gui = subparsers.add_parser('gui', parents=[copts_parser], help="Open the GUI (requires wxPython).")
    p_gui.add_argument("--chroot", default="", type=str, help=("Use chroot as new directory of the flow. " +
                       "Mainly used for opening a flow located on a remote filesystem mounted with sshfs. " +
                       "In this case chroot is the absolute path to the flow on the **localhost** ",
                       "Note that it is not possible to change the flow from remote when chroot is used."))

    # Subparser for new_manager.
    p_new_manager = subparsers.add_parser('new_manager', parents=[copts_parser, flow_selector_parser],
                                          help="Change the TaskManager.")
    p_new_manager.add_argument("manager_file", default="", type=str, help="YAML file with the new manager.")

    # Subparser for tail.
    p_tail = subparsers.add_parser('tail', parents=[copts_parser, flow_selector_parser],
                                   help="Use tail to follow the main output files of the flow.")
    p_tail.add_argument('what_tail', nargs="?", type=str, default="o",
                        help="What to follow: `o` for output (default), `l` for logfile, `e` for stderr.")

    # Subparser for qstat.
    p_qstat = subparsers.add_parser('qstat', parents=[copts_parser], help="Show additional info on the jobs in the queue.")

    # Subparser for deps.
    p_deps = subparsers.add_parser('deps', parents=[copts_parser], help="Show dependencies.")

    # Subparser for robot.
    p_robot = subparsers.add_parser('robot', parents=[copts_parser, flow_selector_parser],
                                    help="Use a robot to analyze the results of multiple tasks (requires ipython).")
    p_robot.add_argument('robot_ext', nargs="?", type=str, default="GSR", help="The file extension of the netcdf file.")

    # Subparser for plot.
    p_plot = subparsers.add_parser('plot', parents=[copts_parser, flow_selector_parser],
                                   help="Plot data. Use --help for more info.")
    p_plot.add_argument("what", nargs="?", type=str, default="ebands", help="Object to plot.")

    # Subparser for inspect.
    p_inspect = subparsers.add_parser('inspect', parents=[copts_parser, flow_selector_parser], help="Inspect the tasks.")

    # Subparser for inputs.
    p_inputs = subparsers.add_parser('inputs', parents=[copts_parser, flow_selector_parser],
                                     help="Show the input files of the tasks.")
    p_inputs.add_argument("-vn", "--varnames", nargs="?", default=None, type=parse_strings,
                           help="Comma-separated variable names. Can be used to print only these variables.")

    # Subparser for manager.
    p_manager = subparsers.add_parser('doc_manager', parents=[copts_parser], help="Document the TaskManager options.")
    p_manager.add_argument("qtype", nargs="?", default=None, help=("Write job script to terminal if qtype='script' else "
        "document the qparams for the given QueueAdapter qtype e.g. slurm."))

    # Subparser for events.
    p_events = subparsers.add_parser('events', parents=[copts_parser, flow_selector_parser],
                                    help="Show ABINIT events (error messages, warnings, comments).")
    #p_events.add_argument("-t", "event-type", default=)

    # Subparser for corrections.
    p_corrections = subparsers.add_parser('corrections', parents=[copts_parser, flow_selector_parser],
                                          help="Show abipy corrections.")

    # Subparser for history.
    p_history = subparsers.add_parser('history', parents=[copts_parser, flow_selector_parser], help="Show Node history.")
    p_history.add_argument("-m", "--metadata", action="store_true", default=False, help="Print history metadata.")
    p_history.add_argument("-f", "--full-history", action="store_true", default=False,
                           help="Print full history set, including nodes with an empty history.")
    #p_history.add_argument("-t", "--task-history", action="store_true", default=True, help=)

    # Subparser for handlers.
    p_handlers = subparsers.add_parser('handlers', parents=[copts_parser], help="Show event handlers installed in the flow.")
    p_handlers.add_argument("-d", "--doc", action="store_true", default=False,
                            help="Show documentation about all the handlers that can be installed.")

    # Subparser for notebook.
    p_notebook = subparsers.add_parser('notebook', parents=[copts_parser],
                                       help="Create and open an ipython notebook to interact with the flow.")

    # Subparser for ipython.
    p_ipython = subparsers.add_parser('ipython', parents=[copts_parser],
                                      help="Embed IPython. Useful for advanced operations or debugging purposes.")
    p_ipython.add_argument('--argv', nargs="?", default="", type=shlex.split,
                           help="Command-line options passed to ipython. Must be enclosed by quotes. "
                                "Example: --argv='--matplotlib=wx'")

    # Subparser for tar.
    p_tar = subparsers.add_parser('tar', parents=[copts_parser], help="Create tarball file.")
    p_tar.add_argument("-s", "--max-filesize", default=None,
                       help="Exclude file whose size > max-filesize bytes. Accept integer or string e.g `1Mb`.")

    p_tar.add_argument("-e", "--exclude-exts", default=None, type=parse_strings,
                       help="Exclude file extensions. Accept string or comma-separated strings. Ex: -eWFK or --exclude-exts=WFK,GSR")

    p_tar.add_argument("-d", "--exclude-dirs", default=None, type=parse_strings,
                       help="Exclude directories. Accept string or comma-separated strings. Ex: --exlude-dirs=indir,outdir")

    p_tar.add_argument("-l", "--light", default=False, action="store_true",
                       help="Create light-weight version of the tarball for debugging purposes. Other options are ignored.")

    # Subparser for debug.
    p_debug = subparsers.add_parser('debug', parents=[copts_parser, flow_selector_parser],
                                     help="Analyze error files and log files for possible error messages.")

    # Subparser for group.
    p_group = subparsers.add_parser('group', parents=[copts_parser, flow_selector_parser],
                                     help="Group tasks according to property.")

    # Subparser for diff.
    p_diff = subparsers.add_parser('diff', parents=[copts_parser, flow_selector_parser],
                                   help="Compare files produced by two or three nodes.")
    p_diff.add_argument('what_diff', nargs="?", type=str, default="i",
                        help="What to diff: `i` for input (default), `o` for output, `l` for logfile, `e` for stderr.")

    # Subparser for networkx.
    p_networkx = subparsers.add_parser('networkx', parents=[copts_parser], #, flow_selector_parser],
                                     help="Draw flow and node dependecies with networkx package.")
    p_networkx.add_argument('--nxmode', default="status",
                            help="Type of network plot. Possible values: `status`, `network`. Default: `status`.")
    p_networkx.add_argument('--edge-labels', action="store_true", default=False, help="Show edge labels.")

    # Subparser for listext.
    p_listext = subparsers.add_parser('listext', parents=[copts_parser],
                                     help="List all the output files with the given extension that have been produced by the nodes.")
    p_listext.add_argument('listexts', nargs="+", help="List of Abinit file extensions, e.g. DDB, GSR, WFK etc.")

    # Subparser for timer.
    p_timer = subparsers.add_parser('timer', parents=[copts_parser, flow_selector_parser],
                                    help=("Read the section with timing info from the main ABINIT output file (requires timopt != 0)"
                                          "Open Ipython terminal to inspect data."))

    # Parse command line.
    try:
        options = parser.parse_args()
    except Exception:
        show_examples_and_exit(error_code=1)

    # loglevel is bound to the string value obtained from the command line argument.
    # Convert to upper case to allow the user to specify --loglevel=DEBUG or --loglevel=debug
    import logging
    numeric_level = getattr(logging, options.loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % options.loglevel)
    logging.basicConfig(level=numeric_level)

    # Documentation options that do not need a flow.
    # Print docs and exit immediately.
    if options.command == "doc_manager":
        # Document TaskManager options and qparams.
        qtype = options.qtype

        if qtype == "script":
            manager = abilab.TaskManager.from_user_config()
            script = manager.qadapter.get_script_str(
                job_name="job_name",
                launch_dir="workdir",
                executable="executable",
                qout_path="qout_file.path",
                qerr_path="qerr_file.path",
                stdin="stdin",
                stdout="stdout",
                stderr="stderr",
            )
            print(script)

        else:
            print(abilab.TaskManager.autodoc())
            from pymatgen.io.abinit.qadapters import show_qparams, all_qtypes

            print("qtype supported: %s" % all_qtypes())
            print("Use `abirun.py . manager slurm` to have the list of qparams for slurm.\n")

            if qtype is not None:
                print("QPARAMS for %s" % qtype)
                show_qparams(qtype)

        sys.exit(0)

    if options.command == "doc_scheduler":
        print("Options that can be specified in scheduler.yml:")
        print(abilab.PyFlowScheduler.autodoc())
        sys.exit(0)

    # After this point we start to operate on the flow.
    # 0) Print logo
    # 1) Read flow from pickle file and construct nids set if needed.
    # 2) Operate on the flow depending on the options specified by the user on the CLI.
    if options.no_colors:
        # Disable colors
        termcolor.enable(False)

    if not options.no_logo:
        nrows, ncols = get_terminal_size()
        if ncols > 100: cprint(abilab.abipy_logo1(), "yellow")

        system, node, release, version, machine, processor = platform.uname()
        cprint("Running on %s -- system %s -- Python %s -- %s" % (
              gethostname(), system, platform.python_version(), "abirun" + "-" + abilab.__version__),
              'yellow', attrs=['underline'])

    wname, tname = None, None
    if options.flowdir is None:
        # Will try to figure out the location of the Flow.
        options.flowdir = os.getcwd()
    else:
        # Sometimes one wants to inspect a work or a task by just using `abirun.py flow/w0/t0 inspect`
        # without knowing its node id. flowdir_wname_tname will solve the problem!
        options.flowdir, wname, tname = flowdir_wname_tname(options.flowdir)

    # Read the flow from the pickle database.
    flow = abilab.Flow.pickle_load(options.flowdir, remove_lock=options.remove_lock)
    #flow.show_info()

    # If we have selected a work/task, we have to convert wname/tname into node ids (nids)
    if wname or tname:
        if wname and tname:
            # Task
            for w_pos, work in enumerate(flow):
                if os.path.basename(work.workdir) == wname: break
            else:
                raise RuntimeError("Cannot find work from name %s" % wname)

            for t_pos, task in enumerate(flow[w_pos]):
                if os.path.basename(task.workdir) == tname: break
            else:
                raise RuntimeError("Cannot find task from name %s" % tname)

            # Create options.nids here
            options.nids = set([flow[w_pos].node_id, flow[w_pos][t_pos].node_id])

        else:
            # Work
            for w_pos, work in enumerate(flow):
                if os.path.basename(work.workdir) == wname: break
            else:
                raise RuntimeError("Cannot find work from name %s" % wname)

            # Create options.nids here
            options.nids = set([flow[w_pos].node_id] + [task.node_id for task in flow[w_pos]])

    if options.verbose > 1: print("options.nids:", options.nids)

    retcode = 0

    if options.command == "abibuild":
        abinit_build = abilab.AbinitBuild()
        print()
        print(abinit_build)
        print()
        if not options.verbose:
            print("Use --verbose for additional info")
        else:
            print(abinit_build.info)
        #print(flow.manager)

    elif options.command == "gui":
        if options.chroot:
            # Change the workdir of flow.
            print("Will chroot to %s..." % options.chroot)
            flow.chroot(options.chroot)

        from abipy.gui.flowviewer import wxapp_flow_viewer
        wxapp_flow_viewer(flow).MainLoop()

    elif options.command == "new_manager":
        # Read the new manager from file.
        new_manager = abilab.TaskManager.from_file(options.manager_file)

        # Default status for new_manager is QCritical
        if options.task_status is None:
            options.task_status = Status.as_status("QCritical")

        # Change the manager of the errored tasks.
        print("Resetting tasks with status: %s" % options.task_status)
        for task in flow.iflat_tasks(status=options.task_status, nids=selected_nids(flow, options)):
            task.reset()
            task.set_manager(new_manager)

        # Update the database.
        return flow.build_and_pickle_dump()

    elif options.command == "events":
        flow.show_events(status=options.task_status, nids=selected_nids(flow, options))
        return 0

    elif options.command == "corrections":
        flow.show_corrections(status=options.task_status, nids=selected_nids(flow, options))
        return 0

    elif options.command == "history":
        flow.show_history(status=options.task_status, nids=selected_nids(flow, options),
                         full_history=options.full_history, metadata=options.metadata)
        return 0

    elif options.command == "handlers":
        if options.doc:
            autodoc_event_handlers()
        else:
            flow.show_event_handlers()

    elif options.command == "single":
        nlaunch = flow.single_shot()
        print("Number of tasks launched: %d" % nlaunch)
        if nlaunch: flow.show_status()

    elif options.command == "rapid":
        nlaunch = flow.rapidfire()
        print("Number of tasks launched: %d" % nlaunch)
        if nlaunch: flow.show_status()

    elif options.command == "scheduler":
        # Check that the env on the local machine is properly configured before starting the scheduler.
        abilab.abicheck()

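        # Build the scheduler repeat interval from the CLI options; if all values are zero,
        # make_scheduler falls back to the parameters given in scheduler.yml.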
        sched_options = {oname: getattr(options, oname) for oname in
            ("weeks", "days", "hours", "minutes", "seconds")}

        if all(v == 0 for v in sched_options.values()):
            sched = flow.make_scheduler()
        else:
            sched = flow.make_scheduler(**sched_options)

        print(sched)
        return sched.start()

    elif options.command == "batch":
        return flow.batch(timelimit=options.timelimit)

    elif options.command == "status":
        # Select the method to call.
        show_func = flow.show_status if not options.summary else flow.show_summary

        if options.delay:
            cprint("Entering infinite loop (delay: %d s). Only changes are shown\nPress <CTRL+C> to exit" %
                   options.delay, color="magenta", end="", flush=True)

            # Total counter and dicts used to detect changes.
            tot_count = 0
            before_task2stat, now_task2stat = {}, {}
            # Progressbar setup
            from tqdm import tqdm
            pbar, pbar_count, pbar_total = None, 0, 100
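            # After pbar_total consecutive checks without changes, the flow is reloaded from disk.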

            exit_code = 0
            def exit_now():
                """
                Function used to test if we have to exit from the infinite loop below.
                Return: != 0 if we must exit. > 0 if some error occurred.
                """
                if flow.all_ok:
                    cprint("Flow reached all_ok", "green")
                    return -1
                if any(st.is_critical for st in before_task2stat.values()):
                    cprint(boxed("Found tasks with critical status"), "red")
                    return 1
                return 0

            try:
                while True:
                    tot_count += 1
                    flow.check_status()

                    # Here I test whether there's been some change in the flow
                    # before printing the status table.
                    # Note that the flow in memory may not correspond to the one
                    # being executed by the scheduler. This is why we reload it
                    # from disk every pbar_total iterations.
                    if tot_count == 1:
                        for task in flow.iflat_tasks(nids=selected_nids(flow, options)):
                            before_task2stat[task] = task.status
                    else:
                        for task in flow.iflat_tasks(nids=selected_nids(flow, options)):
                            now_task2stat[task] = task.status

                        if (len(before_task2stat) == len(now_task2stat) and
                            all(now_task2stat[t] == before_task2stat[t] for t in now_task2stat)):
                            # In principle this check is not needed since nothing changed, but better safe than sorry.
                            exit_code = exit_now()
                            if exit_code: break

                            # Progress bar section.
                            if pbar is None:
                                print("No change detected in the flow. Won't print status table till next change...")
                                pbar = tqdm(total=pbar_total)

                            if pbar_count <= pbar_total:
                                pbar_count += 1
                                pbar.update(1)
                            else:
                                pbar_count = 0
                                pbar.close()
                                pbar = tqdm(total=pbar_total)
                                flow.reload()

                            time.sleep(options.delay)
                            continue

                        # copy now --> before
                        before_task2stat = now_task2stat.copy()

                    # Print status table. Exit if success or critical errors.
                    print(2*"\n" + time.asctime() + "\n")
                    show_func(verbose=options.verbose, nids=selected_nids(flow, options))
                    # Add summary table to status table.
                    if show_func is flow.show_status: flow.show_summary()

                    exit_code = exit_now()
                    if exit_code: break
                    time.sleep(options.delay)

                # Print status table if something bad happened.
                if exit_code == 1:
                    flow.show_status()

            except KeyboardInterrupt:
                cprint("Received KeyboardInterrupt from user\n", "yellow")
        else:
            show_func(verbose=options.verbose, nids=selected_nids(flow, options))
            if options.verbose and flow.manager.has_queue:
                print("Total number of jobs in queue: %s" % flow.manager.get_njobs_in_queue())

    elif options.command == "set_status":
        # Default status for reset is QCritical
        if options.task_status is None: options.task_status = Status.as_status("QCritical")
        new_status = Status.as_status(options.new_status)
        print("Will set all tasks with status: ", options.task_status, " to new_status", new_status)

        count = 0
        for task in flow.iflat_tasks(status=options.task_status, nids=selected_nids(flow, options)):
            task.set_status(new_status, msg="Changed by abirun from %s to %s" % (task.status, new_status))
            count += 1

        print("Number of tasks modified: %s" % count)
        if count:
            # update database
            flow.pickle_dump()

    elif options.command == "open":
        flow.open_files(what=options.what, status=None, op="==", nids=selected_nids(flow, options))

    elif options.command == "ncopen":
        # The name of the method associated to this netcdf file.
        methname = "open_" + options.ncext.lower()
        # List of netcdf file objects.
        ncfiles = [getattr(task, methname)() for task in flow.select_tasks(nids=options.nids, wslice=options.wslice)
                    if hasattr(task, methname)]

        if ncfiles:
            # Start ipython shell with namespace
            import IPython
            if len(ncfiles) == 1:
                IPython.start_ipython(argv=[], user_ns={"ncfile": ncfiles[0]})
            else:
                IPython.start_ipython(argv=[], user_ns={"ncfiles": ncfiles})
        else:
            cprint("Cannot find any netcdf file with extension %s" % options.ncext, color="magenta")

    elif options.command == "cancel":
        print("Number of jobs cancelled %d" % flow.cancel(nids=selected_nids(flow, options)))
        # Remove directory
        if options.rmtree: flow.rmtree()

    elif options.command == "restart":
        # Default status for restart is Unconverged if the user provides neither nids nor a task status.
        if options.task_status is None and options.nids is None:
            options.task_status = Status.as_status("Unconverged")

        nlaunch, excs = 0, []
        for task in flow.iflat_tasks(status=options.task_status, nids=selected_nids(flow, options)):
            if options.verbose:
                print("Will try to restart %s, with status %s" % (task, task.status))
            try:
                fired = task.restart()
                if fired: nlaunch += 1
            except Exception:
                excs.append(straceback())

        cprint("Number of jobs restarted %d" % nlaunch, "blue")
        if nlaunch:
            # update database
            flow.pickle_dump()

        if excs:
            print("Exceptions raised\n")
            pprint(excs)

    elif options.command == "reset":
        # Default status for reset is QCritical
        if options.task_status is None: options.task_status = Status.as_status("QCritical")
        print("Will reset tasks with status: %s" % options.task_status)

        count = 0
        for task in flow.iflat_tasks(status=options.task_status, nids=selected_nids(flow, options)):
            print("Resetting task %s" % task)
            failed = task.reset()
            if failed:
                print("Task %s couldn't be reset" % task)
            else:
                count += 1

        cprint("%d tasks have been reset" % count, "blue")
        # nlaunch must be initialized here: the deadlock check below uses it even without --relaunch.
        nlaunch = 0
        if options.relaunch:
            nlaunch = flow.rapidfire()
            print("Number of tasks launched: %d" % nlaunch)
        flow.show_status()

        if nlaunch == 0:
            g = flow.find_deadlocks()
            #print("deadlocked:", gdeadlocked, "\nrunnables:", grunnables, "\nrunning:", g.running)
            if g.deadlocked and not (g.runnables or g.running):
                cprint("*** Flow is deadlocked ***", "red")

        flow.pickle_dump()

    elif options.command == "move":
        print("Will move flow to %s..." % options.dest)
        flow.chroot(options.dest)
        flow.move(options.dest)

    elif options.command == "tail":
        def get_path(task):
            """Helper function used to select the files of a task."""
            choices = {
                "o": task.output_file,
                "l": task.log_file,
                "e": task.stderr_file,
            }
            return choices[options.what_tail].path

        # Default status for tail is Running
        if options.task_status is None: options.task_status = Status.as_status("Running")

        paths = [get_path(task) for task in flow.iflat_tasks(status=options.task_status, nids=selected_nids(flow, options))]

        if not paths:
            cprint("No job is running. Exiting!", "magenta")
        else:
            cprint("Press <CTRL+C> to interrupt. Number of output files %d\n" % len(paths), color="magenta", end="", flush=True)
            try:
                os.system("tail -f %s" % " ".join(paths))
            except KeyboardInterrupt:
                cprint("Received KeyboardInterrupt from user\n", "yellow")

    elif options.command == "qstat":
        #for task in flow.select_tasks(nids=options.nids, wslice=options.wslice):
        for task in flow.iflat_tasks():
            if not task.qjob: continue
            print("qjob", task.qjob)
            print("info", task.qjob.get_info())
            print("estimated_start-time", task.qjob.estimated_start_time())
            print("qstats", task.qjob.get_stats())

    elif options.command == "deps":
        flow.check_status()
        flow.show_dependencies()

    elif options.command == "robot":
        import IPython
        with abilab.abirobot(flow, options.robot_ext, nids=selected_nids(flow, options)) as robot:
            #IPython.embed(header=str(robot) + "\nType `robot` in the terminal and use <TAB> to list its methods",  robot=robot)
            IPython.start_ipython(argv=[], user_ns={"robot": robot})

    elif options.command == "plot":
        fext = dict(
            ebands="gsr",
        )[options.what]

        open_method = "open_" + fext
        plot_method = "plot_" + options.what

        for task in flow.select_tasks(nids=options.nids, wslice=options.wslice):
            try:
                with getattr(task, open_method)() as ncfile:
                    getattr(ncfile, plot_method)()
            except Exception as exc:
                print(exc)

    elif options.command == "inspect":
        tasks = flow.select_tasks(nids=options.nids, wslice=options.wslice)

        # Inspecting the tasks in a separate process would let the master catch KeyboardInterrupt
        # and exit cleanly; matplotlib's non-blocking show(block=False) is another option.
        # For now plot_graphs is called directly; the Process-based variant is kept commented out below.
        #from multiprocessing import Process

        def plot_graphs():
            for task in tasks:
                if hasattr(task, "inspect"):
                    try:
                        task.inspect()
                    except Exception as exc:
                        cprint("%s: inspect method raised %s " % (task, exc), color="blue")

                else:
                    cprint("Task %s does not provide an inspect method" % task, color="blue")

        plot_graphs()

        # This works with py3k but not with py2
        #p = Process(target=plot_graphs)
        #p.start()
        #num_tasks = len(tasks)

        #if num_tasks == 1:
        #    p.join()
        #else:
        #    cprint("Will produce %d matplotlib plots. Press <CTRL+C> to interrupt..." % num_tasks, color="magenta", end="", flush=True)
        #    try:
        #        p.join()
        #    except KeyboardInterrupt:
        #        print("\nTerminating thread...")
        #        p.terminate()

    elif options.command == "inputs":
        flow.show_inputs(varnames=options.varnames, nids=selected_nids(flow, options))

    elif options.command == "notebook":
        return write_open_notebook(flow, options)

    elif options.command == "ipython":
        import IPython
        #IPython.embed(header="")
        #print("options:", options.argv)
        IPython.start_ipython(argv=options.argv, user_ns={"flow": flow})

    elif options.command == "tar":
        if not options.light:
            tarfile = flow.make_tarfile(name=None,
                                        max_filesize=options.max_filesize,
                                        exclude_exts=options.exclude_exts,
                                        exclude_dirs=options.exclude_dirs,
                                        verbose=options.verbose)
            print("Created tarball file %s" % tarfile)
        else:
            tarfile = flow.make_light_tarfile()
            print("Created light tarball file %s" % tarfile)

    elif options.command == "debug":
        flow.debug(status=options.task_status, nids=selected_nids(flow, options))
        return 0

    elif options.command == "group":
        d = defaultdict(list)
        for task in flow.iflat_tasks(status=options.task_status, nids=selected_nids(flow, options)):
            d[task.status].append(task.node_id)

        print("Mapping status --> List of node identifiers")
        for k, v in d.items():
            print("   ",k, " --> ", v)

    elif options.command == "diff":
        if options.nids is None:
            raise ValueError("nids must be specified when using diff command")

        tasks = list(flow.iflat_tasks(nids=selected_nids(flow, options)))

        if len(tasks) not in (2, 3):
            if len(tasks) == 1:
                cprint("Only one task selected, nothing to diff. Returning\n", color="magenta", end="", flush=True)
                return 0
            else:
                raise ValueError("Don't know how to compare files produced by %d tasks" % len(tasks))

        # Build list of lists. Each sub-list contains the files associated to the i-th task.
        files_for_task = [None] * len(tasks)
        for i, task in enumerate(tasks):
            files_for_task[i] = task.select_files(options.what_diff)

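        # zip pairs the i-th file of each task so that corresponding files are compared together.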
        for diff_files in zip(*files_for_task):
            print("Comparing", ", ".join(os.path.relpath(p) for p in diff_files))
            args = " ".join(os.path.relpath(p) for p in diff_files)
            # TODO: I should have written a Differ object somewhere!
            os.system("vimdiff %s" % args)

    elif options.command == "networkx":
        flow.plot_networkx(mode=options.nxmode, with_edge_labels=options.edge_labels)

    elif options.command == "listext":
        for ext in options.listexts:
            flow.listext(ext)
            print("")

    elif options.command == "timer":
        print("Warning this option is still under development")
        timer = flow.parse_timing()
        if timer is None:
            cprint("Cannot parse time data!", color="magenta", end="", flush=True)
            return 1

        import IPython
        IPython.start_ipython(argv=[], user_ns={"timer": timer})

    else:
        raise RuntimeError("Don't know what to do with command %s!" % options.command)

    return retcode
Example #11
0
def main():
    def str_examples():
        return """\
Usage example:
    abicheck.py
"""

    def show_examples_and_exit(err_msg=None, error_code=1):
        """Display the usage of the script."""
        sys.stderr.write(str_examples())
        if err_msg:
            sys.stderr.write("Fatal Error\n" + err_msg + "\n")
        sys.exit(error_code)

    parser = argparse.ArgumentParser(
        epilog=str_examples(),
        formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument(
        '--loglevel',
        default="ERROR",
        type=str,
        help="set the loglevel. Possible values: CRITICAL, ERROR (default), WARNING, INFO, DEBUG")
    parser.add_argument('-V',
                        '--version',
                        action='version',
                        version="%(prog)s version " + abilab.__version__)
    parser.add_argument(
        '-v',
        '--verbose',
        default=0,
        action='count',  # -vv --> verbose=2
        help='verbose, can be supplied multiple times to increase verbosity.')
    parser.add_argument('--no-colors',
                        default=False,
                        action="store_true",
                        help='Disable ASCII colors.')

    # Parse the command line.
    try:
        options = parser.parse_args()
    except Exception:
        show_examples_and_exit(error_code=1)

    # loglevel is bound to the string value obtained from the command line argument.
    # Convert to upper case to allow the user to specify --loglevel=DEBUG or --loglevel=debug
    import logging
    numeric_level = getattr(logging, options.loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % options.loglevel)
    logging.basicConfig(level=numeric_level)

    if options.no_colors:
        # Disable colors
        termcolor.enable(False)

    errmsg = abilab.abicheck(verbose=options.verbose)
    if errmsg:
        cprint(errmsg, "red")
    else:
        print()
        cprint("Abipy requirements are properly configured", "green")

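    # A non-empty error message yields a non-zero exit status (its length).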
    return len(errmsg)