Example #1
from datetime import timedelta
from random import uniform

def wrap(**options):
    # Fragment: `func` and the `Scheduler` wrapper are defined in the enclosing scope.
    sched = Scheduler()
    sched.start()
    seconds = 5
    interval = timedelta(seconds=seconds)
    #trigger = RandomizedIntervalTrigger(interval, randomize=lambda: uniform(seconds / 8, seconds / 4))
    trigger = RandomizedIntervalTrigger(interval)

    # The wrapper's signature is add_job(func, trigger, ...), so func comes first.
    sched.add_job(func, trigger)
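The `RandomizedIntervalTrigger` used above is not defined in this snippet. A minimal sketch of what such a trigger could look like, assuming the APScheduler 3.x `BaseTrigger` interface (the class name and the `randomize` callable come from the commented-out line above; the implementation itself is an assumption):

from datetime import timedelta
from apscheduler.triggers.base import BaseTrigger

class RandomizedIntervalTrigger(BaseTrigger):
    """Fires at a fixed interval, optionally shifted by a random offset."""

    def __init__(self, interval, randomize=None):
        self.interval = interval    # datetime.timedelta between runs
        self.randomize = randomize  # optional callable returning extra seconds

    def get_next_fire_time(self, previous_fire_time, now):
        # First run: one interval from now; later runs: one interval after
        # the previous fire time, plus the random offset if one was given.
        next_time = (previous_fire_time or now) + self.interval
        if self.randomize is not None:
            next_time += timedelta(seconds=self.randomize())
        return next_time

Note that a trigger instance like this only works on the APScheduler 3.x path of the version-agnostic wrapper shown in Example #2 below; the 2.x branch of that wrapper expects a trigger name string instead.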
Example #2
class Scheduler(object):

    schedulr = None
    aps3 = True

    def __init__(self):

        #####
        # ApScheduler version detection
        try:
            # APScheduler 3.x implementation
            from apscheduler.schedulers.background import BackgroundScheduler
            self.schedulr = BackgroundScheduler()
            self.aps3 = True
        except ImportError:
            # APScheduler 2.x implementation
            from apscheduler.scheduler import Scheduler
            self.schedulr = Scheduler()
            self.aps3 = False

    def start(self):
        return self.schedulr.start()

    def get_job(self, name):
        if self.aps3:
            return self.schedulr.get_job(name)
        else:
            jobs = self.schedulr.get_jobs()
            for job in jobs:
                if job.name == name:
                    return job

            return None

    def add_job(self, func, trigger, args=None, kwargs=None, id=None, **trigger_args):
        if self.aps3:
            return self.schedulr.add_job(func, trigger, id=id, args=args, kwargs=kwargs, **trigger_args)
        else:
            if trigger == 'date':
                run_date = trigger_args.pop('run_date')   # intentional: raises KeyError if not set
                return self.schedulr.add_date_job(func, run_date, name=id, args=args, kwargs=kwargs)
            elif trigger == 'interval':
                # only partially implemented!!
                seconds = trigger_args.pop('seconds', 0)
                minutes = trigger_args.pop('minutes', 0)
                hours = trigger_args.pop('hours', 0)

                return self.schedulr.add_interval_job(func, name=id,
                                                      hours=hours, minutes=minutes, seconds=seconds,
                                                      args=args, kwargs=kwargs)
            elif trigger == 'cron':
                # only partially implemented!!
                second = trigger_args.pop('second', 0)
                minute = trigger_args.pop('minute', 0)
                hour = trigger_args.pop('hour', 0)

                return self.schedulr.add_cron_job(func, name=id, hour=hour, minute=minute, second=second)
            else:
                raise NotImplementedError

    def shutdown(self):
        return self.schedulr.shutdown()

    # https://github.com/ralphwetzel/theonionbox/issues/19#issuecomment-263110953
    def check_tz(self):
        from tzlocal import get_localzone

        try:
            # APScheduler 3.x
            from apscheduler.util import astimezone

        except ImportError:
            # https://github.com/ralphwetzel/theonionbox/issues/31
            # APScheduler 2.x
            # import six
            from pytz import timezone, utc
            from datetime import tzinfo

            # copied here from apscheduler/util.py (version 3.4)
            # copyright Alex Grönholm
            # https://github.com/agronholm/apscheduler

            def astimezone(obj):
                """
                Interprets an object as a timezone.

                :rtype: tzinfo

                """
                # six.string_types replacement: `unicode` exists only on Python 2;
                # the conditional keeps this working on Python 3 too.
                string_types = (str, unicode) if str is bytes else (str,)
                if isinstance(obj, string_types):
                    return timezone(obj)
                if isinstance(obj, tzinfo):
                    if not hasattr(obj, 'localize') or not hasattr(obj, 'normalize'):
                        raise TypeError('Only timezones from the pytz library are supported')
                    if obj.zone == 'local':
                        raise ValueError(
                            'Unable to determine the name of the local timezone -- you must explicitly '
                            'specify the name of the local timezone. Please refrain from using timezones like '
                            'EST to prevent problems with daylight saving time. Instead, use a locale based '
                            'timezone name (such as Europe/Helsinki).')
                    return obj
                if obj is not None:
                    raise TypeError('Expected tzinfo, got %s instead' % obj.__class__.__name__)

        tz = get_localzone()
        try:
            astimezone(tz)
        except ValueError:
            return False

        return True
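A short usage sketch of the wrapper above (the job function, id, and interval are illustrative assumptions):

def tick():
    print("tick")

sched = Scheduler()
sched.start()

# Same calling convention as APScheduler 3.x: func first, then the trigger name.
# On APScheduler 2.x the wrapper maps this onto add_interval_job() internally.
sched.add_job(tick, 'interval', id='tick_job', seconds=5)

# get_job() works on both code paths; the 2.x branch searches by job name.
job = sched.get_job('tick_job')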
Example #3
class PyFlowScheduler(object):
    """
    This object schedules the submission of the tasks in a :class:`Flow`.
    There are two types of errors that might occur during the execution of the jobs:

        #. Python exceptions
        #. Errors in the ab-initio code

    Python exceptions are easy to detect and are usually due to a bug in the python code or random errors such as IOError.
    The set of errors in the ab-initio code is much broader. It includes wrong input data, segmentation
    faults, problems with the resource manager, etc. The flow tries to handle the most common cases
    but there's still a lot of room for improvement.
    Note, in particular, that `PyFlowScheduler` will shut down automatically in the following cases:

        #. The number of python exceptions is > max_num_pyexcs

        #. The number of task errors (i.e. the number of tasks whose status is S_ERROR) is > max_num_abierrs

        #. The number of jobs launched becomes greater than (`safety_ratio` * total_number_of_tasks).

        #. The scheduler will send an email to the user (specified by `mailto`) every `remindme_s` seconds.
           If the mail cannot be sent, the scheduler will shut down automatically.
           This check prevents the scheduler from being trapped in an infinite loop.
    """
    # Configuration file.
    YAML_FILE = "scheduler.yml"
    USER_CONFIG_DIR = os.path.join(os.path.expanduser("~"), ".abinit", "abipy")

    Error = PyFlowSchedulerError

    @classmethod
    def autodoc(cls):
        i = cls.__init__.__doc__.index("Args:")
        return cls.__init__.__doc__[i+5:]

    def __init__(self, **kwargs):
        """
        Args:
            weeks: number of weeks to wait (DEFAULT: 0).
            days: number of days to wait (DEFAULT: 0).
            hours: number of hours to wait (DEFAULT: 0).
            minutes: number of minutes to wait (DEFAULT: 0).
            seconds: number of seconds to wait (DEFAULT: 0).
            mailto: The scheduler will send an email to `mailto` every `remindme_s` seconds.
                (DEFAULT: None i.e. not used).
            verbose: (int) verbosity level. (DEFAULT: 0)
            use_dynamic_manager: "yes" if the :class:`TaskManager` must be re-initialized from
                file before launching the jobs. (DEFAULT: "no")
            max_njobs_inqueue: Limit on the number of jobs that can be present in the queue. (DEFAULT: 200)
            remindme_s: The scheduler will send an email to the user specified by `mailto` every `remindme_s` seconds.
                (int, DEFAULT: 1 day).
            max_num_pyexcs: The scheduler will exit if the number of python exceptions is > max_num_pyexcs
                (int, DEFAULT: 0)
            max_num_abierrs: The scheduler will exit if the number of errored tasks is > max_num_abierrs
                (int, DEFAULT: 0)
            safety_ratio: The scheduler will exit if the number of jobs launched becomes greater than
               `safety_ratio` * total_number_of_tasks_in_flow. (int, DEFAULT: 5)
            max_nlaunches: Maximum number of tasks launched in a single iteration of the scheduler.
                (DEFAULT: -1 i.e. no limit)
            debug: Debug level. Use 0 for production (int, DEFAULT: 0)
            fix_qcritical: "yes" if the launcher should try to fix QCritical Errors (DEFAULT: "yes")
            rmflow: If "yes", the scheduler will remove the flow directory if the calculation
                completed successfully. (DEFAULT: "no")
            killjobs_if_errors: "yes" if the scheduler should try to kill all the running jobs
                before exiting due to an error. (DEFAULT: "yes")
        """
        # Options passed to the scheduler.
        self.sched_options = AttrDict(
            weeks=kwargs.pop("weeks", 0),
            days=kwargs.pop("days", 0),
            hours=kwargs.pop("hours", 0),
            minutes=kwargs.pop("minutes", 0),
            seconds=kwargs.pop("seconds", 0),
            #start_date=kwargs.pop("start_date", None),
        )
        if all(not v for v in self.sched_options.values()):
            raise self.Error("Wrong set of options passed to the scheduler.")

        self.mailto = kwargs.pop("mailto", None)
        self.verbose = int(kwargs.pop("verbose", 0))
        self.use_dynamic_manager = as_bool(kwargs.pop("use_dynamic_manager", False))
        self.max_njobs_inqueue = kwargs.pop("max_njobs_inqueue", 200)
        self.max_ncores_used = kwargs.pop("max_ncores_used", None)
        self.contact_resource_manager = as_bool(kwargs.pop("contact_resource_manager", False))

        self.remindme_s = float(kwargs.pop("remindme_s", 1 * 24 * 3600))
        self.max_num_pyexcs = int(kwargs.pop("max_num_pyexcs", 0))
        self.max_num_abierrs = int(kwargs.pop("max_num_abierrs", 0))
        self.safety_ratio = int(kwargs.pop("safety_ratio", 5))
        #self.max_etime_s = kwargs.pop("max_etime_s", )
        self.max_nlaunches = kwargs.pop("max_nlaunches", -1)
        self.debug = kwargs.pop("debug", 0)
        self.fix_qcritical = as_bool(kwargs.pop("fix_qcritical", True))
        self.rmflow = as_bool(kwargs.pop("rmflow", False))
        self.killjobs_if_errors = as_bool(kwargs.pop("killjobs_if_errors", True))

        self.customer_service_dir = kwargs.pop("customer_service_dir", None)
        if self.customer_service_dir is not None:
            self.customer_service_dir = Directory(self.customer_service_dir)
            self._validate_customer_service()

        if kwargs:
            raise self.Error("Unknown arguments %s" % kwargs)

        if not has_apscheduler:
            raise RuntimeError("Install apscheduler with pip")

        if has_sched_v3:
            logger.warning("Using scheduler v>=3.0.0")
            from apscheduler.schedulers.blocking import BlockingScheduler
            self.sched = BlockingScheduler()
        else:
            from apscheduler.scheduler import Scheduler
            self.sched = Scheduler(standalone=True)

        self.nlaunch = 0
        self.num_reminders = 1

        # Used to keep track of the exceptions raised while the scheduler is running
        self.exceptions = deque(maxlen=self.max_num_pyexcs + 10)

        # Used to push additional info during the execution.
        self.history = deque(maxlen=100)

    @classmethod
    def from_file(cls, filepath):
        """Read the configuration parameters from a Yaml file."""
        with open(filepath, "rt") as fh:
            return cls(**yaml.safe_load(fh))

    @classmethod
    def from_string(cls, s):
        """Create an istance from string s containing a YAML dictionary."""
        stream = cStringIO(s)
        stream.seek(0)
        return cls(**yaml.safe_load(stream))

    @classmethod
    def from_user_config(cls):
        """
        Initialize the :class:`PyFlowScheduler` from the YAML file 'scheduler.yml'.
        Search first in the working directory and then in the configuration directory of abipy.

        Raises:
            `RuntimeError` if file is not found.
        """
        # Try in the current directory.
        path = os.path.join(os.getcwd(), cls.YAML_FILE)

        if os.path.exists(path):
            return cls.from_file(path)

        # Try in the configuration directory.
        path = os.path.join(cls.USER_CONFIG_DIR, cls.YAML_FILE)

        if os.path.exists(path):
            return cls.from_file(path)

        raise cls.Error("Cannot locate %s neither in current directory nor in %s" % (cls.YAML_FILE, path))

    def __str__(self):
        """String representation."""
        lines = [self.__class__.__name__ + ", Pid: %d" % self.pid]
        app = lines.append
        app("Scheduler options: %s" % str(self.sched_options))

        if self.flow is not None:
            app(80 * "=")
            app(str(self.flow))

        return "\n".join(lines)

    @property
    def pid(self):
        """The pid of the process associated to the scheduler."""
        try:
            return self._pid
        except AttributeError:
            self._pid = os.getpid()
            return self._pid

    @property
    def pid_file(self):
        """
        Absolute path of the file with the pid.
        The file is located in the workdir of the flow
        """
        return self._pid_file

    @property
    def flow(self):
        """`Flow`."""
        try:
            return self._flow
        except AttributeError:
            return None

    @property
    def num_excs(self):
        """Number of exceptions raised so far."""
        return len(self.exceptions)

    def get_delta_etime(self):
        """Returns a `timedelta` object representing with the elapsed time."""
        return timedelta(seconds=(time.time() - self.start_time))

    def add_flow(self, flow):
        """
        Add a :class:`Flow` to the scheduler.
        """
        if hasattr(self, "_flow"):
            raise self.Error("Only one flow can be added to the scheduler.")

        # Check if we are already using a scheduler to run this flow
        flow.check_pid_file()
        flow.set_spectator_mode(False)

        # Build dirs and files (if not yet done)
        flow.build()

        with open(flow.pid_file, "wt") as fh:
            fh.write(str(self.pid))

        self._pid_file = flow.pid_file
        self._flow = flow

    def _validate_customer_service(self):
        """
        Validate the input parameters if customer service is on, then
        create the directory for tarball files with the correct permissions for user and group.
        """
        direc = self.customer_service_dir
        if not direc.exists:
            mode = 0o750
            print("Creating customer_service_dir %s with mode %s" % (direc, mode))
            direc.makedirs()
            os.chmod(direc.path, mode)

        if self.mailto is None:
            raise RuntimeError("customer_service_dir requires mailto option in scheduler.yml")

    def _do_customer_service(self):
        """
        This method is called before the shutdown of the scheduler.
        If customer_service is on and the flow didn't complete successfully,
        a lightweight tarball file with inputs and the most important output files
        is created in customer_service_dir.
        """
        if self.customer_service_dir is None: return
        doit = self.exceptions or not self.flow.all_ok
        #doit = True  # debug override: uncomment to force tarball creation
        if not doit: return

        prefix = os.path.basename(self.flow.workdir) + "_"

        import tempfile, datetime
        suffix = str(datetime.datetime.now()).replace(" ", "-")
        # Remove milliseconds
        i = suffix.find(".")
        if i != -1: suffix = suffix[:i]
        suffix += ".tar.gz"

        #back = os.getcwd()
        #os.chdir(self.customer_service_dir.path)

        _, tmpname = tempfile.mkstemp(suffix="_" + suffix, prefix=prefix,
                                      dir=self.customer_service_dir.path, text=False)

        print("Dear customer,\n We are about to generate a tarball in\n  %s" % tmpname)
        self.flow.make_light_tarfile(name=tmpname)
        #os.chdir(back)

    def start(self):
        """
        Starts the scheduler in a new thread. Returns 0 if success.
        In standalone mode, this method will block until there are no more scheduled jobs.
        """
        self.history.append("Started on %s" % time.asctime())
        self.start_time = time.time()

        if not has_apscheduler:
            raise RuntimeError("Install apscheduler with pip")

        if has_sched_v3:
            self.sched.add_job(self.callback, "interval", **self.sched_options)
        else:
            self.sched.add_interval_job(self.callback, **self.sched_options)

        errors = self.flow.look_before_you_leap()
        if errors:
            self.exceptions.append(errors)
            return 1

        # Try to run the job immediately. If something goes wrong return without initializing the scheduler.
        self._runem_all()

        if self.exceptions:
            self.cleanup()
            self.send_email(msg="Error while trying to run the flow for the first time!\n %s" % self.exceptions)
            return 1

        try:
            self.sched.start()
            return 0

        except KeyboardInterrupt:
            self.shutdown(msg="KeyboardInterrupt from user")
            if ask_yesno("Do you want to cancel all the jobs in the queue? [Y/n]"):
                print("Number of jobs cancelled:", self.flow.cancel())

            self.flow.pickle_dump()
            return -1

    def _runem_all(self):
        """
        This function checks the status of all tasks,
        tries to fix tasks that went unconverged, abicritical, or queuecritical
        and tries to run all the tasks that can be submitted.
        """
        excs = []
        flow = self.flow

        # Allow the manager to be changed at run-time
        if self.use_dynamic_manager:
            from pymatgen.io.abinit.tasks import TaskManager
            new_manager = TaskManager.from_user_config()
            for work in flow:
                work.set_manager(new_manager)

        nqjobs = 0
        if self.contact_resource_manager: # and flow.TaskManager.qadapter.QTYPE == "shell":
            # This call is expensive and therefore it's optional (must be activated in manager.yml)
            nqjobs = flow.get_njobs_in_queue()
            if nqjobs is None:
                nqjobs = 0
                if flow.manager.has_queue:
                    logger.warning('Cannot get njobs_inqueue')
        else:
            # Here we just count the number of tasks in the flow that are running.
            # This logic breaks down if there are multiple schedulers running
            # but it's easy to implement without having to contact the resource manager.
            nqjobs = (len(list(flow.iflat_tasks(status=flow.S_RUN))) +
                      len(list(flow.iflat_tasks(status=flow.S_SUB))))

        if nqjobs >= self.max_njobs_inqueue:
            print("Too many jobs in the queue: %s. No job will be submitted." % nqjobs)
            flow.check_status(show=False)
            return

        if self.max_nlaunches == -1:
            max_nlaunch = self.max_njobs_inqueue - nqjobs
        else:
            max_nlaunch = min(self.max_njobs_inqueue - nqjobs, self.max_nlaunches)

        # check status.
        flow.check_status(show=False)

        # This check is not perfect: we should build the list of tasks to submit
        # and select only a subset so that we don't exceed max_ncores_used.
        # Many sections of this code should be rewritten.
        #if self.max_ncores_used is not None and flow.ncores_used > self.max_ncores_used:
        if self.max_ncores_used is not None and flow.ncores_allocated > self.max_ncores_used:
            print("Cannot exceed max_ncores_used %s" % self.max_ncores_used)
            return

        # Try to restart the unconverged tasks
        # TODO: do not fire here but prepare for firing in rapidfire
        for task in self.flow.unconverged_tasks:
            try:
                logger.info("Flow will try restart task %s" % task)
                fired = task.restart()
                if fired:
                    self.nlaunch += 1
                    max_nlaunch -= 1
                    if max_nlaunch == 0:
                        logger.info("Restart: too many jobs in the queue, returning")
                        flow.pickle_dump()
                        return

            except task.RestartError:
                excs.append(straceback())

        # Temporarily disabled by MG because I don't know if fix_critical works after the
        # introduction of the new qadapters.
        # Re-enabled by MsS; disabled the things that do not work at the low level.
        # fix only prepares for restarting, and sets the task to ready.
        if self.fix_qcritical:
            nfixed = flow.fix_queue_critical()
            if nfixed: print("Fixed %d QCritical error(s)" % nfixed)

        nfixed = flow.fix_abicritical()
        if nfixed: print("Fixed %d AbiCritical error(s)" % nfixed)

        # update database
        flow.pickle_dump()

        # Submit the tasks that are ready.
        try:
            nlaunch = PyLauncher(flow).rapidfire(max_nlaunch=max_nlaunch, sleep_time=10)
            self.nlaunch += nlaunch
            if nlaunch:
                cprint("[%s] Number of launches: %d" % (time.asctime(), nlaunch), "yellow")

        except Exception:
            excs.append(straceback())

        # check status.
        flow.show_status()

        if excs:
            logger.critical("*** Scheduler exceptions:\n *** %s" % "\n".join(excs))
            self.exceptions.extend(excs)

    def callback(self):
        """The function that will be executed by the scheduler."""
        try:
            return self._callback()
        except:
            # All exceptions raised here will trigger the shutdown!
            s = straceback()
            self.exceptions.append(s)

            # This is useful when debugging
            #try:
            #    print("Exception in callback, will cancel all tasks")
            #    for task in self.flow.iflat_tasks():
            #        task.cancel()
            #except Exception:
            #    pass

            self.shutdown(msg="Exception raised in callback!\n" + s)

    def _callback(self):
        """The actual callback."""
        if self.debug:
            # Show the number of open file descriptors
            print(">>>>> _callback: Number of open file descriptors: %s" % get_open_fds())

        self._runem_all()

        # Mission accomplished. Shutdown the scheduler.
        all_ok = self.flow.all_ok
        if all_ok:
            return self.shutdown(msg="All tasks have reached S_OK. Will shutdown the scheduler and exit")

        # Handle failures.
        err_lines = []

        # Shall we send a reminder to the user?
        delta_etime = self.get_delta_etime()

        if delta_etime.total_seconds() > self.num_reminders * self.remindme_s:
            self.num_reminders += 1
            msg = ("Just to remind you that the scheduler with pid %s, flow %s\n has been running for %s " %
                  (self.pid, self.flow, delta_etime))
            retcode = self.send_email(msg, tag="[REMINDER]")

            if retcode:
                # Cannot send mail, shutdown now!
                msg += ("\nThe scheduler tried to send an e-mail to remind the user\n" +
                        " but send_email returned %d. Aborting now" % retcode)
                err_lines.append(msg)

        #if delta_etime.total_seconds() > self.max_etime_s:
        #    err_lines.append("\nExceeded max_etime_s %s. Will shutdown the scheduler and exit" % self.max_etime_s)

        # Too many exceptions. Shutdown the scheduler.
        if self.num_excs > self.max_num_pyexcs:
            msg = "Number of exceptions %s > %s. Will shutdown the scheduler and exit" % (
                self.num_excs, self.max_num_pyexcs)
            err_lines.append(boxed(msg))

        # Paranoid check: disable the scheduler if we have submitted
        # too many jobs (it might be due to some bug or other external reasons
        # such as race conditions between different callbacks!)
        if self.nlaunch > self.safety_ratio * self.flow.num_tasks:
            msg = "Too many jobs launched %d. Total number of tasks = %s, Will shutdown the scheduler and exit" % (
                self.nlaunch, self.flow.num_tasks)
            err_lines.append(boxed(msg))

        # Count the number of tasks with status == S_ERROR.
        if self.flow.num_errored_tasks > self.max_num_abierrs:
            msg = "Number of tasks with ERROR status %s > %s. Will shutdown the scheduler and exit" % (
                self.flow.num_errored_tasks, self.max_num_abierrs)
            err_lines.append(boxed(msg))

        # Test on the presence of deadlocks.
        g = self.flow.find_deadlocks()
        if g.deadlocked:
            # Check the flow again so that statuses are updated.
            self.flow.check_status()

            g = self.flow.find_deadlocks()
            print("deadlocked:\n", g.deadlocked, "\nrunnables:\n", g.runnables, "\nrunning\n", g.running)
            if g.deadlocked and not g.runnables and not g.running:
                err_lines.append("No runnable job with deadlocked tasks:\n%s." % str(g.deadlocked))

        if not g.runnables and not g.running:
            # Check the flow again so that statuses are updated.
            self.flow.check_status()
            g = self.flow.find_deadlocks()
            if not g.runnables and not g.running:
                err_lines.append("No task is running and cannot find other tasks to submit.")

        # Something wrong. Quit
        if err_lines:
            # Cancel all jobs.
            if self.killjobs_if_errors:
                cprint("killjobs_if_errors set to 'yes' in scheduler file. Will kill jobs before exiting.", "yellow")
                try:
                    num_cancelled = 0
                    for task in self.flow.iflat_tasks():
                        num_cancelled += task.cancel()
                    cprint("Killed %d tasks" % num_cancelled, "yellow")
                except Exception as exc:
                    cprint("Exception while trying to kill jobs:\n%s" % str(exc), "red")

            self.shutdown("\n".join(err_lines))

        return len(self.exceptions)

    def cleanup(self):
        """Cleanup routine: remove the pid file and save the pickle database"""
        try:
            os.remove(self.pid_file)
        except OSError as exc:
            logger.critical("Could not remove pid_file: %s", exc)

        # Save the final status of the flow.
        self.flow.pickle_dump()

    def shutdown(self, msg):
        """Shutdown the scheduler."""
        try:
            self.cleanup()

            self.history.append("Completed on: %s" % time.asctime())
            self.history.append("Elapsed time: %s" % self.get_delta_etime())

            if self.debug:
                print(">>>>> shutdown: Number of open file descriptors: %s" % get_open_fds())

            retcode = self.send_email(msg)
            if self.debug:
                print("send_mail retcode", retcode)

            # Write file with the list of exceptions:
            if self.exceptions:
                dump_file = os.path.join(self.flow.workdir, "_exceptions")
                with open(dump_file, "wt") as fh:
                    fh.writelines(self.exceptions)
                    fh.write("Shutdown message:\n%s" % msg)

            lines = []
            app = lines.append
            app("Submitted on: %s" % time.ctime(self.start_time))
            app("Completed on: %s" % time.asctime())
            app("Elapsed time: %s" % str(self.get_delta_etime()))

            if self.flow.all_ok:
                app("Flow completed successfully")
            else:
                app("Flow %s didn't complete successfully" % repr(self.flow.workdir))
                app("use `abirun.py FLOWDIR debug` to analyze the problem.")
                app("Shutdown message:\n%s" % msg)

            print("")
            print("\n".join(lines))
            print("")

            self._do_customer_service()

            if self.flow.all_ok:
                print("Calling flow.finalize()...")
                self.flow.finalize()
                #print("finalized:", self.flow.finalized)
                if self.rmflow:
                    app("Flow directory will be removed...")
                    try:
                        self.flow.rmtree()
                    except Exception:
                        logger.warning("Ignoring exception while trying to remove flow dir.")

        finally:
            # Shutdown the scheduler thus allowing the process to exit.
            logger.debug('This should be the shutdown of the scheduler')

            # Unschedule all the jobs before calling shutdown
            #self.sched.print_jobs()
            if not has_sched_v3:
                for job in self.sched.get_jobs():
                    self.sched.unschedule_job(job)
            #self.sched.print_jobs()

            self.sched.shutdown()
            # Uncomment the line below if shutdown does not work!
            #os.system("kill -9 %d" % os.getpid())

    def send_email(self, msg, tag=None):
        """
        Send an e-mail before completing the shutdown.
        Returns 0 if success.
        """
        try:
            return self._send_email(msg, tag)
        except:
            self.exceptions.append(straceback())
            return -2

    def _send_email(self, msg, tag):
        if self.mailto is None:
            return -1

        header = msg.splitlines()
        app = header.append

        app("Submitted on: %s" % time.ctime(self.start_time))
        app("Completed on: %s" % time.asctime())
        app("Elapsed time: %s" % str(self.get_delta_etime()))
        app("Number of errored tasks: %d" % self.flow.num_errored_tasks)
        app("Number of unconverged tasks: %d" % self.flow.num_unconverged_tasks)

        strio = cStringIO()
        strio.writelines("\n".join(header) + 4 * "\n")

        # Add the status of the flow.
        self.flow.show_status(stream=strio)

        if self.exceptions:
            # Report the list of exceptions.
            strio.writelines(self.exceptions)

        if tag is None:
            tag = " [ALL OK]" if self.flow.all_ok else " [WARNING]"

        return sendmail(subject=self.flow.name + tag, text=strio.getvalue(), mailto=self.mailto)
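The keyword arguments documented in `__init__` are exactly what `from_file` and `from_string` feed back into the constructor, so a scheduler can be configured from a small YAML snippet. A hedged sketch (the option values and the `flow` object are assumptions; the same keys could live in `scheduler.yml` and be picked up by `from_user_config`):

# Minimal sketch: build a PyFlowScheduler from an inline YAML config.
yaml_conf = """
seconds: 30              # poll the flow every 30 seconds
max_njobs_inqueue: 200
mailto: user@example.com  # hypothetical address for reminder emails
"""

sched = PyFlowScheduler.from_string(yaml_conf)
sched.add_flow(flow)     # `flow` is an existing Flow instance (assumption)
retcode = sched.start()  # returns 0 on success; blocks until the flow completes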
Example #4
class PyFlowScheduler(object):
    """
    This object schedules the submission of the tasks in a :class:`Flow`.
    There are two types of errors that might occur during the execution of the jobs:

        #. Python exceptions
        #. Abinit Errors.

    Python exceptions are easy to detect and are usually due to a bug in abinitio or random errors such as IOError.
    The set of Abinit Errors is much broader. It includes wrong input data, segmentation
    faults, problems with the resource manager, etc. Abinitio tries to handle the most common cases
    but there's still a lot of room for improvement.
    Note, in particular, that `PyFlowScheduler` will shut down automatically if

        #. The number of python exceptions is > MAX_NUM_PYEXC

        #. The number of Abinit Errors (i.e. the number of tasks whose status is S_ERROR) is > MAX_NUM_ERRORS

        #. The number of jobs launched becomes greater than (`safety_ratio` * total_number_of_tasks).

        #. The scheduler will send an email to the user (specified by `mailto`) every `remindme_s` seconds.
           If the mail cannot be sent, it will shut down automatically.
           This check prevents the scheduler from being trapped in an infinite loop.
    """
    # Configuration file.
    YAML_FILE = "scheduler.yml"
    USER_CONFIG_DIR = os.path.join(os.path.expanduser("~"), ".abinit", "abipy")

    Error = PyFlowSchedulerError

    def __init__(self, **kwargs):
        """
        Args:
            weeks: number of weeks to wait
            days: number of days to wait
            hours: number of hours to wait
            minutes: number of minutes to wait
            seconds: number of seconds to wait
            verbose: (int) verbosity level
            max_njobs_inqueue: Limit on the number of jobs that can be present in the queue
            use_dynamic_manager: True if the :class:`TaskManager` must be re-initialized from
                file before launching the jobs. Default: False
            max_nlaunches: Maximum number of tasks launched by rapidfire (default -1 i.e. no limit)
        """
        # Options passed to the scheduler.
        self.sched_options = AttrDict(
            weeks=kwargs.pop("weeks", 0),
            days=kwargs.pop("days", 0),
            hours=kwargs.pop("hours", 0),
            minutes=kwargs.pop("minutes", 0),
            seconds=kwargs.pop("seconds", 0),
            #start_date=kwargs.pop("start_date", None),
        )

        if all(not v for v in self.sched_options.values()):
            raise self.Error("Wrong set of options passed to the scheduler.")

        self.mailto = kwargs.pop("mailto", None)
        self.verbose = int(kwargs.pop("verbose", 0))
        self.use_dynamic_manager = kwargs.pop("use_dynamic_manager", False)
        self.max_njobs_inqueue = kwargs.pop("max_njobs_inqueue", 200)
        self.contact_resource_manager = as_bool(kwargs.pop("contact_resource_manager", False))

        self.remindme_s = float(kwargs.pop("remindme_s", 4 * 24 * 3600))
        self.max_num_pyexcs = int(kwargs.pop("max_num_pyexcs", 0))
        self.max_num_abierrs = int(kwargs.pop("max_num_abierrs", 0))
        self.safety_ratio = int(kwargs.pop("safety_ratio", 5))
        #self.max_etime_s = kwargs.pop("max_etime_s", )
        self.max_nlaunches = kwargs.pop("max_nlaunches", -1)
        self.debug = kwargs.pop("debug", 0)

        if kwargs:
            raise self.Error("Unknown arguments %s" % kwargs)

        if has_sched_v3:
            from apscheduler.schedulers.blocking import BlockingScheduler
            self.sched = BlockingScheduler()
        else:
            from apscheduler.scheduler import Scheduler
            self.sched = Scheduler(standalone=True)

        self.nlaunch = 0
        self.num_reminders = 1

        # Used to keep track of the exceptions raised while the scheduler is running
        self.exceptions = collections.deque(maxlen=self.max_num_pyexcs + 10)

        # Used to push additional info during the execution.
        self.history = collections.deque(maxlen=100)

    @classmethod
    def from_file(cls, filepath):
        """Read the configuration parameters from a Yaml file."""
        with open(filepath, "r") as fh:
            return cls(**yaml.safe_load(fh))

    @classmethod
    def from_string(cls, s):
        """Create an istance from string s containing a YAML dictionary."""
        stream = cStringIO(s)
        stream.seek(0)
        return cls(**yaml.safe_load(stream))

    @classmethod
    def from_user_config(cls):
        """
        Initialize the :class:`PyFlowScheduler` from the YAML file 'scheduler.yml'.
        Search first in the working directory and then in the configuration directory of abipy.

        Raises:
            RuntimeError if file is not found.
        """
        # Try in the current directory.
        path = os.path.join(os.getcwd(), cls.YAML_FILE)

        if os.path.exists(path):
            return cls.from_file(path)

        # Try in the configuration directory.
        path = os.path.join(cls.USER_CONFIG_DIR, cls.YAML_FILE)

        if os.path.exists(path):
            return cls.from_file(path)

        raise cls.Error("Cannot locate %s either in the current directory or in %s" % (cls.YAML_FILE, path))

    def __str__(self):
        """String representation."""
        lines = [self.__class__.__name__ + ", Pid: %d" % self.pid]
        app = lines.append

        app("Scheduler options: %s" % str(self.sched_options))
        app(80 * "=")
        app(str(self.flow))

        return "\n".join(lines)

    @property
    def pid(self):
        """The pid of the process associated to the scheduler."""
        try:
            return self._pid

        except AttributeError:
            self._pid = os.getpid()
            return self._pid

    @property
    def pid_file(self):
        """
        Absolute path of the file with the pid.
        The file is located in the workdir of the flow
        """
        return self._pid_file

    @property
    def flow(self):
        """`Flow`."""
        return self._flow

    @property
    def num_excs(self):
        """Number of exceptions raised so far."""
        return len(self.exceptions)

    def get_delta_etime(self):
        """Returns a `timedelta` object representing with the elapsed time."""
        return timedelta(seconds=(time.time() - self.start_time))

    def add_flow(self, flow):
        """Add an :class:`Flow` flow to the scheduler."""
        if hasattr(self, "_flow"):
            raise self.Error("Only one flow can be added to the scheduler.")

        pid_file = os.path.join(flow.workdir, "_PyFlowScheduler.pid")

        if os.path.isfile(pid_file):
            flow.show_status()

            raise self.Error("""\
                pid_file %s already exists
                There are two possibilities:

                   1) There's another instance of PyFlowScheduler running
                   2) The previous scheduler didn't exit in a clean way

                To solve case 1:
                   Kill the previous scheduler (use 'kill pid' where pid is the number reported in the file)
                   Then you can restart the new scheduler.

                To solve case 2:
                   Remove the pid_file and restart the scheduler.

                Exiting""" % pid_file)

        with open(pid_file, "w") as fh:
            fh.write(str(self.pid))

        self._pid_file = pid_file
        self._flow = flow

    def start(self):
        """
        Starts the scheduler in a new thread. Returns True if success.
        In standalone mode, this method will block until there are no more scheduled jobs.
        """
        self.history.append("Started on %s" % time.asctime())
        self.start_time = time.time()

        if has_sched_v3:
            self.sched.add_job(self.callback, "interval", **self.sched_options)
        else:
            self.sched.add_interval_job(self.callback, **self.sched_options)

        errors = self.flow.look_before_you_leap()
        if errors:
            self.exceptions.append(errors)
            return False

        # Try to run the job immediately. If something goes wrong return without initializing the scheduler.
        self._runem_all()

        if self.exceptions:
            self.cleanup()
            self.send_email(msg="Error while trying to run the flow for the first time!\n %s" % self.exceptions)
            return False

        try:
            self.sched.start()
            return True

        except KeyboardInterrupt:
            self.shutdown(msg="KeyboardInterrupt from user")
            if ask_yesno("Do you want to cancel all the jobs in the queue? [Y/n]"): 
                self.flow.cancel()
            self.flow.pickle_dump()
            return False

    def _runem_all(self):
        """
        This function checks the status of all tasks,
        tries to fix tasks that went unconverged, abicritical, or queuecritical
        and tries to run all the tasks that can be submitted.
        """
        excs = []
        flow = self.flow

        # Allow the manager to be changed at run-time
        if self.use_dynamic_manager:
            from pymatgen.io.abinitio.tasks import TaskManager
            new_manager = TaskManager.from_user_config()
            for work in flow:
                work.set_manager(new_manager)

        nqjobs = 0
        if self.contact_resource_manager:
            # This call is expensive and therefore it's optional
            nqjobs = flow.get_njobs_in_queue()
            if nqjobs is None:
                nqjobs = 0
                if flow.manager.has_queue: logger.warning('Cannot get njobs_inqueue')

            if nqjobs >= self.max_njobs_inqueue:
                logger.info("Too many jobs in the queue, returning")
                return

        if self.max_nlaunches == -1:
            max_nlaunch = self.max_njobs_inqueue - nqjobs
        else:
            max_nlaunch = min(self.max_njobs_inqueue - nqjobs, self.max_nlaunches)

        # check status and print it.
        flow.check_status(show=False)

        # fix problems
        # Try to restart the unconverged tasks
        # TODO: do not fire here but prepare for firing in rapidfire
        for task in self.flow.unconverged_tasks:
            try:
                logger.info("Flow will try restart task %s" % task)
                fired = task.restart()
                if fired: 
                    self.nlaunch += 1
                    max_nlaunch -= 1
                    if max_nlaunch == 0:
                        logger.info("Restart: too many jobs in the queue, returning")
                        flow.pickle_dump()
                        return
            except task.RestartError:
                excs.append(straceback())

        # moved here from within rapidfire ...
        # fix only prepares for restarting, and sets the task to ready
        nfixed = flow.fix_abi_critical()
        if nfixed: print("Fixed %d AbiCritical errors" % nfixed)

        # Temporarily disabled by MG because I don't know if fix_critical works after the
        # introduction of the new qadapters
        if False:
            nfixed = flow.fix_queue_critical()
            if nfixed: print("Fixed %d QueueCritical errors" % nfixed)

        # update database
        flow.pickle_dump()

        # Submit the tasks that are ready.
        try:
            nlaunch = PyLauncher(flow).rapidfire(max_nlaunch=max_nlaunch, sleep_time=10)
            self.nlaunch += nlaunch

            if nlaunch:
                print("[%s] Number of launches: %d" % (time.asctime(), nlaunch))

        except Exception:
            excs.append(straceback())

        flow.show_status()

        if excs:
            logger.critical("*** Scheduler exceptions:\n *** %s" % "\n".join(excs))
            self.exceptions.extend(excs)

    def callback(self):
        """The function that will be executed by the scheduler."""
        try:
            return self._callback()
        except:
            # All exceptions raised here will trigger the shutdown!
            self.exceptions.append(straceback())
            self.shutdown(msg="Exception raised in callback!")

    def _callback(self):
        """The actual callback."""
        if self.debug:
            # Show the number of open file descriptors
            print(">>>>> _callback: Number of open file descriptors: %s" % get_open_fds())

        self._runem_all()

        # Mission accomplished. Shutdown the scheduler.
        all_ok = self.flow.all_ok
        if self.verbose:
            print("all_ok", all_ok)

        if all_ok:
            self.shutdown(msg="All tasks have reached S_OK. Will shutdown the scheduler and exit")

        # Handle failures.
        err_msg = ""

        # Shall we send a reminder to the user?
        delta_etime = self.get_delta_etime()

        if delta_etime.total_seconds() > self.num_reminders * self.remindme_s:
            self.num_reminders += 1
            msg = ("Just to remind you that the scheduler with pid %s, flow %s\n has been running for %s " %
                  (self.pid, self.flow, delta_etime))
            retcode = self.send_email(msg, tag="[REMINDER]")

            if retcode:
                # Cannot send mail, shutdown now!
                msg += ("\nThe scheduler tried to send an e-mail to remind the user\n" +
                        " but send_email returned %d. Aborting now" % retcode)
                err_msg += msg

        #if delta_etime.total_seconds() > self.max_etime_s:
        #    err_msg += "\nExceeded max_etime_s %s. Will shutdown the scheduler and exit" % self.max_etime_s

        # Too many exceptions. Shutdown the scheduler.
        if self.num_excs > self.max_num_pyexcs:
            msg = "Number of exceptions %s > %s. Will shutdown the scheduler and exit" % (
                self.num_excs, self.max_num_pyexcs)
            err_msg += boxed(msg)

        # Paranoid check: disable the scheduler if we have submitted
        # too many jobs (it might be due to some bug or other external reasons
        # such as race conditions between different callbacks!)
        if self.nlaunch > self.safety_ratio * self.flow.num_tasks:
            msg = "Too many jobs launched %d. Total number of tasks = %s, Will shutdown the scheduler and exit" % (
                self.nlaunch, self.flow.num_tasks)
            err_msg += boxed(msg)

        # Count the number of tasks with status == S_ERROR.
        if self.flow.num_errored_tasks > self.max_num_abierrs:
            msg = "Number of tasks with ERROR status %s > %s. Will shutdown the scheduler and exit" % (
                self.flow.num_errored_tasks, self.max_num_abierrs)
            err_msg += boxed(msg)

        deadlocked, runnables, running = self.flow.deadlocked_runnables_running()
        #print("\ndeadlocked:\n", deadlocked, "\nrunnables:\n", runnables, "\nrunning\n", running)
        if deadlocked and not runnables and not running:
            msg = "No runnable job with deadlocked tasks:\n %s\nWill shutdown the scheduler and exit" % str(deadlocked)
            err_msg += boxed(msg)

        if err_msg:
            # Something wrong. Quit
            self.shutdown(err_msg)

        return len(self.exceptions)

    def cleanup(self):
        """Cleanup routine: remove the pid file and save the pickle database"""
        try:
            os.remove(self.pid_file)
        except OSError:
            logger.critical("Could not remove pid_file")

        # Save the final status of the flow.
        self.flow.pickle_dump()

    def shutdown(self, msg):
        """Shutdown the scheduler."""
        try:
            self.cleanup()

            self.history.append("Completed on %s" % time.asctime())
            self.history.append("Elapsed time %s" % self.get_delta_etime())

            if self.debug:
                print(">>>>> shutdown: Number of open file descriptors: %s" % get_open_fds())

            retcode = self.send_email(msg)
            if self.debug:
                print("send_mail retcode", retcode)

            # Write file with the list of exceptions:
            if self.exceptions:
                dump_file = os.path.join(self.flow.workdir, "_exceptions")
                with open(dump_file, "w") as fh:
                    fh.writelines(self.exceptions)
                    fh.write("Shutdown message:\n%s" % msg)

            lines = []
            app = lines.append
            app("Submitted on %s" % time.ctime(self.start_time))
            app("Completed on %s" % time.asctime())
            app("Elapsed time %s" % str(self.get_delta_etime()))
            if self.flow.all_ok:
                app("Flow completed successfully")
            else:
                app("Flow didn't complete successfully")
                app("Shutdown message:\n%s" % msg)
            print("\n".join(lines))

        finally:
            # Shutdown the scheduler thus allowing the process to exit.
            logger.debug('this should be the shutdown of the scheduler')

            # Unschedule all the jobs before calling shutdown
            #self.sched.print_jobs()
            if not has_sched_v3:
                for job in self.sched.get_jobs():
                    self.sched.unschedule_job(job)
            #self.sched.print_jobs()
                
            self.sched.shutdown()
            # Uncomment the line below if shutdown does not work!
            #os.system("kill -9 %d" % os.getpid())

    def send_email(self, msg, tag=None):
        """
        Send an e-mail before completing the shutdown.
        Returns 0 if success.
        """
        try:
            return self._send_email(msg, tag)
        except:
            self.exceptions.append(straceback())
            return -2

    def _send_email(self, msg, tag):
        if self.mailto is None:
            return -1

        header = msg.splitlines()
        app = header.append

        app("Submitted on %s" % time.ctime(self.start_time))
        app("Completed on %s" % time.asctime())
        app("Elapsed time %s" % str(self.get_delta_etime()))
        app("Number of errored tasks: %d" % self.flow.num_errored_tasks)
        app("Number of unconverged tasks: %d" % self.flow.num_unconverged_tasks)

        strio = cStringIO()
        strio.writelines("\n".join(header) + 4 * "\n")

        # Add the status of the flow.
        self.flow.show_status(stream=strio)

        if self.exceptions:
            # Report the list of exceptions.
            strio.writelines(self.exceptions)

        if tag is None:
            tag = " [ALL OK]" if self.flow.all_ok else " [WARNING]"

        return sendmail(subject=self.flow.name + tag, text=strio.getvalue(), mailto=self.mailto)
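The `sched_options` built in `__init__` are passed straight to APScheduler in `start()`, where the interval trigger combines them additively, exactly like `datetime.timedelta`. A quick illustration of how the wait options compose (the values are arbitrary):

from datetime import timedelta

# e.g. a scheduler.yml containing "minutes: 5" and "seconds: 30"
opts = dict(weeks=0, days=0, hours=0, minutes=5, seconds=30)
print(timedelta(**opts))  # 0:05:30 -> the callback runs every 5.5 minutes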
Example #5
import datetime
import time
import os
from apscheduler.scheduler import Scheduler

# Start the scheduler
sched = Scheduler()
sched.daemonic = False
sched.start()

def job_function():
    os.system(" spark-submit --class ClimateClustering --master local[4] CSYE7374_Final_Spark-assembly-1.0.jar ec2-52-20-252-81.compute-1.amazonaws.com")
    os.system(" spark-submit --class ClimateARIMA --master local[4] CSYE7374_Final_Spark-assembly-1.0.jar ec2-52-20-252-81.compute-1.amazonaws.com")
    print(datetime.datetime.now())
    time.sleep(20)

# Schedule job_function to run once a day at midnight.
# Note: APScheduler 2.x (imported above) uses add_cron_job(); the
# add_job(func, "cron", ...) form is the 3.x API and would fail here.
sched.add_cron_job(job_function, hour=0, minute=0, second=0)
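For reference, a hedged APScheduler 3.x version of the same daily job (3.x accepts the trigger name directly in add_job):

# APScheduler 3.x equivalent: BlockingScheduler plus a cron trigger.
from apscheduler.schedulers.blocking import BlockingScheduler

sched3 = BlockingScheduler()
sched3.add_job(job_function, 'cron', hour=0, minute=0, second=0)
sched3.start()  # blocks; job_function fires once a day at midnight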

Exemple #6
0
class PyFlowScheduler(object):
    """
    This object schedules the submission of the tasks in a :class:`Flow`.
    There are two types of errors that might occur during the execution of the jobs:

        #. Python exceptions
        #. Errors in the ab-initio code

    Python exceptions are easy to detect and are usually due to a bug in the python code or random errors such as IOError.
    The set of errors in the ab-initio is much much broader. It includes wrong input data, segmentation
    faults, problems with the resource manager, etc. The flow tries to handle the most common cases
    but there's still a lot of room for improvement.
    Note, in particular, that `PyFlowScheduler` will shutdown automatically in the following cases:

        #. The number of python exceptions is > max_num_pyexcs

        #. The number of task errors (i.e. the number of tasks whose status is S_ERROR) is > max_num_abierrs

        #. The number of jobs launched becomes greater than (`safety_ratio` * total_number_of_tasks).

        #. The scheduler will send an email to the user (specified by `mailto`) every `remindme_s` seconds.
           If the mail cannot be sent, the scheduler will shutdown automatically.
           This check prevents the scheduler from being trapped in an infinite loop.
    """
    # Configuration file.
    YAML_FILE = "scheduler.yml"
    USER_CONFIG_DIR = os.path.join(os.path.expanduser("~"), ".abinit", "abipy")

    Error = PyFlowSchedulerError

    @classmethod
    def autodoc(cls):
        i = cls.__init__.__doc__.index("Args:")
        return cls.__init__.__doc__[i + 5:]

    def __init__(self, **kwargs):
        """
        Args:
            weeks: number of weeks to wait (DEFAULT: 0).
            days: number of days to wait (DEFAULT: 0).
            hours: number of hours to wait (DEFAULT: 0).
            minutes: number of minutes to wait (DEFAULT: 0).
            seconds: number of seconds to wait (DEFAULT: 0).
            mailto: The scheduler will send an email to `mailto` every `remindme_s` seconds.
                (DEFAULT: None i.e. not used).
            verbose: (int) verbosity level. (DEFAULT: 0)
            use_dynamic_manager: "yes" if the :class:`TaskManager` must be re-initialized from
                file before launching the jobs. (DEFAULT: "no")
            max_njobs_inqueue: Limit on the number of jobs that can be present in the queue. (DEFAULT: 200)
            remindme_s: The scheduler will send an email to the user specified by `mailto` every `remindme_s` seconds.
                (int, DEFAULT: 1 day).
            max_num_pyexcs: The scheduler will exit if the number of python exceptions is > max_num_pyexcs
                (int, DEFAULT: 0)
            max_num_abierrs: The scheduler will exit if the number of errored tasks is > max_num_abierrs
                (int, DEFAULT: 0)
            safety_ratio: The scheduler will exits if the number of jobs launched becomes greater than
               `safety_ratio` * total_number_of_tasks_in_flow. (int, DEFAULT: 5)
            max_nlaunches: Maximum number of tasks launched in a single iteration of the scheduler.
                (DEFAULT: -1 i.e. no limit)
            debug: Debug level. Use 0 for production (int, DEFAULT: 0)
            fix_qcritical: "yes" if the launcher should try to fix QCritical Errors (DEFAULT: "yes")
            rmflow: If "yes", the scheduler will remove the flow directory if the calculation
                completed successfully. (DEFAULT: "no")
            killjobs_if_errors: "yes" if the scheduler should try to kill all the runnnig jobs
                before exiting due to an error. (DEFAULT: "yes")
        """
        # Options passed to the scheduler.
        self.sched_options = AttrDict(
            weeks=kwargs.pop("weeks", 0),
            days=kwargs.pop("days", 0),
            hours=kwargs.pop("hours", 0),
            minutes=kwargs.pop("minutes", 0),
            seconds=kwargs.pop("seconds", 0),
            #start_date=kwargs.pop("start_date", None),
        )
        if all(not v for v in self.sched_options.values()):
            raise self.Error("Wrong set of options passed to the scheduler.")

        self.mailto = kwargs.pop("mailto", None)
        self.verbose = int(kwargs.pop("verbose", 0))
        self.use_dynamic_manager = as_bool(
            kwargs.pop("use_dynamic_manager", False))
        self.max_njobs_inqueue = kwargs.pop("max_njobs_inqueue", 200)
        self.max_ncores_used = kwargs.pop("max_ncores_used", None)
        self.contact_resource_manager = as_bool(
            kwargs.pop("contact_resource_manager", False))

        self.remindme_s = float(kwargs.pop("remindme_s", 1 * 24 * 3600))
        self.max_num_pyexcs = int(kwargs.pop("max_num_pyexcs", 0))
        self.max_num_abierrs = int(kwargs.pop("max_num_abierrs", 0))
        self.safety_ratio = int(kwargs.pop("safety_ratio", 5))
        #self.max_etime_s = kwargs.pop("max_etime_s", )
        self.max_nlaunches = kwargs.pop("max_nlaunches", -1)
        self.debug = kwargs.pop("debug", 0)
        self.fix_qcritical = as_bool(kwargs.pop("fix_qcritical", True))
        self.rmflow = as_bool(kwargs.pop("rmflow", False))
        self.killjobs_if_errors = as_bool(
            kwargs.pop("killjobs_if_errors", True))

        self.customer_service_dir = kwargs.pop("customer_service_dir", None)
        if self.customer_service_dir is not None:
            self.customer_service_dir = Directory(self.customer_service_dir)
            self._validate_customer_service()

        if kwargs:
            raise self.Error("Unknown arguments %s" % kwargs)

        if not has_apscheduler:
            raise RuntimeError("Install apscheduler with pip")

        if has_sched_v3:
            logger.warning("Using scheduler v>=3.0.0")
            from apscheduler.schedulers.blocking import BlockingScheduler
            self.sched = BlockingScheduler()
        else:
            from apscheduler.scheduler import Scheduler
            self.sched = Scheduler(standalone=True)

        self.nlaunch = 0
        self.num_reminders = 1

        # Used to keep track of the exceptions raised while the scheduler is running
        self.exceptions = deque(maxlen=self.max_num_pyexcs + 10)

        # Used to push additional info during the execution.
        self.history = deque(maxlen=100)

    @classmethod
    def from_file(cls, filepath):
        """Read the configuration parameters from a Yaml file."""
        with open(filepath, "rt") as fh:
            return cls(**yaml.safe_load(fh))

    @classmethod
    def from_string(cls, s):
        """Create an istance from string s containing a YAML dictionary."""
        stream = cStringIO(s)
        stream.seek(0)
        return cls(**yaml.safe_load(stream))

    @classmethod
    def from_user_config(cls):
        """
        Initialize the :class:`PyFlowScheduler` from the YAML file 'scheduler.yml'.
        Search first in the working directory and then in the configuration directory of abipy.

        Raises:
            `RuntimeError` if file is not found.
        """
        # Try in the current directory.
        path = os.path.join(os.getcwd(), cls.YAML_FILE)

        if os.path.exists(path):
            return cls.from_file(path)

        # Try in the configuration directory.
        path = os.path.join(cls.USER_CONFIG_DIR, cls.YAML_FILE)

        if os.path.exists(path):
            return cls.from_file(path)

        raise cls.Error(
            "Cannot locate %s either in the current directory or in %s" %
            (cls.YAML_FILE, path))
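
    # Example scheduler.yml consumed by from_user_config (a sketch: any of the
    # keyword arguments documented in __init__ may appear as top-level keys):
    #
    #     seconds: 30
    #     mailto: user@example.com
    #     max_njobs_inqueue: 100
    #     rmflow: "no"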

    def __str__(self):
        """String representation."""
        lines = [self.__class__.__name__ + ", Pid: %d" % self.pid]
        app = lines.append
        app("Scheduler options: %s" % str(self.sched_options))

        if self.flow is not None:
            app(80 * "=")
            app(str(self.flow))

        return "\n".join(lines)

    @property
    def pid(self):
        """The pid of the process associated to the scheduler."""
        try:
            return self._pid
        except AttributeError:
            self._pid = os.getpid()
            return self._pid

    @property
    def pid_file(self):
        """
        Absolute path of the file with the pid.
        The file is located in the workdir of the flow
        """
        return self._pid_file

    @property
    def flow(self):
        """`Flow`."""
        try:
            return self._flow
        except AttributeError:
            return None

    @property
    def num_excs(self):
        """Number of exceptions raised so far."""
        return len(self.exceptions)

    def get_delta_etime(self):
        """Returns a `timedelta` object representing with the elapsed time."""
        return timedelta(seconds=(time.time() - self.start_time))

    def add_flow(self, flow):
        """
        Add a :class:`Flow` to the scheduler.
        """
        if hasattr(self, "_flow"):
            raise self.Error("Only one flow can be added to the scheduler.")

        # Check if we are already using a scheduler to run this flow
        flow.check_pid_file()
        flow.set_spectator_mode(False)

        # Build dirs and files (if not yet done)
        flow.build()

        with open(flow.pid_file, "wt") as fh:
            fh.write(str(self.pid))

        self._pid_file = flow.pid_file
        self._flow = flow

    def _validate_customer_service(self):
        """
        Validate the input parameters; if customer service is on, then
        create the directory for tarball files with the correct permissions for user and group.
        """
        direc = self.customer_service_dir
        if not direc.exists:
            mode = 0o750
            print("Creating customer_service_dir %s with mode %s" %
                  (direc, oct(mode)))
            direc.makedirs()
            os.chmod(direc.path, mode)

        if self.mailto is None:
            raise RuntimeError(
                "customer_service_dir requires mailto option in scheduler.yml")

    def _do_customer_service(self):
        """
        This method is called before the shutdown of the scheduler.
        If customer_service is on and the flow didn't complete successfully,
        a lightweight tarball file with the inputs and the most important output files
        is created in customer_service_dir.
        """
        if self.customer_service_dir is None: return
        doit = self.exceptions or not self.flow.all_ok
        if not doit: return

        prefix = os.path.basename(self.flow.workdir) + "_"

        import tempfile, datetime
        suffix = str(datetime.datetime.now()).replace(" ", "-")
        # Remove milliseconds
        i = suffix.find(".")
        if i != -1: suffix = suffix[:i]
        suffix += ".tar.gz"

        #back = os.getcwd()
        #os.chdir(self.customer_service_dir.path)

        _, tmpname = tempfile.mkstemp(suffix="_" + suffix,
                                      prefix=prefix,
                                      dir=self.customer_service_dir.path,
                                      text=False)

        print("Dear customer,\n We are about to generate a tarball in\n  %s" %
              tmpname)
        self.flow.make_light_tarfile(name=tmpname)
        #os.chdir(back)

    def start(self):
        """
        Starts the scheduler in a new thread. Returns 0 on success.
        In standalone mode, this method will block until there are no more scheduled jobs.
        """
        self.history.append("Started on %s" % time.asctime())
        self.start_time = time.time()

        if not has_apscheduler:
            raise RuntimeError("Install apscheduler with pip")

        if has_sched_v3:
            self.sched.add_job(self.callback, "interval", **self.sched_options)
        else:
            self.sched.add_interval_job(self.callback, **self.sched_options)

        errors = self.flow.look_before_you_leap()
        if errors:
            self.exceptions.append(errors)
            return 1

        # Try to run the job immediately. If something goes wrong return without initializing the scheduler.
        self._runem_all()

        if self.exceptions:
            self.cleanup()
            self.send_email(
                msg=
                "Error while trying to run the flow for the first time!\n %s" %
                self.exceptions)
            return 1

        try:
            self.sched.start()
            return 0

        except KeyboardInterrupt:
            self.shutdown(msg="KeyboardInterrupt from user")
            if ask_yesno(
                    "Do you want to cancel all the jobs in the queue? [Y/n]"):
                print("Number of jobs cancelled:", self.flow.cancel())

            self.flow.pickle_dump()
            return -1

    def _runem_all(self):
        """
        This function checks the status of all tasks,
        tries to fix tasks that went unconverged, abicritical, or queuecritical
        and tries to run all the tasks that can be submitted.
        """
        excs = []
        flow = self.flow

        # Allow the manager to be changed at run-time.
        if self.use_dynamic_manager:
            from pymatgen.io.abinit.tasks import TaskManager
            new_manager = TaskManager.from_user_config()
            for work in flow:
                work.set_manager(new_manager)

        nqjobs = 0
        if self.contact_resource_manager:
            # This call is expensive and therefore it's optional
            nqjobs = flow.get_njobs_in_queue()
            if nqjobs is None:
                nqjobs = 0
                if flow.manager.has_queue:
                    logger.warning('Cannot get njobs_inqueue')

            if nqjobs >= self.max_njobs_inqueue:
                print("Too many jobs in the queue: %s, returning" % nqjobs)
                return

        if self.max_nlaunches == -1:
            max_nlaunch = self.max_njobs_inqueue - nqjobs
        else:
            max_nlaunch = min(self.max_njobs_inqueue - nqjobs,
                              self.max_nlaunches)

        # check status.
        flow.check_status(show=False)

        # This check is not perfect: we should build the list of tasks to submit
        # and select only a subset so that we don't exceed max_ncores_used.
        # Many sections of this code should be rewritten.
        #if self.max_ncores_used is not None and flow.ncores_used > self.max_ncores_used:
        if self.max_ncores_used is not None and flow.ncores_allocated > self.max_ncores_used:
            print("Cannot exceed max_ncores_use:d %s" % self.max_ncores_used)
            return

        # Try to restart the unconverged tasks
        # TODO: do not fire here but prepare for firing in rapidfire
        for task in self.flow.unconverged_tasks:
            try:
                logger.info("Flow will try restart task %s" % task)
                fired = task.restart()
                if fired:
                    self.nlaunch += 1
                    max_nlaunch -= 1
                    if max_nlaunch == 0:
                        logger.info(
                            "Restart: too many jobs in the queue, returning")
                        flow.pickle_dump()
                        return

            except task.RestartError:
                excs.append(straceback())

        # Temporarily disabled by MG because I don't know if fix_critical works after the
        # introduction of the new qadapters.
        # Re-enabled by MsS: disable the things that do not work at low level.
        # fix only prepares for restarting, and sets the status to ready.
        if self.fix_qcritical:
            nfixed = flow.fix_queue_critical()
            if nfixed: print("Fixed %d QCritical error(s)" % nfixed)

        nfixed = flow.fix_abicritical()
        if nfixed: print("Fixed %d AbiCritical error(s)" % nfixed)

        # update database
        flow.pickle_dump()

        # Submit the tasks that are ready.
        try:
            nlaunch = PyLauncher(flow).rapidfire(max_nlaunch=max_nlaunch,
                                                 sleep_time=10)
            self.nlaunch += nlaunch

            if nlaunch:
                print("[%s] Number of launches: %d" %
                      (time.asctime(), nlaunch))

        except Exception:
            excs.append(straceback())

        # check status.
        flow.show_status()

        if excs:
            logger.critical("*** Scheduler exceptions:\n *** %s" %
                            "\n".join(excs))
            self.exceptions.extend(excs)

    def callback(self):
        """The function that will be executed by the scheduler."""
        try:
            return self._callback()
        except:
            # All exceptions raised here will trigger the shutdown!
            s = straceback()
            self.exceptions.append(s)

            # This is useful when debugging
            #try:
            #    print("Exception in callback, will cancel all tasks")
            #    for task in self.flow.iflat_tasks():
            #        task.cancel()
            #except Exception:
            #    pass

            self.shutdown(msg="Exception raised in callback!\n" + s)

    def _callback(self):
        """The actual callback."""
        if self.debug:
            # Show the number of open file descriptors
            print(">>>>> _callback: Number of open file descriptors: %s" %
                  get_open_fds())

        self._runem_all()

        # Mission accomplished. Shutdown the scheduler.
        all_ok = self.flow.all_ok
        if all_ok:
            return self.shutdown(
                msg=
                "All tasks have reached S_OK. Will shutdown the scheduler and exit"
            )

        # Handle failures.
        err_lines = []

        # Shall we send a reminder to the user?
        delta_etime = self.get_delta_etime()

        if delta_etime.total_seconds() > self.num_reminders * self.remindme_s:
            self.num_reminders += 1
            msg = (
                "Just to remind you that the scheduler with pid %s, flow %s\n has been running for %s "
                % (self.pid, self.flow, delta_etime))
            retcode = self.send_email(msg, tag="[REMINDER]")

            if retcode:
                # Cannot send mail, shutdown now!
                msg += (
                    "\nThe scheduler tried to send an e-mail to remind the user\n"
                    + " but send_email returned %d. Aborting now" % retcode)
                err_lines.append(msg)

        #if delta_etime.total_seconds() > self.max_etime_s:
        #    err_lines.append("\nExceeded max_etime_s %s. Will shutdown the scheduler and exit" % self.max_etime_s)

        # Too many exceptions. Shutdown the scheduler.
        if self.num_excs > self.max_num_pyexcs:
            msg = "Number of exceptions %s > %s. Will shutdown the scheduler and exit" % (
                self.num_excs, self.max_num_pyexcs)
            err_lines.append(boxed(msg))

        # Paranoid check: disable the scheduler if we have submitted
        # too many jobs (it might be due to some bug or other external reasons
        # such as race conditions between different callbacks!)
        if self.nlaunch > self.safety_ratio * self.flow.num_tasks:
            msg = "Too many jobs launched %d. Total number of tasks = %s, Will shutdown the scheduler and exit" % (
                self.nlaunch, self.flow.num_tasks)
            err_lines.append(boxed(msg))

        # Count the number of tasks with status == S_ERROR.
        if self.flow.num_errored_tasks > self.max_num_abierrs:
            msg = "Number of tasks with ERROR status %s > %s. Will shutdown the scheduler and exit" % (
                self.flow.num_errored_tasks, self.max_num_abierrs)
            err_lines.append(boxed(msg))

        # Test on the presence of deadlocks.
        g = self.flow.find_deadlocks()
        if g.deadlocked:
            # Check the flow again so that the statuses are updated.
            self.flow.check_status()

            g = self.flow.find_deadlocks()
            print("deadlocked:\n", g.deadlocked, "\nrunnables:\n", g.runnables,
                  "\nrunning\n", g.running)
            if g.deadlocked and not g.runnables and not g.running:
                err_lines.append(
                    "No runnable job with deadlocked tasks:\n%s." %
                    str(g.deadlocked))

        if not g.runnables and not g.running:
            # Check the flow again so that the statuses are updated.
            self.flow.check_status()
            g = self.flow.find_deadlocks()
            if not g.runnables and not g.running:
                err_lines.append(
                    "No task is running and no other task can be submitted."
                )

        # Something wrong. Quit
        if err_lines:
            # Cancel all jobs.
            if self.killjobs_if_errors:
                cprint(
                    "killjobs_if_errors set to 'yes' in scheduler file. Will kill jobs before exiting.",
                    "yellow")
                try:
                    num_cancelled = 0
                    for task in self.flow.iflat_tasks():
                        num_cancelled += task.cancel()
                    cprint("Killed %d tasks" % num_cancelled, "yellow")
                except Exception as exc:
                    cprint(
                        "Exception while trying to kill jobs:\n%s" % str(exc),
                        "red")

            self.shutdown("\n".join(err_lines))

        return len(self.exceptions)

    def cleanup(self):
        """Cleanup routine: remove the pid file and save the pickle database"""
        try:
            os.remove(self.pid_file)
        except OSError as exc:
            logger.critical("Could not remove pid_file: %s", exc)

        # Save the final status of the flow.
        self.flow.pickle_dump()

    def shutdown(self, msg):
        """Shutdown the scheduler."""
        try:
            self.cleanup()

            self.history.append("Completed on: %s" % time.asctime())
            self.history.append("Elapsed time: %s" % self.get_delta_etime())

            if self.debug:
                print(">>>>> shutdown: Number of open file descriptors: %s" %
                      get_open_fds())

            retcode = self.send_email(msg)
            if self.debug:
                print("send_mail retcode", retcode)

            # Write file with the list of exceptions:
            if self.exceptions:
                dump_file = os.path.join(self.flow.workdir, "_exceptions")
                with open(dump_file, "wt") as fh:
                    fh.writelines(self.exceptions)
                    fh.write("Shutdown message:\n%s" % msg)

            lines = []
            app = lines.append
            app("Submitted on: %s" % time.ctime(self.start_time))
            app("Completed on: %s" % time.asctime())
            app("Elapsed time: %s" % str(self.get_delta_etime()))

            if self.flow.all_ok:
                app("Flow completed successfully")
            else:
                app("Flow %s didn't complete successfully" %
                    repr(self.flow.workdir))
                app("use `abirun.py FLOWDIR debug` to analyze the problem.")
                app("Shutdown message:\n%s" % msg)

            print("")
            print("\n".join(lines))
            print("")

            self._do_customer_service()

            if self.flow.all_ok:
                print("Calling flow.finalize()...")
                self.flow.finalize()
                #print("finalized:", self.flow.finalized)
                if self.rmflow:
                    app("Flow directory will be removed...")
                    try:
                        self.flow.rmtree()
                    except Exception:
                        logger.warning(
                            "Ignoring exception while trying to remove flow dir."
                        )

        finally:
            # Shutdown the scheduler thus allowing the process to exit.
            logger.debug('This should be the shutdown of the scheduler')

            # Unschedule all the jobs before calling shutdown
            #self.sched.print_jobs()
            if not has_sched_v3:
                for job in self.sched.get_jobs():
                    self.sched.unschedule_job(job)
            #self.sched.print_jobs()

            self.sched.shutdown()
            # Uncomment the line below if shutdown does not work!
            #os.system("kill -9 %d" % os.getpid())

    def send_email(self, msg, tag=None):
        """
        Send an e-mail before completing the shutdown.
        Returns 0 if success.
        """
        try:
            return self._send_email(msg, tag)
        except:
            self.exceptions.append(straceback())
            return -2

    def _send_email(self, msg, tag):
        if self.mailto is None:
            return -1

        header = msg.splitlines()
        app = header.append

        app("Submitted on: %s" % time.ctime(self.start_time))
        app("Completed on: %s" % time.asctime())
        app("Elapsed time: %s" % str(self.get_delta_etime()))
        app("Number of errored tasks: %d" % self.flow.num_errored_tasks)
        app("Number of unconverged tasks: %d" %
            self.flow.num_unconverged_tasks)

        strio = cStringIO()
        strio.writelines("\n".join(header) + 4 * "\n")

        # Add the status of the flow.
        self.flow.show_status(stream=strio)

        if self.exceptions:
            # Report the list of exceptions.
            strio.writelines(self.exceptions)

        if tag is None:
            tag = " [ALL OK]" if self.flow.all_ok else " [WARNING]"

        return sendmail(subject=self.flow.name + tag,
                        text=strio.getvalue(),
                        mailto=self.mailto)
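
A minimal usage sketch for the scheduler class above (`flow` is an assumption:
an abipy Flow built elsewhere; see add_flow and start):

# Hypothetical driver script.
sched = PyFlowScheduler.from_user_config()  # reads scheduler.yml (see above)
sched.add_flow(flow)                        # register the pre-built Flow
retcode = sched.start()                     # blocks until all tasks reach S_OK or an error triggers shutdown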
Exemple #7
0
class PyFlowScheduler(object):
    """
    This object schedules the submission of the tasks in a :class:`Flow`.
    There are two types of errors that might occur during the execution of the jobs:

        #. Python exceptions
        #. Abinit Errors.

    Python exceptions are easy to detect and are usually due to a bug in abinitio or random errors such as IOError.
    The set of Abinit Errors is much broader. It includes wrong input data, segmentation
    faults, problems with the resource manager, etc. Abinitio tries to handle the most common cases
    but there's still a lot of room for improvement.
    Note, in particular, that `PyFlowScheduler` will shutdown automatically if

        #. The number of python exceptions is > MAX_NUM_PYEXCS

        #. The number of Abinit Errors (i.e. the number of tasks whose status is S_ERROR) is > MAX_NUM_ABIERRS

        #. The number of jobs launched becomes greater than (SAFETY_RATIO * total_number_of_tasks).

        #. The scheduler will send an email to the user (specified by mailto) every REMINDME_S seconds.
           If the mail cannot be sent, it will shutdown automatically.
           This check prevents the scheduler from being trapped in an infinite loop.
    """
    # Configuration file.
    YAML_FILE = "scheduler.yml"
    USER_CONFIG_DIR = os.path.join(os.getenv("HOME"), ".abinit", "abipy")

    DEBUG = 0

    Error = PyFlowSchedulerError

    def __init__(self, **kwargs):
        """
        Args:
            weeks: number of weeks to wait
            days: number of days to wait
            hours: number of hours to wait
            minutes: number of minutes to wait
            seconds: number of seconds to wait
            verbose: (int) verbosity level
            max_njobs_inqueue: Limit on the number of jobs that can be present in the queue
            use_dynamic_manager: True if the :class:`TaskManager` must be re-initialized from
                file before launching the jobs. Default: False
            max_nlaunch: Maximum number of tasks launched by rapidfire (default -1 i.e. no limit)
        """
        # Options passed to the scheduler.
        self.sched_options = AttrDict(
            weeks=kwargs.pop("weeks", 0),
            days=kwargs.pop("days", 0),
            hours=kwargs.pop("hours", 0),
            minutes=kwargs.pop("minutes", 0),
            seconds=kwargs.pop("seconds", 0),
            #start_date=kwargs.pop("start_date", None),
        )

        if all(not v for v in self.sched_options.values()):
            raise self.Error("Wrong set of options passed to the scheduler.")

        self.mailto = kwargs.pop("mailto", None)
        self.verbose = int(kwargs.pop("verbose", 0))
        self.use_dynamic_manager = kwargs.pop("use_dynamic_manager", False)
        self.max_njobs_inqueue = kwargs.pop("max_njobs_inqueue", 200)

        self.REMINDME_S = float(kwargs.pop("REMINDME_S", 4 * 24 * 3600))
        self.MAX_NUM_PYEXCS = int(kwargs.pop("MAX_NUM_PYEXCS", 0))
        self.MAX_NUM_ABIERRS = int(kwargs.pop("MAX_NUM_ABIERRS", 0))
        self.SAFETY_RATIO = int(kwargs.pop("SAFETY_RATIO", 5))
        #self.MAX_ETIME_S = kwargs.pop("MAX_ETIME_S", )
        self.max_nlaunch = kwargs.pop("max_nlaunch", -1)

        if kwargs:
            raise self.Error("Unknown arguments %s" % kwargs)

        if has_sched_v3:
            from apscheduler.schedulers.blocking import BlockingScheduler
            self.sched = BlockingScheduler()
        else:
            from apscheduler.scheduler import Scheduler
            self.sched = Scheduler(standalone=True)

        self.nlaunch = 0
        self.num_reminders = 1

        # Used to keep track of the exceptions raised while the scheduler is running
        self.exceptions = collections.deque(maxlen=self.MAX_NUM_PYEXCS + 10)

        # Used to push additional info during the execution.
        self.history = collections.deque(maxlen=100)

    @classmethod
    def from_file(cls, filepath):
        """Read the configuration parameters from a Yaml file."""
        with open(filepath, "r") as fh:
            return cls(**yaml.load(fh))

    @classmethod
    def from_string(cls, s):
        """Create an istance from string s containing a YAML dictionary."""
        stream = cStringIO(s)
        stream.seek(0)

        return cls(**yaml.safe_load(stream))

    @classmethod
    def from_user_config(cls):
        """
        Initialize the :class:`PyFlowScheduler` from the YAML file 'scheduler.yml'.
        Search first in the working directory and then in the configuration directory of abipy.

        Raises:
            RuntimeError if file is not found.
        """
        # Try in the current directory.
        path = os.path.join(os.getcwd(), cls.YAML_FILE)

        if os.path.exists(path):
            return cls.from_file(path)

        # Try in the configuration directory.
        path = os.path.join(cls.USER_CONFIG_DIR, cls.YAML_FILE)

        if os.path.exists(path):
            return cls.from_file(path)

        err_msg = "Cannot locate %s neither in current directory nor in %s" % (
            cls.YAML_FILE, path)
        raise cls.Error(err_msg)

    def __str__(self):
        """String representation."""
        lines = [self.__class__.__name__ + ", Pid: %d" % self.pid]
        app = lines.append

        app("Scheduler options: %s" % str(self.sched_options))
        app(80 * "=")
        app(str(self.flow))

        return "\n".join(lines)

    @property
    def pid(self):
        """The pid of the process associated to the scheduler."""
        try:
            return self._pid

        except AttributeError:
            self._pid = os.getpid()
            return self._pid

    @property
    def pid_file(self):
        """
        Absolute path of the file with the pid.
        The file is located in the workdir of the flow
        """
        return self._pid_file

    @property
    def flow(self):
        """`Flow`."""
        return self._flow

    @property
    def num_excs(self):
        """Number of exceptions raised so far."""
        return len(self.exceptions)

    def get_delta_etime(self):
        """Returns a `timedelta` object representing with the elapsed time."""
        return timedelta(seconds=(time.time() - self.start_time))

    def add_flow(self, flow):
        """Add an :class:`Flow` flow to the scheduler."""
        if hasattr(self, "_flow"):
            raise self.Error("Only one flow can be added to the scheduler.")

        pid_file = os.path.join(flow.workdir, "_PyFlowScheduler.pid")

        if os.path.isfile(pid_file):
            flow.show_status()

            err_msg = ("""
                pid_file %s already exists
                There are two possibilities:

                   1) There's another instance of PyFlowScheduler running
                   2) The previous scheduler didn't exit in a clean way

                To solve case 1:
                   Kill the previous scheduler (use 'kill pid' where pid is the number reported in the file)
                   Then you can restart the new scheduler.

                To solve case 2:
                   Remove the pid_file and restart the scheduler.

                Exiting""" % pid_file)

            raise self.Error(err_msg)

        with open(pid_file, "w") as fh:
            fh.write(str(self.pid))

        self._pid_file = pid_file
        self._flow = flow

    def start(self):
        """
        Starts the scheduler in a new thread. Returns True on success.
        In standalone mode, this method will block until there are no more scheduled jobs.
        """
        self.history.append("Started on %s" % time.asctime())
        self.start_time = time.time()

        if has_sched_v3:
            self.sched.add_job(self.callback, "interval", **self.sched_options)
        else:
            self.sched.add_interval_job(self.callback, **self.sched_options)

        errors = self.flow.look_before_you_leap()
        if errors:
            print(errors)
            self.exceptions.append(errors)
            return False

        # Try to run the job immediately. If something goes wrong return without initializing the scheduler.
        self._runem_all()

        if self.exceptions:
            self.cleanup()
            self.send_email(
                msg=
                "Error while trying to run the flow for the first time!\n %s" %
                self.exceptions)
            return False

        self.sched.start()
        return True

    def _runem_all(self):
        """
        This function checks the status of all tasks,
        tries to fix tasks that went unconverged, abicritical, or queuecritical
        and tries to run all the tasks that can be submitted.
        """
        excs = []
        flow = self.flow

        # Allow the manager to be changed at run-time.
        if self.use_dynamic_manager:
            from pymatgen.io.abinitio.tasks import TaskManager
            new_manager = TaskManager.from_user_config()
            for work in flow:
                work.set_manager(new_manager)

        nqjobs = flow.get_njobs_in_queue()
        if nqjobs is None:
            nqjobs = 0
            print('Cannot get njobs_inqueue')

        if nqjobs >= self.max_njobs_inqueue:
            print("Too many jobs in the queue, returning")
            return

        if self.max_nlaunch == -1:
            max_nlaunch = self.max_njobs_inqueue - nqjobs
        else:
            max_nlaunch = min(self.max_njobs_inqueue - nqjobs,
                              self.max_nlaunch)

        # check status
        flow.check_status()
        flow.show_status()

        # fix problems
        # Try to restart the unconverged tasks
        # TODO: do not fire here but prepare for firing in rapidfire
        for task in self.flow.unconverged_tasks:
            try:
                logger.info("Flow will try restart task %s" % task)
                fired = task.restart()
                if fired:
                    self.nlaunch += 1
                    max_nlaunch -= 1
                    if max_nlaunch == 0:
                        print("Restart: too many jobs in the queue, returning")
                        flow.pickle_dump()
                        return
            except Exception:
                excs.append(straceback())

        # moved here from within rapidfire ...
        # fix only prepares for restarting, and sets to ready
        flow.fix_critical()

        # update database
        flow.pickle_dump()

        #if self.num_restarts == self.max_num_restarts:
        #    info_msg = "Reached maximum number of restarts. Cannot restart anymore Returning"
        #    logger.info(info_msg)
        #    self.history.append(info_msg)
        #    return 1

        # Submit the tasks that are ready.
        try:
            nlaunch = PyLauncher(flow).rapidfire(max_nlaunch=max_nlaunch,
                                                 sleep_time=10)
            self.nlaunch += nlaunch

            if nlaunch:
                print("[%s] Number of launches: %d" %
                      (time.asctime(), nlaunch))

        except Exception:
            excs.append(straceback())

        flow.show_status()

        if excs:
            logger.critical("*** Scheduler exceptions:\n *** %s" %
                            "\n".join(excs))
            self.exceptions.extend(excs)

    def callback(self):
        """The function that will be executed by the scheduler."""
        try:
            return self._callback()
        except:
            # All exceptions raised here will trigger the shutdown!
            self.exceptions.append(straceback())
            self.shutdown(msg="Exception raised in callback!")

    def _callback(self):
        """The actual callback."""
        if self.DEBUG:
            # Show the number of open file descriptors
            print(">>>>> _callback: Number of open file descriptors: %s" %
                  get_open_fds())
        #print('before _runem_all in _callback')

        self._runem_all()

        # Mission accomplished. Shutdown the scheduler.
        all_ok = self.flow.all_ok
        if self.verbose:
            print("all_ok", all_ok)

        if all_ok:
            self.shutdown(
                msg=
                "All tasks have reached S_OK. Will shutdown the scheduler and exit"
            )

        # Handle failures.
        err_msg = ""

        # Shall we send a reminder to the user?
        delta_etime = self.get_delta_etime()

        if delta_etime.total_seconds() > self.num_reminders * self.REMINDME_S:
            self.num_reminders += 1
            msg = (
                "Just to remind you that the scheduler with pid %s, flow %s\n has been running for %s "
                % (self.pid, self.flow, delta_etime))
            retcode = self.send_email(msg, tag="[REMINDER]")

            if retcode:
                # Cannot send mail, shutdown now!
                msg += (
                    "\nThe scheduler tried to send an e-mail to remind the user\n"
                    + " but send_email returned %d. Aborting now" % retcode)
                err_msg += msg

        #if delta_etime.total_seconds() > self.MAX_ETIME_S:
        #    err_msg += "\nExceeded MAX_ETIME_S %s. Will shutdown the scheduler and exit" % self.MAX_ETIME_S

        # Too many exceptions. Shutdown the scheduler.
        if self.num_excs > self.MAX_NUM_PYEXCS:
            msg = "Number of exceptions %s > %s. Will shutdown the scheduler and exit" % (
                self.num_excs, self.MAX_NUM_PYEXCS)
            err_msg += boxed(msg)

        # Paranoid check: disable the scheduler if we have submitted
        # too many jobs (it might be due to some bug or other external reasons
        # such as race conditions between different callbacks!)
        if self.nlaunch > self.SAFETY_RATIO * self.flow.num_tasks:
            msg = "Too many jobs launched %d. Total number of tasks = %s, Will shutdown the scheduler and exit" % (
                self.nlaunch, self.flow.num_tasks)
            err_msg += boxed(msg)

        # Count the number of tasks with status == S_ERROR.
        if self.flow.num_errored_tasks > self.MAX_NUM_ABIERRS:
            msg = "Number of tasks with ERROR status %s > %s. Will shutdown the scheduler and exit" % (
                self.flow.num_errored_tasks, self.MAX_NUM_ABIERRS)
            err_msg += boxed(msg)

        # Count the number of tasks with status == S_UNCONVERGED.
        #if self.flow.num_unconverged_tasks:
        #    # TODO: this is needed to avoid deadlocks, automatic restarting is not available yet
        #    msg = ("Found %d unconverged tasks."
        #           "Automatic restarting is not available yet. Will shutdown the scheduler and exit"
        #           % self.flow.num_unconverged_tasks)
        #    err_msg += boxed(msg)

        #deadlocks = self.detect_deadlocks()
        #if deadlocks:
        #    msg = ("Detected deadlocks in flow. Will shutdown the scheduler and exit"
        #           % self.flow.num_unconverged_tasks)
        #    err_msg += boxed(msg)

        if err_msg:
            # Something wrong. Quit
            self.shutdown(err_msg)

        return len(self.exceptions)

    def cleanup(self):
        """Cleanup routine: remove the pid file and save the pickle database"""
        try:
            os.remove(self.pid_file)
        except OSError:
            logger.critical("Could not remove pid_file")

        # Save the final status of the flow.
        self.flow.pickle_dump()

    def shutdown(self, msg):
        """Shutdown the scheduler."""
        try:
            self.cleanup()

            #if False and self.flow.has_db:
            #    try:
            #        self.flow.db_insert()
            #    except Exception:
            #         logger.critical("MongoDb insertion failed.")

            self.history.append("Completed on %s" % time.asctime())
            self.history.append("Elapsed time %s" % self.get_delta_etime())

            if self.DEBUG:
                print(">>>>> shutdown: Number of open file descriptors: %s" %
                      get_open_fds())

            retcode = self.send_email(msg)
            if self.DEBUG:
                print("send_mail retcode", retcode)

            # Write file with the list of exceptions:
            if self.exceptions:
                dump_file = os.path.join(self.flow.workdir, "_exceptions")
                with open(dump_file, "w") as fh:
                    fh.writelines(self.exceptions)
                    fh.write("Shutdown message:\n%s" % msg)

        finally:
            # Shutdown the scheduler thus allowing the process to exit.
            print('this should be the shutdown of the scheduler')

            # Unschedule all the jobs before calling shutdown
            self.sched.print_jobs()
            for job in self.sched.get_jobs():
                self.sched.unschedule_job(job)
            self.sched.print_jobs()

            self.sched.shutdown()
            # Uncomment the line below if shutdown does not work!
            #os.system("kill -9 %d" % os.getpid())

    def send_email(self, msg, tag=None):
        """
        Send an e-mail before completing the shutdown.
        Returns 0 if success.
        """
        try:
            return self._send_email(msg, tag)
        except:
            self.exceptions.append(straceback())
            return -2

    def _send_email(self, msg, tag):
        if self.mailto is None:
            return -1

        header = msg.splitlines()
        app = header.append

        app("Submitted on %s" % time.ctime(self.start_time))
        app("Completed on %s" % time.asctime())
        app("Elapsed time %s" % str(self.get_delta_etime()))
        app("Number of errored tasks: %d" % self.flow.num_errored_tasks)
        app("Number of unconverged tasks: %d" %
            self.flow.num_unconverged_tasks)

        strio = cStringIO()
        strio.writelines("\n".join(header) + 4 * "\n")

        # Add the status of the flow.
        self.flow.show_status(stream=strio)

        if self.exceptions:
            # Report the list of exceptions.
            strio.writelines(self.exceptions)

        if tag is None:
            tag = " [ALL OK]" if self.flow.all_ok else " [WARNING]"

        return sendmail(subject=self.flow.name + tag,
                        text=strio.getvalue(),
                        mailto=self.mailto)
Exemple #8
0
        else:
            # send sun (default) and send message for me
            bot.send_message(shut_down_alert, "description to add: {0}".format(main_current))

        bot.send_message(dest, emoji.emojize("{0} - {1} \nTemperatura maxima: {2} C\nTemperatura minima: {3} C\nUmidade de {4}%\n\n==============\nCreated by @JGabrielFreitas\nPowered by OpenWeather API".format(emoji_to_show, description, temp_max, temp_min, humidity))) # dest and msg
        # print emoji.emojize("{0} - {1} \nTemperatura maxima: {2} C\nTemperatura minima: {3} C\nUmidade de {4}%\n\n==============\nCreated by @JGabrielFreitas\nPowered by OpenWeather API".format(emoji_to_show, description, temp_max, temp_min, humidity))
    except:
        bot.send_message(shut_down_alert, "Dude, your bot is down...:\n\n{0}".format(traceback.format_exc()))

# @sched.interval_schedule()
def timed_job():
    print('This job is run every minute.')
    job()

# @sched.scheduled_job('cron', day_of_week='mon-fri', hour=17)
# def scheduled_job():
#     print('This job is run every weekday at 5pm.')
#     job()


# run jobs
# schedule.every().hour.do(job)
# schedule.every(1).minutes.do(job)
#
# while True:
#     schedule.run_pending()
    # time.sleep(1)
# job()
sched.add_job(timed_job, 'interval', seconds=10, id="timed_job")
sched.start()
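
The fragment above uses a module-level `sched` that is not defined in the
excerpt. A typical setup for the 3.x-style call it makes (add_job with an
id= keyword) is the following sketch, an assumption rather than the original
code:

from apscheduler.schedulers.blocking import BlockingScheduler

sched = BlockingScheduler()  # hypothetical reconstruction of the missing setup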
Exemple #9
0
class SchedulerContainer(DaemonContainer):

    def __init__(self, environment):
        super(SchedulerContainer, self).__init__(environment)
        gconfig = environment.get("gconfig", {})
        options = environment.get("options", {})
        self.scheduler = Scheduler(gconfig, **options)
    
    def on_start(self):
        self.scheduler.start()
    
    def on_stop(self):
        self.scheduler.shutdown()
        
    def unschedule_func(self, func):
        self.scheduler.unschedule_func(func)
    
    def unschedule_job(self, job):
        self.scheduler.unschedule_job(job)
        
        
    def add_interval_job(self, func, weeks=0, days=0, hours=0, minutes=0, seconds=0, start_date=None, args=None, kwargs=None, **options):
        return self.scheduler.add_interval_job(func=func, 
                                        weeks=weeks, 
                                        days=days, 
                                        hours=hours, 
                                        minutes=minutes, 
                                        seconds=seconds, 
                                        start_date=start_date, 
                                        args=args, 
                                        kwargs=kwargs, 
                                        **options)
        
    def add_cron_job(self, func, year=None, month=None, day=None, week=None, day_of_week=None, hour=None, minute=None, second=None, start_date=None, args=None, kwargs=None, **options):
        return self.scheduler.add_cron_job(func=func, 
                                    year=year, 
                                    month=month, 
                                    day=day, 
                                    week=week, 
                                    day_of_week=day_of_week, 
                                    hour=hour, 
                                    minute=minute, 
                                    second=second, 
                                    start_date=start_date, 
                                    args=args, 
                                    kwargs=kwargs,
                                    **options)
    
    def add_date_job(self, func, date, args=None, kwargs=None, **options):
        return self.scheduler.add_date_job(func=func, 
                                    date=date, 
                                    args=args, 
                                    kwargs=kwargs,
                                    **options)
    
    def get_jobs(self):
        return self.scheduler.get_jobs()
    
    def add_job(self, trigger, func, args, kwargs, jobstore='default', **options):
        return self.scheduler.add_job(trigger=trigger, 
                                      func=func, 
                                      args=args, 
                                      kwargs=kwargs, 
                                      jobstore=jobstore,
                                      **options)
        
    def add_listener(self, callback, mask):
        self.scheduler.add_listener(callback, mask)
    
    def remove_listener(self, callback):
        self.scheduler.remove_listener(callback)
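
A minimal usage sketch for the container above (DaemonContainer, the
environment mapping, and my_task are assumptions inferred from the
constructor; `daemonic` is a standard APScheduler 2.x option):

container = SchedulerContainer({"gconfig": {}, "options": {"daemonic": True}})
container.on_start()
container.add_interval_job(my_task, minutes=5)  # my_task: a hypothetical callable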
Exemple #10
0
class FreshList(object):
  ''' A dictionary of fresh StateObjects maintained in soft-state fashion. (*NOTE: perhaps this should be renamed to FreshDict or something less misleading*).
  Timed-out StateObjects will be reaped, and a reap callback will be applied to them before reaping.
  A function that refreshes the local user will be called periodically (*this SHOULD be refactored out of this class*).

  This list is protected by a recursive lock for all operations (both read and write).
  '''
  _logger = Logger.get_logger('FreshList')

  reap_interval = StateObject.default_ttl * 2
  def __init__(self, refresh_func, reap_callback, *args, **kwargs):
    '''
    Args:
      refresh_func: Callback function to refresh local user.
      reap_callback: Callback function to be applied to the StateObject to be reaped.
    '''
    super(FreshList, self).__init__()
    self.instances = dict() 
    self.refresh_func = refresh_func
    self.reap_callback = reap_callback
    # every operation to self.instances should grab self.__rlock first
    self.__rlock = RLock()
    self.scheduler = Scheduler()
    self.scheduler.start()

    # schedule periodic reap 
    reap_interval = FreshList.reap_interval
    self.scheduler.add_job(RandomizedIntervalTrigger(timedelta(seconds = reap_interval), randomize = lambda: uniform(0, reap_interval / 4)), self.reap, None, None, **{})

    # schedule refresh self 
    refresh_interval = 0.7 * StateObject.default_ttl
    self.scheduler.add_job(RandomizedIntervalTrigger(timedelta(seconds = refresh_interval), randomize = lambda: uniform(0, refresh_interval / 4)), self.refresh_func, None, None, **{})

  def schedule_next(self, interval, func):
    '''A convenience function to schedule a task.

    Args:
      interval (float): The time from now to execute the task in seconds.
      func: The task to be executed.
    '''
    self.scheduler.add_date_job(func, datetime.now() + timedelta(seconds = interval))

  def shutdown(self, wait = False):
    '''Shutdown the scheduler of this object.
    '''
    self.scheduler.shutdown(wait = False)

  def reap(self):
    '''Check the StateObjects and reap the ones that have timed out.
    '''
    with self.__rlock:
      zombies = filter(lambda(k, state_object): not state_object.is_active(), self.instances.iteritems())
      self.instances = dict(filter(lambda (k, state_object): state_object.is_active(), self.instances.iteritems()))
    map(lambda(k, state_object): self.reap_callback(state_object), zombies)
    
  def announce_received(self, k):
    '''Refresh the StateObject when a refreshing announcement has been received for it.

    Args:
      k : the key for the state object to be refreshed
    '''
    with self.__rlock:
      self.instances[k].refresh_timestamp()

  def __getitem__(self, k):
    with self.__rlock:
      return self.instances.get(k, None)

  def __setitem__(self, k, state_object):
    with self.__rlock:
      self.instances[k] = state_object
    
  def __delitem__(self, k):
    with self.__rlock:
      try:
        del self.instances[k]
      except KeyError as e:
        FreshList._logger.exception("Tried to delete a non-existent state object")
        raise e

  def __len__(self):
    with self.__rlock:
      return len(self.instances)

  def clear(self):
    '''Clear the dict.
    '''
    with self.__rlock:
      self.instances.clear()

  def has_key(self, key):
    '''Check whether the key exists in the current dict
    '''
    with self.__rlock:
      return self.instances.has_key(key)

  def keys(self):
    '''Get all keys.
    '''
    with self.__rlock:
      return self.instances.keys()
    
  def values(self):
    '''Get all values (stateobjects).
    '''
    with self.__rlock:
      return self.instances.values()

  def items(self):
    '''Get the key-value pairs.
    '''
    with self.__rlock:
      return self.instances.items()
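
FreshList relies on a RandomizedIntervalTrigger that is not shown in this
example. A sketch of what such a trigger could look like, assuming the
APScheduler 2.x IntervalTrigger API and inferring the constructor from the
calls above (an interval plus an optional `randomize` callable that returns a
jitter in seconds):

from datetime import timedelta
from apscheduler.triggers.interval import IntervalTrigger

class RandomizedIntervalTrigger(IntervalTrigger):
    # Hypothetical sketch, not the original implementation.
    def __init__(self, interval, start_date=None, randomize=None):
        super(RandomizedIntervalTrigger, self).__init__(interval, start_date)
        self.randomize = randomize

    def get_next_fire_time(self, start_date):
        # Compute the regular fire time, then push it forward by a random
        # number of seconds so that periodic jobs do not fire in lockstep.
        fire_time = super(RandomizedIntervalTrigger, self).get_next_fire_time(start_date)
        if fire_time is not None and self.randomize is not None:
            fire_time += timedelta(seconds=self.randomize())
        return fire_time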