Example #1
def main():
    init_config()  # initialize the configuration
    """ Quartz-style cron job scheduler """
    scheduler = BlockingScheduler()
    """ FIELD_NAMES = ('year', 'month', 'day', 'week', 'day_of_week', 'hour', 'minute', 'second') 
        If you have not used Quartz-style CRON expressions before, see the docs:
        https://apscheduler.readthedocs.io/en/stable/modules/triggers/cron.html#module-apscheduler.triggers.cron
    """
    # Leave unset fields as None; wrapping them in str() would produce the string
    # 'None', which the cron trigger rejects (int or str values are both accepted).
    year = gol.get_value('year') or None
    month = gol.get_value('month') or None
    day = gol.get_value('day') or None
    week = gol.get_value('week') or None
    day_of_week = gol.get_value('day_of_week') or None
    hour = gol.get_value('hour') or None
    minute = gol.get_value('minute') or None
    second = gol.get_value('second') or None
    scheduler.add_job(auto_job,
                      'cron',
                      year=year,
                      month=month,
                      day=day,
                      week=week,
                      day_of_week=day_of_week,
                      hour=hour,
                      minute=minute,
                      second=second,
                      id='auto_job_id')
    try:
        print('Program started! --------> Job running (tip: Ctrl + C to stop)')
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        print('Exiting!')
        scheduler.remove_job('auto_job_id')
Example #2
def main():
    # Quartz-style cron job scheduler
    scheduler = BlockingScheduler()
    """ FIELD_NAMES = ('year', 'month', 'day', 'week', 'day_of_week', 'hour', 'minute', 'second') """
    scheduler.add_job(auto_job, 'cron', second='0/2', id='my_job_id')
    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        scheduler.remove_job('my_job_id')
Example #3
class Main(KytosNApp):
    """Main class of amlight/scheduler NApp.

    This class is the entry point for this napp.
    """

    def setup(self):
        """Replace the '__init__' method for the KytosNApp subclass.

        The setup method is automatically called by the controller when your
        application is loaded.

        So, if you have any setup routine, insert it here.
        """
        self._scheduler = BlockingScheduler()

    def execute(self):
        """This method is executed right after the setup method execution.

        You can also use this method in loop mode if you add to the above setup
        method a line like the following example:

            self.execute_as_loop(30)  # 30-second interval.
        """
        self._scheduler.start()

    def shutdown(self):
        """This method is executed when your napp is unloaded.

        If you have some cleanup procedure, insert it here.
        """
        self._scheduler.shutdown()

    @listen_to('amlight/scheduler.add_job')
    def add_job(self, event):
        """Add a job to this scheduler."""
        try:
            job_id = event.content['id']
        except KeyError:
            log.error('Scheduled job must have an id')
            return

        func = event.content['func']
        kwargs = event.content['kwargs']
        self._scheduler.add_job(func, id=job_id, **kwargs)

    @listen_to('amlight/scheduler.remove_job')
    def remove_job(self, event):
        """Remove a job from this scheduler."""
        try:
            job_id = event.content['id']
        except KeyError:
            log.error('Scheduled job must have an id')
            return

        self._scheduler.remove_job(job_id)
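
A minimal sketch of how another NApp might drive the add_job/remove_job handlers above, assuming the usual KytosEvent class and the controller's app buffer; the job id, the cleanup function, and the 60-second interval are illustrative:

from kytos.core.events import KytosEvent


def schedule_cleanup(controller):
    """Illustrative: ask the scheduler NApp to run a job every 60 seconds."""
    def cleanup():
        print('periodic cleanup ran')

    controller.buffers.app.put(
        KytosEvent(name='amlight/scheduler.add_job',
                   content={'id': 'cleanup_job',
                            'func': cleanup,
                            'kwargs': {'trigger': 'interval', 'seconds': 60}}))


def unschedule_cleanup(controller):
    """Illustrative: remove the job again by id."""
    controller.buffers.app.put(
        KytosEvent(name='amlight/scheduler.remove_job',
                   content={'id': 'cleanup_job'}))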
Example #4
class ActionManager:
    def __init__(self):
        self.actions = []
        self.curAction = None
        self.id = "ActionManager_id" + str(time.time())

        self.sched = BlockingScheduler()

    def destroy(self):
        self.sched.remove_job(self.id)
        self.sched.shutdown()

    def start(self):
        # self.sched.add_job(self.tick, 'interval', seconds=0.1, max_instances=10, id=self.id,
        #                    args={self.swipe_speed})
        self.sched.add_job(self.tick, 'interval', seconds=0.1, max_instances=1, id=self.id)
        self.sched.start()
        print("ActionManager::start.....")

    def addAction(self,action):
        self.actions.append(action)

    def _delAction(self,action):
        action.exit()

    def tick(self):

        if self.curAction:
            if self.curAction.finished:
                self.curAction.exit()
                self.curAction = None

        if self.curAction is None:
            if len(self.actions) > 0:
                self.curAction = self.actions.pop(0)
                self.curAction.enter()

        if self.curAction and self.curAction.running:
            self.curAction.tick()

        print("act mgr tick.....end")
Example #5
class SchedulerTest:
    def __init__(self):
        self.scheduler = BlockingScheduler()

    def test(self):
        attend_time = {"id": 5, "times_id": "8"}
        id = None
        for i in range(10):
            attend_time["id"] = i
            attend_time["times_id"] = "{}".format(i)
            id = attend_time.get("times_id")
            self.scheduler.add_job(start_work_test,
                                   'cron',
                                   kwargs=attend_time,
                                   second='*/3',
                                   hour='*',
                                   id=id)
        self.scheduler.start()

    def stop(self, id):
        self.scheduler.remove_job(job_id=id)
Example #6
	except Exception as e:
		print(e)
		traceback.print_exc()

	print("[Scheduled task DONE] Sent daily subscription")


logging.basicConfig()
scheduler = BlockingScheduler()
scheduler.add_executor('threadpool')
if 'SCHED_HOUR' in os.environ:
	scheduler_hour = int(os.environ['SCHED_HOUR'])
else:
	scheduler_hour = 20

if 'SCHED_MIN' in os.environ:
	scheduler_min = int(os.environ['SCHED_MIN'])
else:
	scheduler_min = 48

job = scheduler.add_job(send_daily_subscription, 'cron', hour=scheduler_hour, minute=scheduler_min, id='job1')

try:
	print "[Scheduler started] Sends everyday at %s:%s"%(scheduler_hour, scheduler_min)
	scheduler.start()

except (KeyboardInterrupt, SystemExit):
	scheduler.remove_job('job1')
	scheduler.shutdown()
	print "Scheduler shutdown"
Example #7
class ScheduleService():

    @inject
    def __init__(self, synchronizationService: SynchronizationService):
        self.__synchronizationService = synchronizationService
        self.__scheduler = BlockingScheduler()
        self.id = "id"
        self.hasJob = False
        self.isRunning = False

    def stop_job(self):
        if self.hasJob:
            self.__scheduler.remove_job(self.id)
            self.hasJob = False

    def get_schedule(self, string):
        if string is None:
            return None

        def get_permitted_digits(index):
            if index == 0:
                return 4
            elif index == 4:
                return 1
            else:
                return 2

        regex = lambda d: r"(\*|(\d{%d}(,|-)?))*" % d
        qualifiers = string.split(' ')
        if len(qualifiers) != 8:
            return None

        for i in range(0, len(qualifiers)):
            if not re.match(regex(get_permitted_digits(i)), qualifiers[i]):
                return None

        return {
            'year': qualifiers[0],
            'month': qualifiers[1],
            'week': qualifiers[2],
            'day': qualifiers[3],
            'day_of_week': qualifiers[4],
            'hour': qualifiers[5],
            'minute': qualifiers[6],
            'second': qualifiers[7],
        }

    def start_job(self, schedule):
        self.stop_job()
        self.__scheduler.add_job(
            self.__synchronizationService.synchronize,
            'cron',
            year=schedule['year'],
            month=schedule['month'],
            day=schedule['day'],
            day_of_week=schedule['day_of_week'],
            hour=schedule['hour'],
            minute=schedule['minute'],
            second=schedule['second'],
            id=self.id)

        self.hasJob = True
        if not self.isRunning:
            self.isRunning = True
            self.__scheduler.start()
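
For reference, get_schedule expects eight space-separated fields in the order year, month, week, day, day_of_week, hour, minute and second. A minimal usage sketch, assuming a SynchronizationService instance named sync_service is already available:

service = ScheduleService(sync_service)

# "every day at 03:30:00", written in the eight-field order parsed by get_schedule
schedule = service.get_schedule('* * * * * 03 30 00')
if schedule is not None:
    service.start_job(schedule)  # blocks here: BlockingScheduler.start() runs in this thread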
Example #8
                  args=('one-time task', ),
                  next_run_time=datetime.datetime.now() +
                  datetime.timedelta(seconds=12),
                  id='one_time_task')
scheduler.add_job(func=aps_test,
                  args=('interval task', ),
                  trigger='interval',
                  seconds=3,
                  id='interval_task')

Starter(scheduler).start()
time.sleep(10)
# pause a job
scheduler.pause_job('cron_task')
# remove a job
scheduler.remove_job('interval_task')
# add a job
scheduler.add_job(func=aps_test,
                  args=('task to be modified', ),
                  trigger='interval',
                  seconds=3,
                  id='else_task')
time.sleep(10)
# resume a job
scheduler.resume_job('cron_task')
# modify a job's arguments
scheduler.modify_job('else_task', args=('modified task', ))
time.sleep(10)
# reschedule (change the trigger); to change both the trigger and the arguments at once, prefer remove + add
scheduler.reschedule_job('cron_task', trigger='cron', second='*/8')
time.sleep(10)
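
The last comment above recommends remove + add when both a job's trigger and its arguments need to change at once; a minimal sketch of that pattern, reusing aps_test (the replacement arguments are illustrative):

# Replace the job wholesale instead of combining modify_job and reschedule_job.
scheduler.remove_job('else_task')
scheduler.add_job(func=aps_test,
                  args=('replacement task', ),
                  trigger='cron',
                  second='*/8',
                  id='else_task')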
Example #9
class Host:
    def __init__(self,
                 event_queue_id,
                 event_queue_name,
                 policy_storage,
                 log_group=None,
                 metrics=None,
                 output_dir=None):
        logging.basicConfig(level=logging.INFO, format='%(message)s')
        log.info("Running Azure Cloud Custodian Self-Host")

        load_resources()
        self.session = local_session(Session)

        # Load configuration
        self.options = Host.build_options(output_dir, log_group, metrics)
        self.policy_storage_uri = policy_storage
        self.event_queue_name = event_queue_name
        self.event_queue_id = event_queue_id

        # Prepare storage bits
        self.policy_blob_client = None
        self.blob_cache = {}
        self.queue_storage_account = self.prepare_queue_storage(
            self.event_queue_id, self.event_queue_name)

        self.queue_service = None

        # Track required event subscription updates
        self.require_event_update = False

        # Policy cache and dictionary
        self.policy_cache = tempfile.mkdtemp()
        self.policies = {}

        # Configure scheduler
        self.scheduler = BlockingScheduler()
        logging.getLogger('apscheduler.executors.default').setLevel(
            logging.ERROR)

        # Schedule recurring policy updates
        self.scheduler.add_job(self.update_policies,
                               'interval',
                               seconds=policy_update_seconds,
                               id="update_policies",
                               next_run_time=datetime.now())

        # Schedule recurring queue polling
        self.scheduler.add_job(self.poll_queue,
                               'interval',
                               seconds=queue_poll_seconds,
                               id="poll_queue")

        self.scheduler.start()

    def update_policies(self):
        """
        Enumerate all policies from storage.
        Use the MD5 hashes in the enumerated policies
        and a local dictionary to decide if we should
        bother downloading/updating each blob.
        We maintain an on-disk policy cache for future
        features.
        """
        if not self.policy_blob_client:
            self.policy_blob_client = Storage.get_blob_client_by_uri(
                self.policy_storage_uri, self.session)
        (client, container, prefix) = self.policy_blob_client

        try:
            # All blobs with YAML extension
            blobs = [
                b for b in client.list_blobs(container)
                if Host.has_yaml_ext(b.name)
            ]
        except AzureHttpError as e:
            # If blob methods are failing don't keep
            # a cached client
            self.policy_blob_client = None
            raise e

        # Filter to hashes we have not seen before
        new_blobs = self._get_new_blobs(blobs)

        # Get all YAML files on disk that are no longer in blob storage
        cached_policy_files = [
            f for f in os.listdir(self.policy_cache) if Host.has_yaml_ext(f)
        ]

        removed_files = [
            f for f in cached_policy_files if f not in [b.name for b in blobs]
        ]

        if not (removed_files or new_blobs):
            return

        # Update a copy so we don't interfere with
        # iterations on other threads
        policies_copy = self.policies.copy()

        for f in removed_files:
            path = os.path.join(self.policy_cache, f)
            self.unload_policy_file(path, policies_copy)

        # Get updated YML files
        for blob in new_blobs:
            policy_path = os.path.join(self.policy_cache, blob.name)
            if os.path.exists(policy_path):
                self.unload_policy_file(policy_path, policies_copy)

            client.get_blob_to_path(container, blob.name, policy_path)
            self.load_policy(policy_path, policies_copy)
            self.blob_cache.update(
                {blob.name: blob.properties.content_settings.content_md5})

        # Assign our copy back over the original
        self.policies = policies_copy

        if self.require_event_update:
            self.update_event_subscriptions()

    def _get_new_blobs(self, blobs):
        new_blobs = []
        for blob in blobs:
            md5_hash = blob.properties.content_settings.content_md5
            if not md5_hash:
                blob, md5_hash = self._try_create_md5_content_hash(blob)
            if blob and md5_hash and md5_hash != self.blob_cache.get(
                    blob.name):
                new_blobs.append(blob)
        return new_blobs

    def _try_create_md5_content_hash(self, blob):
        # Not all storage clients provide the md5 hash when uploading a file
        # so, we need to make sure that hash exists.
        (client, container, _) = self.policy_blob_client
        log.info("Applying md5 content hash to policy {}".format(blob.name))

        try:
            # Get the blob contents
            blob_bytes = client.get_blob_to_bytes(container, blob.name)

            # Re-upload the blob. validate_content ensures that the md5 hash is created
            client.create_blob_from_bytes(container,
                                          blob.name,
                                          blob_bytes.content,
                                          validate_content=True)

            # Re-fetch the blob with the new hash
            hashed_blob = client.get_blob_properties(container, blob.name)

            return hashed_blob, hashed_blob.properties.content_settings.content_md5
        except AzureHttpError as e:
            log.warning("Failed to apply a md5 content hash to policy {}. "
                        "This policy will be skipped.".format(blob.name))
            log.error(e)
            return None, None

    def load_policy(self, path, policies):
        """
        Loads a YAML file and prompts scheduling updates
        :param path: Path to YAML file on disk
        :param policies: Dictionary of policies to update
        """
        with open(path, "r") as stream:
            try:
                policy_config = yaml.safe_load(stream)
                new_policies = PolicyCollection.from_data(
                    policy_config, self.options)

                if new_policies:
                    for p in new_policies:
                        log.info("Loading Policy %s from %s" % (p.name, path))

                        p.validate()
                        policies.update({p.name: {'policy': p}})

                        # Update periodic and set event update flag
                        policy_mode = p.data.get('mode', {}).get('type')
                        if policy_mode == CONTAINER_TIME_TRIGGER_MODE:
                            self.update_periodic(p)
                        elif policy_mode == CONTAINER_EVENT_TRIGGER_MODE:
                            self.require_event_update = True
                        else:
                            log.warning(
                                "Unsupported policy mode for Azure Container Host: {}. "
                                "{} will not be run. "
                                "Supported policy modes include \"{}\" and \"{}\"."
                                .format(policy_mode, p.data['name'],
                                        CONTAINER_EVENT_TRIGGER_MODE,
                                        CONTAINER_TIME_TRIGGER_MODE))

            except Exception as exc:
                log.error('Invalid policy file %s %s' % (path, exc))

    def unload_policy_file(self, path, policies):
        """
        Unload a policy file that has changed or been removed.
        Take the copy from disk and pop all policies from dictionary
        and update scheduled jobs and event registrations.
        """
        with open(path, "r") as stream:
            try:
                policy_config = yaml.safe_load(stream)
            except yaml.YAMLError as exc:
                log.warning('Failure loading cached policy for cleanup %s %s' %
                            (path, exc))
                os.unlink(path)
                return

        removed = [
            policies.pop(p['name']) for p in policy_config.get('policies', [])
        ]
        log.info('Removing policies %s' % removed)

        # update periodic
        periodic_names = \
            [p['name'] for p in policy_config['policies'] if p.get('mode', {}).get('schedule')]
        periodic_to_remove = \
            [p for p in periodic_names if p in [j.id for j in self.scheduler.get_jobs()]]

        for name in periodic_to_remove:
            self.scheduler.remove_job(job_id=name)

        # update event
        event_names = \
            [p['name'] for p in policy_config['policies'] if p.get('mode', {}).get('events')]

        if event_names:
            self.require_event_update = True

        os.unlink(path)

        return path

    def update_periodic(self, policy):
        """
        Update scheduled policies using cron type
        periodic scheduling.
        """
        trigger = CronTrigger.from_crontab(policy.data['mode']['schedule'])
        trigger.jitter = jitter_seconds
        self.scheduler.add_job(self.run_policy,
                               trigger,
                               id=policy.name,
                               name=policy.name,
                               args=[policy, None, None],
                               coalesce=True,
                               max_instances=1,
                               replace_existing=True,
                               misfire_grace_time=20)

    def update_event_subscriptions(self):
        """
        Find unique list of all subscribed events and
        update a single event subscription to channel
        them to an Azure Queue.
        """
        log.info('Updating event grid subscriptions')
        destination = \
            StorageQueueEventSubscriptionDestination(resource_id=self.queue_storage_account.id,
                                                     queue_name=self.event_queue_name)

        # Get total unique event list to use in event subscription
        policy_items = self.policies.items()
        events_lists = [
            v['policy'].data.get('mode', {}).get('events')
            for n, v in policy_items
        ]
        flat_events = [e for l in events_lists if l for e in l if e]
        resolved_events = AzureEvents.get_event_operations(flat_events)
        unique_events = set(resolved_events)

        # Build event filter strings
        advance_filter = StringInAdvancedFilter(key='Data.OperationName',
                                                values=unique_events)
        event_filter = EventSubscriptionFilter(
            advanced_filters=[advance_filter])

        # Update event subscription
        AzureEventSubscription.create(destination, self.event_queue_name,
                                      self.session.get_subscription_id(),
                                      self.session, event_filter)

        self.require_event_update = False

    def poll_queue(self):
        """
        Poll the Azure queue and loop until
        there are no visible messages remaining.
        """
        # Exit if we don't have any policies
        if not self.policies:
            return

        if not self.queue_service:
            self.queue_service = Storage.get_queue_client_by_storage_account(
                self.queue_storage_account, self.session)

        while True:
            try:
                messages = Storage.get_queue_messages(
                    self.queue_service,
                    self.event_queue_name,
                    num_messages=queue_message_count,
                    visibility_timeout=queue_timeout_seconds)
            except AzureHttpError:
                self.queue_service = None
                raise

            if len(messages) == 0:
                break

            log.info('Pulled %s events to process while polling queue.' %
                     len(messages))

            for message in messages:
                if message.dequeue_count > max_dequeue_count:
                    Storage.delete_queue_message(self.queue_service,
                                                 self.event_queue_name,
                                                 message=message)
                    log.warning(
                        "Event deleted due to reaching maximum retry count.")
                else:
                    # Run matching policies
                    self.run_policies_for_event(message)

                    # We delete events regardless of policy result
                    Storage.delete_queue_message(self.queue_service,
                                                 self.event_queue_name,
                                                 message=message)

    def run_policies_for_event(self, message):
        """
        Find all policies subscribed to this event type
        and schedule them for immediate execution.
        """
        # Load up the event
        event = json.loads(base64.b64decode(message.content).decode('utf-8'))
        operation_name = event['data']['operationName']

        # Execute all policies matching the event type
        for k, v in self.policies.items():
            events = v['policy'].data.get('mode', {}).get('events')
            if not events:
                continue
            events = AzureEvents.get_event_operations(events)
            if operation_name in events:
                self.scheduler.add_job(self.run_policy,
                                       id=k + event['id'],
                                       name=k,
                                       args=[v['policy'], event, None],
                                       misfire_grace_time=60 * 3)

    def run_policy(self, policy, event, context):
        try:
            policy.push(event, context)
        except Exception as e:
            log.error("Exception running policy: %s error: %s", policy.name, e)

    def prepare_queue_storage(self, queue_resource_id, queue_name):
        """
        Create a storage client using unusual ID/group reference
        as this is what we require for event subscriptions
        """

        # Use a different session object if the queue is in a different subscription
        queue_subscription_id = ResourceIdParser.get_subscription_id(
            queue_resource_id)
        if queue_subscription_id != self.session.subscription_id:
            session = Session(queue_subscription_id)
        else:
            session = self.session

        storage_client = session.client(
            'azure.mgmt.storage.StorageManagementClient')

        account = storage_client.storage_accounts.get_properties(
            ResourceIdParser.get_resource_group(queue_resource_id),
            ResourceIdParser.get_resource_name(queue_resource_id))

        Storage.create_queue_from_storage_account(account, queue_name,
                                                  self.session)
        return account

    @staticmethod
    def build_options(output_dir=None, log_group=None, metrics=None):
        """
        Initialize the Azure provider to apply global config across all policy executions.
        """
        if not output_dir:
            output_dir = tempfile.mkdtemp()
            log.warning(
                'Output directory not specified.  Using directory: %s' %
                output_dir)

        config = Config.empty(**{
            'log_group': log_group,
            'metrics': metrics,
            'output_dir': output_dir
        })

        return Azure().initialize(config)

    @staticmethod
    def has_yaml_ext(filename):
        return filename.lower().endswith(('.yml', '.yaml'))
Example #10
class JobScheduler(metaclass=Singleton):
    __function_dict__ = {}

    def __init__(self):
        self.logger = Log().logger
        self.scheduler = BlockingScheduler(logger=self.logger)
        self.parser = DateTimeParser()

    def get_lives(self):
        return {job.name: job for job in self.scheduler.get_jobs()}

    def add_job(self, name, trigger, trigger_args, func_args=None, func_kwargs=None):
        if name not in self.get_lives():
            func = self.get_functions(name)
            self.scheduler.add_job(func, trigger=trigger, args=func_args, kwargs=func_kwargs,
                                   id=name, name=name, **trigger_args)
            self.logger.info('[%s][%s] job added and started' % (trigger, name))

    def add_date_job(self, name, time, func_args=None, func_kwargs=None):
        time = self.parser.set_date(time).set_time(time).datetime
        self.add_job(name, 'date', {'run_date': time}, func_args, func_kwargs)

    def add_interval_job(self, name, weeks=0, days=0, hours=0, minutes=0, seconds=0,
                         func_args=None, func_kwargs=None):
        self.add_job(name, 'interval',
                     {'weeks': weeks, 'days': days,
                      'hours': hours, 'minutes': minutes, 'seconds': seconds},
                     func_args, func_kwargs)

    def add_cron_job(self, name, year=None, month=None, day=None, week=None,
                     day_of_week=None, hour=None, minute=None, second=None,
                     func_args=None, func_kwargs=None):
        self.add_job(name, 'cron',
                     {'year': year, 'month': month, 'day': day, 'week': week,
                      'day_of_week': day_of_week,
                      'hour': hour, 'minute': minute, 'second': second},
                     func_args, func_kwargs)

    def add_started_job(self, name, after_seconds=1, func_args=None, func_kwargs=None):
        time = datetime.now() + timedelta(seconds=after_seconds)
        self.add_date_job(name, time, func_args, func_kwargs)

    def delete_job(self, name):
        jobs = self.get_lives()
        if name in jobs:
            self.scheduler.remove_job(jobs[name].id)
            self.logger.info('Job [%s] removed' % name)

    @classmethod
    def register(cls, name):
        def add_method(f):
            cls.__function_dict__[name.strip()] = f
            return f

        return add_method

    def get_functions(self, name):
        return self.__function_dict__.get(name.strip())

    def get_function_doc(self, name):
        return self.get_functions(name).__doc__

    def get_function_names(self):
        return sorted(self.__function_dict__.keys())
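
A minimal usage sketch for the registry pattern above; the heartbeat function, its registered name, and the 30-second interval are illustrative:

@JobScheduler.register('heartbeat')
def heartbeat():
    """Print a liveness message."""
    print('alive')


scheduler = JobScheduler()                      # Singleton metaclass: always the same instance
scheduler.add_interval_job('heartbeat', seconds=30)
# scheduler.delete_job('heartbeat')             # remove it again by name
# scheduler.scheduler.start()                   # BlockingScheduler: blocks the calling thread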
Example #11
    data_fr = FR.find_one(sort=[('_id', 1)])
    os.system('scrapy crawl FR')
    data_FR = FR.find_one(sort=[('_id', 1)])
    time_FR = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    print(time_FR, ': FR data checked')
    if data_fr != data_FR:
        date = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
        print(date, ': FR data updated, notification email sent')
        collection = 'FR'
        send_email(collection)
    ECB = db['ECB']
    data_ecb = ECB.find_one(sort=[('_id', 1)])
    os.system('scrapy crawl ecb')
    data_ECB = ECB.find_one(sort=[('_id', 1)])
    time_ECB = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    print(time_ECB, ': ECB data checked')
    if data_ecb != data_ECB:
        date = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
        print(date, ': ECB data updated, notification email sent')
        collection = 'ECB'
        send_email(collection)

sched = BlockingScheduler()
sched.add_job(job_1, 'interval', seconds=30, id='job_1')

try:
    sched.start()
except Exception as e:
    print('scheduler error:', e)
    sched.remove_job('job_1')
Example #12
class abstract_schedule(metaclass=ABCMeta):

    def __init__(self):
        self.sched = BlockingScheduler()

    def lstCronJob(self, job_id=None):
        result = {}
        if not job_id:
            jobs = self.sched.get_jobs()
            for j in jobs:
                result[j.id] = j
        else:
            jobs = self.sched.get_job(job_id)
            result[job_id] = jobs
        return result

    def delCronJob(self, job_id):
        jobs = self.lstCronJob(job_id)
        if not jobs:
            sys.stdout.write("Job %s not found" %job_id)
        else:
            self.sched.remove_job(job_id)
            sys.stdout.write("Job %s 删除成功!"%job_id)
            return True

    def addCronJob(self, job_id, func, policy, args):
        cron = CronTrigger(**policy)
        self.sched.add_job(func, cron, args=args, id=job_id)

    def start(self):
        print("123123")
        self.sched.add_job(self.autoAddJob, IntervalTrigger(seconds=5), id="autoAddJob")
        self.sched.start()

    def autoAddJob(self):
        history_jobs = self.lstCronJob()
        print(history_jobs, 'history_jobs')

        current_jobs = self.getBackupPolicy()
        print(current_jobs, 'current_jobs')

        only_current_jobs = set(current_jobs.keys()).difference(set(history_jobs.keys()))
        print(only_current_jobs, 'only_current_jobs')
        # jobs in the current schedule that are not in the history list

        only_history_jobs = set(history_jobs.keys()).difference(set(current_jobs.keys()))
        print(only_history_jobs, 'only_history_jobs')
        # jobs in the history list that are no longer in the current list
        for j in only_history_jobs:
            if j == 'autoAddJob':
                continue
            self.delCronJob(job_id=j)

        for j in only_current_jobs:
            func = current_jobs[j].pop('func')
            args = current_jobs[j].pop('args')
            policy = current_jobs[j]
            self.addCronJob(job_id=j, func=func, policy=policy, args=args)

    @abstractmethod
    def getBackupPolicy(self):
        pass
Example #13
    # #power rankings:                     tuesday evening at 6:30pm local time.
    # #matchups:                           thursday evening at 7:30pm east coast time.
    # #close scores (within 15.99 points): monday evening at 6:30pm east coast time.
    # #trophies:                           tuesday morning at 7:30am local time.
    # #score update:                       friday, monday, and tuesday morning at 7:30am local time.
    # #score update:                       sunday at 4pm, 8pm east coast time.

    # sched.add_job(bot_main, 'cron', ['get_power_rankings'], id='power_rankings',
    #     day_of_week='tue', hour=18, minute=30, start_date=ff_start_date, end_date=ff_end_date,
    #     timezone=my_timezone, replace_existing=True)
    # sched.add_job(bot_main, 'cron', ['get_matchups'], id='matchups',
    #     day_of_week='thu', hour=19, minute=30, start_date=ff_start_date, end_date=ff_end_date,
    #     timezone=game_timezone, replace_existing=True)
    # sched.add_job(bot_main, 'cron', ['get_close_scores'], id='close_scores',
    #     day_of_week='mon', hour=18, minute=30, start_date=ff_start_date, end_date=ff_end_date,
    #     timezone=game_timezone, replace_existing=True)
    # sched.add_job(bot_main, 'cron', ['get_final'], id='final',
    #     day_of_week='tue', hour=7, minute=30, start_date=ff_start_date, end_date=ff_end_date,
    #     timezone=my_timezone, replace_existing=True)
    # sched.add_job(bot_main, 'cron', ['get_scoreboard_short'], id='scoreboard1',
    #     day_of_week='fri,mon', hour=7, minute=30, start_date=ff_start_date, end_date=ff_end_date,
    #     timezone=my_timezone, replace_existing=True)
    # sched.add_job(bot_main, 'cron', ['random_phrase'], id='random_phrase',
    #     day_of_week='sun', hour='15,42', start_date=ff_start_date, end_date=ff_end_date,
    #     timezone=game_timezone, replace_existing=True)
    sched.remove_job("random_phrase")

    print("Ready!")
    sched.start()
Example #14
class SchedUtility(object, metaclass=Singleton):
    
    def __init__(self):
        try:
            self.Global = Global()
            self.Utility = Utility()
            self.InfraUtil = InfraUtility()
            self.db = DBMySql('Scheduler')

            self.myModulePyFile = os.path.abspath(__file__)
            self.myClass = self.__class__.__name__

            #Setting the infrastructure
            self.Infra = self.InfraUtil.setInfra(self.Global.SchedulerInfraKey)
            if not self.Infra:
                raise InfraInitializationError('Could not initialize {cls}'.format(cls=(self.myModulePyFile,self.myClass)))

            # we need to get the proper logger for a given module
            self.logger = self.Infra.getInfraLogger(self.Global.SchedulerInfraKey)

            # loading Scheduler config and starting the scheduler
            self.__startScheduler__()

        except Exception as err:
            raise err

    def __startScheduler__(self):

        try:
            mySchedulerType = self.Global.DefaultSchedulerType
            mySchedulerMode = self.Global.DefaultSchedulerMode

            if mySchedulerMode == 'Run':
                myArgPaused = False
            else:
                myArgPaused = True
            #fi

            mySchedulerConfig = self.Utility.getACopy(self.Infra.schedulerConfigData)

            if mySchedulerType == 'Background':
                self.Scheduler = BackgroundScheduler(mySchedulerConfig)
            else:
                self.Scheduler = BlockingScheduler(mySchedulerConfig)
            #fi

            if not self.Scheduler.running:
                self.Scheduler.start(paused = myArgPaused)

        except Exception as err:
            raise err

    def getAllJobDetail(self):
        '''
        Description: Returns all jobs as stored in scheduler
        '''
        myJobDetail = []
        
        for job in self.Scheduler.get_jobs():
            myJobDetail.append(self.getAJobDetail(job.id))

        return myJobDetail

    def getAJobDetail(self, jobIdArg):
        '''
        Description: Print all jobs as stored in scheduler
        '''
        myJobId = jobIdArg
        job = self.Scheduler.get_job(myJobId)
        myJobDetail = job.__getstate__()

        return myJobDetail

    def suspendJob(self, jobIdArg):
        myJobId = jobIdArg
        job = self.Scheduler.get_job(myJobId)
        job.pause()

    def resumeJob(self, jobIdArg):
        myJobId = jobIdArg
        job = self.Scheduler.get_job(myJobId)
        job.resume()

    def getCurrentlyExecutingJob(self):
        return len(self.Scheduler.get_jobs())

    def removeJob(self, jobId):
        try:
            self.Scheduler.remove_job(jobId)
        except JobLookupError as err:
            print('Invalid Job !!')

    def removeAllJobs(self):
        try:
            self.Scheduler.remove_all_jobs()
        except Exception as err:
            raise err

    def getAllJobsFromRep(self):
        for job in self.Scheduler.get_jobs():
            myJobDetail = self.Scheduler.get_job(job.id)    
            print(job,myJobDetail)

    def getNewJob(self,prefixArg):
        # random number between 10 and 99 to ensure we always get two digits
        if isinstance(prefixArg,str) and prefixArg is not None:
            return prefixArg + '_' + str(datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%-H%M%S_') + str(random.randrange(10,99)))
        else:
            return datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%-H%M%S_') + str(random.randrange(10,99))

    def getJobInfoFromDb(self, jobIdArg):
        try:
            myResponse = self.Utility.getResponseTemplate()
            myJobId = self.Utility.getACopy(jobIdArg)
            self.logger.debug('arg [{arg}] received'.format(arg = myJobId))

            myJobCriteria = 'JobId = %s ' %repr(myJobId)
            return self.db.processDbRequest(operation = self.Global.fetch, container = 'ScheduledJobs', contents = ['*'], criteria = myJobCriteria)

        except Exception as err:
            myErrorMsg, myTraceback = self.Utility.getErrorTraceback()
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myErrorMsg)
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myTraceback)
            self.Utility.buildResponse(myResponse, self.Global.UnSuccess,myErrorMsg)
            return myResponse

    def getNextSeqForJob(self, jobIdArg):
        try:
            myResponse = self.Utility.getResponseTemplate()
            myJobId = self.Utility.getACopy(jobIdArg)
            self.logger.debug('arg [{arg}] received'.format(arg = myJobId))

            myJobCriteria = 'JobId = %s ' %repr(myJobId)
            return self.db.getTotalRowCount(container = 'ScheduledJobsRunLog', criteria = myJobCriteria) + 1

        except Exception as err:
            myErrorMsg, myTraceback = self.Utility.getErrorTraceback()
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myErrorMsg)
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myTraceback)
            return myErrorMsg

    def getCurrentSeqForJob(self, jobIdArg):
        try:
            myResponse = self.Utility.getResponseTemplate()
            myJobId = self.Utility.getACopy(jobIdArg)
            self.logger.debug('arg [{arg}] received'.format(arg = myJobId))

            myJobCriteria = 'JobId = %s ' %repr(myJobId)
            return self.db.getTotalRowCount(container = 'ScheduledJobsRunLog', criteria = myJobCriteria)

        except Exception as err:
            myErrorMsg, myTraceback = self.Utility.getErrorTraceback()
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myErrorMsg)
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myTraceback)
            return myErrorMsg

    def getElapsedStatsForJob(self, jobIdArg):
        try:
            myResponse = self.Utility.getResponseTemplate()
            myJobId = self.Utility.getACopy(jobIdArg)
            self.logger.debug('arg [{arg}] received'.format(arg = myJobId))

            myJobCriteria = 'JobId = %s ' %repr(myJobId)
            return self.db.getTotalRowCount(container = 'ScheduledJobsRunLog', criteria = myJobCriteria)

        except Exception as err:
            myErrorMsg, myTraceback = self.Utility.getErrorTraceback()
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myErrorMsg)
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myTraceback)
            return myErrorMsg

    def processJobStartEvent(self, jobIdArg):
        '''
        1. Mark job started in ScheduledJobs
        2. Create new entry for this job in ScheduledJobsRunLog
        '''
        try:
            # initializing
            myResponse = self.Utility.getResponseTemplate()
            myJobId = self.Utility.getACopy(jobIdArg)
            self.logger.debug('arg [{arg}] received'.format(arg=myJobId))

            myJobDetailsFromDb = self.getJobInfoFromDb(myJobId)['Data']

            if myJobDetailsFromDb:

                # building data for SchedulerJobsRunLog
                myJobCriteria = ' JobId = %s' %repr(myJobId)
                myNextSeqForJob = self.getNextSeqForJob(myJobId)

                # will mark the job started and create the run log for this run
                self.db.processDbRequest(operation='change', container='ScheduledJobs', \
                    dataDict={'Status': 'Executing'}, criteria = myJobCriteria, commitWork=True )
                
                # creating run information
                self.db.processDbRequest(operation='create', container='ScheduledJobsRunLog', \
                        dataDict={'JobId':myJobId, 'Seq' : myNextSeqForJob,  'ExecutionStarted': self.Utility.getCurrentTime()}, commitWork=True )

                self.Utility.buildResponse(myResponse, self.Global.Success, self.Global.Success, {'Seq':myNextSeqForJob})
            else:
                self.Utility.buildResponse(myResponse, self.Global.UnSuccess, 'Could not find job details for job {job}'.format(job = myJobId))

            return myResponse

        except Exception as err:
            myErrorMsg, myTraceback = self.Utility.getErrorTraceback()
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myErrorMsg)
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myTraceback)
            self.Utility.buildResponse(myResponse, self.Global.UnSuccess,myErrorMsg)
            #raise err # will raise the error so this can be logged by scheduler as an error occurred in processing job
            return myResponse

    def processJobFinishEvent(self, jobIdArg, execDetailsArg):
        '''
        1. Mark job completed (update failure cnt and total count and consc fail count, lastrunstatus) in ScheduledJobs
        2. Update ScheduledJobsRunlog container
        '''
        try:
            # initializing
            myResponse = self.Utility.getResponseTemplate()
            myJobId = self.Utility.getACopy(jobIdArg)
            myExecDetails = execDetailsArg
            myJobStatus = self.Global.NextJobRun
            
            self.logger.debug('arg [{arg}] received'.format(arg=myJobId))

            myJobDetailsFromDb = self.getJobInfoFromDb(myJobId)['Data']

            if myJobDetailsFromDb:

                self.logger.debug('Job details found, proceeding with finish event')
                myJobCriteria = 'JobId = %s' %repr(myJobId)
                myCurrentSeqForJob = self.getCurrentSeqForJob(myJobId)
                myJobRunCriteria = ' JobId = %s and Seq = %s ' %(repr(myJobId), myCurrentSeqForJob)

                self.logger.debug('Job criteria {criteria}'.format(criteria = myJobCriteria))
                self.logger.debug('Job criteria with seq {criteria}'.format(criteria = myJobRunCriteria))

                myJobDetailsFromSched = self.getAJobDetail(myJobId)

                # Updating execution details in ScheduledJobsRunLog
                self.logger.debug('updating statistics of this run')

                myDbResult = self.db.processDbRequest(operation = 'change', container = 'ScheduledJobsRunLog', \
                    dataDict={
                        'Status': myExecDetails['Status'], 'ElapsedSeconds':myExecDetails['Data']['ElapsedSecs'],
                        'ExecutionCompleted': self.Utility.getCurrentTime(), 'ExecutionDetail': json.dumps(myExecDetails['Data']) 
                    }, criteria = myJobRunCriteria, commitWork=True )

                self.logger.debug('ScheduledJobsRunLog: db results >> {results}'.format(results = myDbResult))

                # Updating execution details in ScheduledJobs
                #if myExecDetails['Status'] == self.Global.Success:
                    # if success, reset consecfailcnt to 0, increment totalrun by 1 and update next run
                myElapsedStats = self.db.executeDynamicSql(\
                    operation = 'fetch', \
                    sql_text = 'select min(ElapsedSeconds) "Min", max(ElapsedSeconds) "Max", avg(ElapsedSeconds) "Avg" from ScheduledJobsRunLog')

                self.logger.debug('Elapsed Stats: {stats}'.format(stats = myElapsedStats))

                myDbResult = self.db.processDbRequest(operation='change', container='ScheduledJobs', \
                    dataDict={
                        'Status': myJobStatus, 'LastRunStatus': myExecDetails['Status'], 'TotalRun' : myJobDetailsFromDb[0]['TotalRun'] + 1,
                        'NextRun' : myJobDetailsFromSched['next_run_time'].strftime('%Y-%m-%d %H:%M:%S'), 'LatConsecFailCnt' : 0,
                        'MinElapsedSecs' : myElapsedStats['Data'][0]['Min'], 'MaxElapsedSecs' : myElapsedStats['Data'][0]['Max'],
                        'AvgElapsedSecs' : myElapsedStats['Data'][0]['Avg']  
                    }, criteria = myJobCriteria, commitWork=True )

                self.logger.debug('ScheduledJobs: last stats update >> {result}'.format(result = myDbResult))

                #self.Utility.buildResponse(myResponse, self.Global.Success,self.Global.Success)
                '''
                else:
                    # process job was unsuccessful
                    if myJobDetailsFromDb[0]['LatConsecFailCnt'] >= self.Global.SchedConsecFailCntThreshold:
                        myJobStatus = self.Global.SuspendMode
                        self.logger.info('suspending job {job}'.format(job=myJobId))
                        self.suspendJob(myJobId)

                    myDbResult = self.db.processDbRequest(operation='change', container='ScheduledJobs', \
                        dataDict={
                            'Status': myJobStatus, 'LastRunStatus': myExecDetails['Status'], 'TotalRun' : myJobDetailsFromDb[0]['TotalRun'] + 1,
                            'next_run' : myJobDetailsFromSched['next_run_time'], 'LatConsecFailCnt' : myJobDetailsFromDb[0]['LatConsecFailCnt'] + 1, 
                            'TotalFailure' :  myJobDetailsFromDb[0]['TotalFailure' + 1]
                        }, criteria = myJobCriteria, commitWork=True )
                    # will suspend the job if total failure count has been reached beyond Total consecutive failure threshold
                    self.Utility.buildResponse(myResponse, self.Global.UnSuccess,self.Global.UnSuccess)
                    raise processJobError(myExecDetails['Message'])
                '''
            self.Utility.buildResponse(myResponse, self.Global.Success,self.Global.Success)
            return myResponse
        except Exception as err:
            myErrorMsg, myTraceback = self.Utility.getErrorTraceback()
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myErrorMsg)
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myTraceback)
            self.Utility.buildResponse(myResponse, self.Global.UnSuccess, myErrorMsg)
            return myResponse
Example #15
class JobScheduler(object):
    def __init__(self, every=30, unit='second'):
        self.mongo = mongopool.get()
        self.cursor = self.mongo.get_database('apscheduler').get_collection(
            'jobs')
        self.every = every
        self.unit = unit
        self.scheduler = BlockingScheduler(logger=logger)
        self.scheduler.configure(jobstores=jobstores,
                                 executors=executors,
                                 job_defaults=job_defaults,
                                 timezone=pytz.timezone('Asia/Saigon'))
        self._set_trigger(every, unit)

    def _set_trigger(self, every, unit):
        now = datetime.now().astimezone(pytz.timezone('Asia/Saigon'))
        if unit == 'second':
            self.trigger = CronTrigger(second='*/{}'.format(every),
                                       start_date=now)
        elif unit == 'minute':
            self.trigger = CronTrigger(minute='*/{}'.format(every),
                                       start_date=now)
        elif unit == 'hour':
            self.trigger = CronTrigger(hour='*/{}'.format(every),
                                       start_date=now)
        elif unit == 'day':
            self.trigger = CronTrigger(day='*/{}'.format(every),
                                       start_date=now)
        else:
            raise Exception('Unknown time unit')  # Exception takes no 'message' keyword

    def add_jobstore(self, jobstore, alias):
        self.scheduler.add_jobstore(jobstore, alias)

    def add_executor(self, executor, alias):
        self.scheduler.add_executor(executor, alias)

    def add_job(self,
                job_fn,
                id='id1',
                name='job1',
                jobstore='default',
                executor='default',
                args=None,
                kwargs=None):
        now = datetime.now().astimezone(pytz.timezone('Asia/Saigon'))
        history = list(self.cursor.find({'_id': id}))
        if history:
            #TODO: process missing jobs
            self.cursor.delete_one({'_id': id})
        next_run_time = self.trigger.get_next_fire_time(None, now)
        if kwargs:
            kwargs['run_time'] = next_run_time
        else:
            kwargs = {'run_time': next_run_time}

        self.scheduler.add_job(job_fn,
                               trigger=self.trigger,
                               next_run_time=next_run_time,
                               id=id,
                               name=name,
                               jobstore=jobstore,
                               executor=executor,
                               args=args,
                               kwargs=kwargs)

    def remove_job(self, id, jobstore='default'):
        self.scheduler.remove_job(job_id=id, jobstore=jobstore)

    def callback(self, callback_fn, mark=EVENT_ALL):
        self.scheduler.add_listener(callback_fn, mark)

    def start(self):
        mongopool.put(self.mongo)
        self.scheduler.start()

    def shutdown(self):
        self.scheduler.shutdown()
Example #16
class JobManage():
    def __init__(self):
        jobstores = {'default': MemoryJobStore()}
        executors = {
            'default': ThreadPoolExecutor(50)
            #             'processpool': ProcessPoolExecutor(3)
        }
        job_defaults = {'coalesce': False, 'max_instances': 50}
        self.sched = BlockingScheduler(jobstores=jobstores,
                                       executors=executors,
                                       job_defaults=job_defaults)
        self.addError()
        self.addJobExecuted()

    def addJob(self, func, jobId=None, cron=None, args=[], kwargs={}):
        '''
        Only the five-field cron form is supported:
            *  *  *  *  *  command
            minute  hour  day  month  day-of-week  command

            field 1: minute, 0-59 (use * or */1 for every minute)
            field 2: hour, 0-23 (0 means midnight)
            field 3: day of month, 1-31
            field 4: month, 1-12
            field 5: day of week, 0-6 (0 means Sunday)
            field 6: the command to run
        '''
        if cron is None:
            raise Exception("cron cannot be None")

        (minute, hour, day, month, week) = cron.split(" ")
        self.sched.add_job(func,
                           trigger='cron',
                           id=jobId,
                           hour=hour,
                           minute=minute,
                           day=day,
                           month=month,
                           day_of_week=week,  # crontab's fifth field is day-of-week, not APScheduler's week-of-year
                           args=args,
                           kwargs=kwargs)

    def removeJob(self, jobId):
        self.sched.remove_job(jobId)

    def start(self):
        self.sched.start()

    def shutdown(self):
        self.sched.shutdown()

    def printJobs(self):
        self.sched.print_jobs()

    def getJobs(self):
        return self.sched.get_jobs()

    def addError(self, func=None):
        if func is None:
            func = self.listener
        self.sched.add_listener(func, EVENT_JOB_ERROR)

    def addJobExecuted(self, func=None):
        if func is None:
            func = self.listener
        self.sched.add_listener(func, EVENT_JOB_EXECUTED)

    def listener(self, event):
        if event.exception:
            log.error("任务【%s】 任务出错 : %s" % (event.job_id, event.traceback))
        else:
            log.debug("任务【%s】已经跑完,结束时间 : %s " % (event.job_id, getNow()))


# jobMange = JobManage()
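
A minimal usage sketch for the class above; the backup function and the cron string are illustrative (five fields in minute hour day month day-of-week order, as described in addJob):

def backup():
    """Illustrative job body."""
    print('running backup')


manager = JobManage()
manager.addJob(backup, jobId='nightly_backup', cron='30 2 * * *')  # 02:30 every day
manager.printJobs()
# manager.start()                      # blocks: BlockingScheduler.start()
# manager.removeJob('nightly_backup')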
Example #17
#     print(time.time())
#
#
# sched.start()

# get the list of jobs
# def my_job():
#     print(time.time())
#
#
# sched = BlockingScheduler()
# job = sched.add_job(my_job, 'interval', seconds=2, id='123')
# print(sched.get_job(job_id='123'))
# print(sched.get_jobs())

# remove jobs
sched = BlockingScheduler()


def my_job():
    print(time.time())


job = sched.add_job(my_job, 'interval', seconds=2, id='123')
sched.add_job(my_job, 'interval', minutes=2, id='my_job_id')
print(sched.get_jobs())
sched.remove_job('my_job_id')
print(sched.get_jobs())
job.remove()
print(sched.get_jobs())
Example #18
class eagle_eye_bot(object):
    def __init__(self, stock_list=[]):
        self._scheduler = BlockingScheduler()
        self.stock_list = stock_list
        self._money_flows = {}
        self._start_time = {}
        for stock in stock_list:
            self._money_flows[stock] = money_flow_level()
            self._start_time[stock] = '00:00:00'

    def start(self):
        self._scheduler.add_job(self._start_monitor_job,
                                'cron',
                                day_of_week='mon-fri',
                                hour=9,
                                minute=29,
                                second=58)  # start monitoring at 9:29:58 to catch the call-auction data first
        self._scheduler.add_job(self._end_monitor_job,
                                'cron',
                                day_of_week='mon-fri',
                                hour=11,
                                minute=30,
                                second=5)  # stop the morning session monitoring at 11:30:05
        self._scheduler.add_job(self._start_monitor_job,
                                'cron',
                                day_of_week='mon-fri',
                                hour=13,
                                minute=0,
                                second=0)  # start the afternoon session monitoring
        self._scheduler.add_job(self._end_monitor_job,
                                'cron',
                                day_of_week='mon-fri',
                                hour=15,
                                minute=0,
                                second=5)  # stop the afternoon session monitoring at 15:00:05
        try:
            self._scheduler.start()
            print('start to monitor tick data...')
        except (KeyboardInterrupt, SystemExit):
            self._scheduler.remove_all_jobs()

    def _start_monitor_job(self):
        self._scheduler.add_job(self.__stock_monitor_on_second,
                                'interval',
                                seconds=3,
                                id='monitor_tick')

    def _end_monitor_job(self):
        self._scheduler.remove_job(job_id='monitor_tick')

    def add_stock(self, stock_code, levels=[]):
        """
        add code to be monitored
        :param stock_code:
        :return:
        """
        levels = fit_levels(levels)
        self.stock_list.append(stock_code)
        self._money_flows[stock_code] = money_flow_level(
            levels[0], levels[1], levels[2], levels[3])
        self._start_time[stock_code] = '00:00:00'

    def remove_stock(self, stock_code):
        """

        :param stock_code:
        :return:
        """
        self.stock_list.remove(stock_code)
        del self._money_flows[stock_code]
        del self._start_time[stock_code]

    def get_stock_money_flow(self, stock_code):
        """

        :param stock_code:
        :return:
        """
        return self._money_flows[stock_code]

    def __stock_monitor_on_second(self):  # periodically fetch and process tick data
        data = ts.get_realtime_quotes(self.stock_list)
        for _, item in data.iterrows():
            if item['time'] != self._start_time[item['code']]:  # is this a newly traded tick?
                self.__add_tick(item)
                self._start_time[item['code']] = item['time']

    def __add_tick(self, tick_data):
        price = float(tick_data['price'])
        if price <= float(tick_data['ask']):  # at or below the bid (buy-1) price: sell-side trade
            tick_type = -1
        elif price >= float(tick_data['bid']):  # at or above the ask (sell-1) price: buy-side trade
            tick_type = 1
        else:
            tick_type = 0
        self._money_flows[tick_data['code']].add_tick(
            float(tick_data['amount']), float(tick_data['volume']), tick_type)
Example #19
def run_all():
    while True:
        global MSG
        global IMG
        global FILE
        sendtype = show_login()
        result = False
        if sendtype == 1:
            result = login_normal()
        elif sendtype == 2:
            result = login_vip()
        else:
            break
        if result:
            print("Login succeeded!")
            print("Please scan the QR code to log in to WeChat")
            itchat.auto_login(hotReload=True)
            print("WeChat login succeeded! You are in the following group chats:")
            rooms = itchat.get_chatrooms(update=True)
            for i in rooms:
                print(i['NickName'])
            getans = show_time()
            if getans == 1:
                time_set = input("Enter an interval in hours, e.g. 3 to send automatically every 3 hours from now: ")
                get_type = show()
                if get_type == 1:

                    MSG = input("Enter the text message to send: ")
                    print("Preparing to send...")
                    def msgset():
                        setmsg(MSG)

                    sched = BlockingScheduler()
                    # IntervalTrigger uses the plural keyword hours= and takes no id argument
                    int_trigger = IntervalTrigger(hours=int(time_set))
                    sched.add_job(msgset, int_trigger, id="my_job")

                    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
                    try:
                        sched.start()
                    except (KeyboardInterrupt, SystemExit):
                        sched.remove_job('my_job')
                    run_all()
                elif get_type == 2:

                    IMG = input("Enter the path of the image to send: ")
                    print("Preparing to send...")
                    def imgset():

                        setimg(IMG)

                    sched = BlockingScheduler()
                    int_trigger = IntervalTrigger(hours=int(time_set))
                    sched.add_job(imgset, int_trigger, id="my_job")
                    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
                    try:
                        sched.start()
                    except (KeyboardInterrupt, SystemExit):
                        sched.remove_job('my_job')
                    run_all()
                elif get_type == 3:

                    FILE = input("Enter the path of the file to send: ")
                    print("Preparing to send...")
                    def fileset():

                        setfile(FILE)

                    sched = BlockingScheduler()
                    int_trigger = IntervalTrigger(hours=int(time_set))
                    sched.add_job(fileset, int_trigger, id="my_job")
                    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
                    try:
                        sched.start()
                    except (KeyboardInterrupt, SystemExit):
                        sched.remove_job('my_job')
                    run_all()
                else:
                    break

            elif getans == 2:
                time_set = input("Enter an exact time such as 2018.7.10.8.20 for 8:20 on 10 July 2018:\n")
                year, month, day, hour, minute = time_set.split('.')
                get_type = show()
                if get_type == 1:
                    MSG = input("Enter the text message to send: ")
                    print("Preparing to send...")
                    def msgset():

                        setmsg(MSG)

                    sched = BlockingScheduler()
                    cron_trigger = CronTrigger(year=int(year), month=int(month), day=int(day), hour=int(hour), minute=int(minute), second=0)
                    sched.add_job(msgset, cron_trigger, id="my_job")
                    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
                    try:
                        sched.start()
                    except (KeyboardInterrupt, SystemExit):
                        sched.remove_job('my_job')
                    run_all()
                elif get_type == 2:

                    IMG = input("Enter the path of the image to send: ")
                    print("Preparing to send...")
                    def imgset():

                        setimg(IMG)

                    sched = BlockingScheduler()
                    cron_trigger = CronTrigger(year=int(year), month=int(month), day=int(day), hour=int(hour),
                                               minute=int(minute), second=0)
                    sched.add_job(imgset, cron_trigger, id="my_job")
                    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
                    try:
                        sched.start()
                    except (KeyboardInterrupt, SystemExit):
                        sched.remove_job('my_job')
                    run_all()
                elif get_type == 3:

                    FILE = input("Enter the path of the file to send: ")
                    print("Preparing to send...")
                    def fileset():

                        setfile(FILE)

                    sched = BlockingScheduler()
                    cron_trigger = CronTrigger(year=int(year), month=int(month), day=int(day), hour=int(hour),
                                               minute=int(minute), second=0)
                    sched.add_job(fileset, cron_trigger, id="my_job")
                    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
                    try:
                        sched.start()
                    except (KeyboardInterrupt, SystemExit):
                        sched.remove_job('my_job')
                    run_all()
                else:
                    break

            elif getans == 3:
                run()
            else:
                break
        else:
            print("登陆失败!用户名不存在或密码错误!请重新登陆!")
            run_all()
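Every branch of run_all() above repeats the same scheduler boilerplate. A hedged refactoring sketch follows; the helper names run_scheduled, schedule_every and schedule_at are illustrative and not part of the original code.

import os

from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.interval import IntervalTrigger


def run_scheduled(job, trigger):
    """Run job on the given trigger until interrupted with Ctrl+C / Ctrl+Break."""
    sched = BlockingScheduler()
    sched.add_job(job, trigger, id="my_job")
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
    try:
        sched.start()
    except (KeyboardInterrupt, SystemExit):
        sched.remove_job('my_job')


def schedule_every(job, hours):
    # IntervalTrigger takes plural keyword arguments (hours=, minutes=, ...).
    run_scheduled(job, IntervalTrigger(hours=int(hours)))


def schedule_at(job, year, month, day, hour, minute):
    run_scheduled(job, CronTrigger(year=int(year), month=int(month), day=int(day),
                                   hour=int(hour), minute=int(minute), second=0))

Each branch could then reduce to a call such as schedule_every(msgset, time_set) or schedule_at(fileset, year, month, day, hour, minute).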
Exemple #20
0
class Host:

    def __init__(self, storage_id, queue_name, policy_uri,
                 log_group=None, metrics=None, output_dir=None):
        logging.basicConfig(level=logging.INFO, format='%(message)s')
        log.info("Running Azure Cloud Custodian Self-Host")

        resources.load_available()

        self.session = local_session(Session)
        self.storage_session = self.session
        storage_subscription_id = ResourceIdParser.get_subscription_id(storage_id)
        if storage_subscription_id != self.session.subscription_id:
            self.storage_session = Session(subscription_id=storage_subscription_id)

        # Load configuration
        self.options = Host.build_options(output_dir, log_group, metrics)
        self.policy_storage_uri = policy_uri
        self.event_queue_id = storage_id
        self.event_queue_name = queue_name

        # Default event queue name is the subscription ID
        if not self.event_queue_name:
            self.event_queue_name = self.session.subscription_id

        # Prepare storage bits
        self.policy_blob_client = None
        self.blob_cache = {}
        self.queue_storage_account = self.prepare_queue_storage(
            self.event_queue_id,
            self.event_queue_name)

        self.queue_service = None

        # Register event subscription
        self.update_event_subscription()

        # Policy cache and dictionary
        self.policy_cache = tempfile.mkdtemp()
        self.policies = {}

        # Configure scheduler
        self.scheduler = BlockingScheduler(Host.get_scheduler_config())
        logging.getLogger('apscheduler.executors.default').setLevel(logging.ERROR)
        logging.getLogger('apscheduler').setLevel(logging.ERROR)

        # Schedule recurring policy updates
        self.scheduler.add_job(self.update_policies,
                               'interval',
                               seconds=policy_update_seconds,
                               id="update_policies",
                               next_run_time=datetime.now(),
                               executor='threadpool')

        # Schedule recurring queue polling
        self.scheduler.add_job(self.poll_queue,
                               'interval',
                               seconds=queue_poll_seconds,
                               id="poll_queue",
                               executor='threadpool')

        self.scheduler.start()

    def update_policies(self):
        """
        Enumerate all policies from storage.
        Use the MD5 hashes in the enumerated policies
        and a local dictionary to decide if we should
        bother downloading/updating each blob.
        We maintain an on-disk policy cache for future
        features.
        """
        if not self.policy_blob_client:
            self.policy_blob_client = Storage.get_blob_client_by_uri(self.policy_storage_uri,
                                                                     self.storage_session)
        (client, container, prefix) = self.policy_blob_client

        try:
            # All blobs with YAML extension
            blobs = [b for b in client.list_blobs(container) if Host.has_yaml_ext(b.name)]
        except AzureHttpError as e:
            # If blob methods are failing don't keep
            # a cached client
            self.policy_blob_client = None
            raise e

        # Filter to hashes we have not seen before
        new_blobs = self._get_new_blobs(blobs)

        # Get all YAML files on disk that are no longer in blob storage
        cached_policy_files = [f for f in os.listdir(self.policy_cache)
                               if Host.has_yaml_ext(f)]

        removed_files = [f for f in cached_policy_files if f not in [b.name for b in blobs]]

        if not (removed_files or new_blobs):
            return

        # Update a copy so we don't interfere with
        # iterations on other threads
        policies_copy = self.policies.copy()

        for f in removed_files:
            path = os.path.join(self.policy_cache, f)
            self.unload_policy_file(path, policies_copy)

        # Get updated YML files
        for blob in new_blobs:
            policy_path = os.path.join(self.policy_cache, blob.name)
            if os.path.exists(policy_path):
                self.unload_policy_file(policy_path, policies_copy)
            elif not os.path.isdir(os.path.dirname(policy_path)):
                os.makedirs(os.path.dirname(policy_path))

            client.get_blob_to_path(container, blob.name, policy_path)
            self.load_policy(policy_path, policies_copy)
            self.blob_cache.update({blob.name: blob.properties.content_settings.content_md5})

        # Assign our copy back over the original
        self.policies = policies_copy

    def _get_new_blobs(self, blobs):
        new_blobs = []
        for blob in blobs:
            md5_hash = blob.properties.content_settings.content_md5
            if not md5_hash:
                blob, md5_hash = self._try_create_md5_content_hash(blob)
            if blob and md5_hash and md5_hash != self.blob_cache.get(blob.name):
                new_blobs.append(blob)
        return new_blobs

    def _try_create_md5_content_hash(self, blob):
        # Not all storage clients provide the md5 hash when uploading a file,
        # so we need to make sure the hash exists.
        (client, container, _) = self.policy_blob_client
        log.info("Applying md5 content hash to policy {}".format(blob.name))

        try:
            # Get the blob contents
            blob_bytes = client.get_blob_to_bytes(container, blob.name)

            # Re-upload the blob. validate_content ensures that the md5 hash is created
            client.create_blob_from_bytes(container, blob.name, blob_bytes.content,
                validate_content=True)

            # Re-fetch the blob with the new hash
            hashed_blob = client.get_blob_properties(container, blob.name)

            return hashed_blob, hashed_blob.properties.content_settings.content_md5
        except AzureHttpError as e:
            log.warning("Failed to apply a md5 content hash to policy {}. "
                        "This policy will be skipped.".format(blob.name))
            log.error(e)
            return None, None

    def load_policy(self, path, policies):
        """
        Loads a YAML file and prompts scheduling updates
        :param path: Path to YAML file on disk
        :param policies: Dictionary of policies to update
        """
        with open(path, "r") as stream:
            try:
                policy_config = yaml.safe_load(stream)
                new_policies = PolicyCollection.from_data(policy_config, self.options)

                if new_policies:
                    for p in new_policies:
                        log.info("Loading Policy %s from %s" % (p.name, path))

                        p.validate()
                        policies.update({p.name: {'policy': p}})

                        # Update periodic
                        policy_mode = p.data.get('mode', {}).get('type')
                        if policy_mode == CONTAINER_TIME_TRIGGER_MODE:
                            self.update_periodic(p)
                        elif policy_mode != CONTAINER_EVENT_TRIGGER_MODE:
                            log.warning(
                                "Unsupported policy mode for Azure Container Host: {}. "
                                "{} will not be run. "
                                "Supported policy modes include \"{}\" and \"{}\"."
                                .format(
                                    policy_mode,
                                    p.data['name'],
                                    CONTAINER_EVENT_TRIGGER_MODE,
                                    CONTAINER_TIME_TRIGGER_MODE
                                )
                            )

            except Exception as exc:
                log.error('Invalid policy file %s %s' % (path, exc))

    def unload_policy_file(self, path, policies):
        """
        Unload a policy file that has changed or been removed.
        Take the copy from disk and pop all policies from dictionary
        and update scheduled jobs.
        """
        with open(path, "r") as stream:
            try:
                policy_config = yaml.safe_load(stream)
            except yaml.YAMLError as exc:
                log.warning('Failure loading cached policy for cleanup %s %s' % (path, exc))
                os.unlink(path)
                return path

        try:
            # Some policies might have bad format, so they have never been loaded
            removed = [policies.pop(p['name'])
                       for p in policy_config.get('policies', [])
                       if p['name'] in policies]
            log.info('Removing policies %s' % removed)

            # update periodic
            periodic_names = \
                [p['name'] for p in policy_config.get('policies', [])
                 if p.get('mode', {}).get('schedule')]
            periodic_to_remove = \
                [p for p in periodic_names if p in [j.id for j in self.scheduler.get_jobs()]]

            for name in periodic_to_remove:
                self.scheduler.remove_job(job_id=name)
        except (AttributeError, KeyError) as exc:
            log.warning('Failure loading cached policy for cleanup %s %s' % (path, exc))

        os.unlink(path)
        return path

    def update_periodic(self, policy):
        """
        Update scheduled policies using cron type
        periodic scheduling.
        """
        trigger = CronTrigger.from_crontab(policy.data['mode']['schedule'])
        self.scheduler.add_job(Host.run_policy,
                               trigger,
                               id=policy.name,
                               name=policy.name,
                               args=[policy, None, None],
                               coalesce=True,
                               max_instances=1,
                               replace_existing=True,
                               misfire_grace_time=60)

    def update_event_subscription(self):
        """
        Create a single event subscription to channel
        all events to an Azure Queue.
        """
        log.info('Updating event grid subscriptions')
        destination = StorageQueueEventSubscriptionDestination(
            resource_id=self.queue_storage_account.id, queue_name=self.event_queue_name)

        # Build event filter
        event_filter = EventSubscriptionFilter(
            included_event_types=['Microsoft.Resources.ResourceWriteSuccess'])

        # Update event subscription
        AzureEventSubscription.create(destination,
                                      self.event_queue_name,
                                      self.session.get_subscription_id(),
                                      self.session, event_filter)

    def poll_queue(self):
        """
        Poll the Azure queue and loop until
        there are no visible messages remaining.
        """
        # Exit if we don't have any policies
        if not self.policies:
            return

        if not self.queue_service:
            self.queue_service = Storage.get_queue_client_by_storage_account(
                self.queue_storage_account,
                self.storage_session)

        while True:
            try:
                messages = Storage.get_queue_messages(
                    self.queue_service,
                    self.event_queue_name,
                    num_messages=queue_message_count,
                    visibility_timeout=queue_timeout_seconds)
            except AzureHttpError:
                self.queue_service = None
                raise

            if len(messages) == 0:
                break

            log.info('Pulled %s events to process while polling queue.' % len(messages))

            for message in messages:
                if message.dequeue_count > max_dequeue_count:
                    Storage.delete_queue_message(self.queue_service,
                                                 self.event_queue_name,
                                                 message=message)
                    log.warning("Event deleted due to reaching maximum retry count.")
                else:
                    # Run matching policies
                    self.run_policies_for_event(message)

                    # We delete events regardless of policy result
                    Storage.delete_queue_message(
                        self.queue_service,
                        self.event_queue_name,
                        message=message)

    def run_policies_for_event(self, message):
        """
        Find all policies subscribed to this event type
        and schedule them for immediate execution.
        """
        # Load up the event
        event = json.loads(base64.b64decode(message.content).decode('utf-8'))
        operation_name = event['data']['operationName']

        # Execute all policies matching the event type
        for k, v in self.policies.items():
            events = v['policy'].data.get('mode', {}).get('events')
            if not events:
                continue
            events = AzureEvents.get_event_operations(events)
            if operation_name.upper() in (event.upper() for event in events):
                self.scheduler.add_job(Host.run_policy,
                                       id=k + event['id'],
                                       name=k,
                                       args=[v['policy'],
                                             event,
                                             None],
                                       misfire_grace_time=60 * 3)

    def prepare_queue_storage(self, queue_resource_id, queue_name):
        """
        Create a storage client using unusual ID/group reference
        as this is what we require for event subscriptions
        """

        storage_client = self.storage_session \
            .client('azure.mgmt.storage.StorageManagementClient')

        account = storage_client.storage_accounts.get_properties(
            ResourceIdParser.get_resource_group(queue_resource_id),
            ResourceIdParser.get_resource_name(queue_resource_id))

        Storage.create_queue_from_storage_account(account,
                                                  queue_name,
                                                  self.session)
        return account

    @staticmethod
    def run_policy(policy, event, context):
        try:
            policy.push(event, context)
        except Exception:
            log.exception("Policy Failed: %s", policy.name)

    @staticmethod
    def build_options(output_dir=None, log_group=None, metrics=None):
        """
        Initialize the Azure provider to apply global config across all policy executions.
        """
        if not output_dir:
            output_dir = tempfile.mkdtemp()
            log.warning('Output directory not specified.  Using directory: %s' % output_dir)

        config = Config.empty(
            **{
                'log_group': log_group,
                'metrics': metrics,
                'output_dir': output_dir
            }
        )

        return Azure().initialize(config)

    @staticmethod
    def get_scheduler_config():
        if os.name == 'nt':
            executor = "apscheduler.executors.pool:ThreadPoolExecutor"
        else:
            executor = "apscheduler.executors.pool:ProcessPoolExecutor"

        return {
            'apscheduler.jobstores.default': {
                'type': 'memory'
            },
            'apscheduler.executors.default': {
                'class': executor,
                'max_workers': '4'
            },
            'apscheduler.executors.threadpool': {
                'type': 'threadpool',
                'max_workers': '20'
            },
            'apscheduler.job_defaults.coalesce': 'true',
            'apscheduler.job_defaults.max_instances': '1',
            'apscheduler.timezone': 'UTC',
        }

    @staticmethod
    def has_yaml_ext(filename):
        return filename.lower().endswith(('.yml', '.yaml'))

    @staticmethod
    @click.command(help="Periodically run a set of policies from an Azure storage container "
                        "against a single subscription. The host will update itself with new "
                        "policies and event subscriptions as they are added.")
    @click.option("--storage-id", "-q", envvar=ENV_CONTAINER_STORAGE_RESOURCE_ID, required=True,
                  help="The resource id of the storage account to create the event queue in")
    @click.option("--queue-name", "-n", envvar=ENV_CONTAINER_QUEUE_NAME,
                  help="The name of the event queue to create")
    @click.option("--policy-uri", "-p", envvar=ENV_CONTAINER_POLICY_URI, required=True,
                  help="The URI to the Azure storage container that holds the policies")
    @click.option("--log-group", "-l", envvar=ENV_CONTAINER_OPTION_LOG_GROUP,
                  help="Location to send policy logs")
    @click.option("--metrics", "-m", envvar=ENV_CONTAINER_OPTION_METRICS,
                  help="The resource name or instrumentation key for uploading metrics")
    @click.option("--output-dir", "-d", envvar=ENV_CONTAINER_OPTION_OUTPUT_DIR,
                  help="The directory for policy output")
    def cli(**kwargs):
        Host(**kwargs)
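update_periodic() builds its trigger with CronTrigger.from_crontab(), so a time-triggered policy served by this host carries a standard five-field crontab expression under mode.schedule. The snippet below is an illustration only; the policy name, resource type, and the assumed literal value of CONTAINER_TIME_TRIGGER_MODE ('container-periodic') are not taken from this excerpt.

from apscheduler.triggers.cron import CronTrigger

# Hypothetical policy document a container host like the one above could consume.
policy_config = {
    'policies': [{
        'name': 'stop-tagged-vms',         # illustrative policy name
        'resource': 'azure.vm',            # illustrative resource type
        'mode': {
            'type': 'container-periodic',  # assumed value of CONTAINER_TIME_TRIGGER_MODE
            'schedule': '0 22 * * *',      # crontab: every day at 22:00 (scheduler timezone is UTC)
        },
    }]
}

# update_periodic() turns that schedule into an APScheduler trigger:
trigger = CronTrigger.from_crontab(policy_config['policies'][0]['mode']['schedule'])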
Exemple #21
0
class JobScheduler(metaclass=Singleton):
    __function_dict__ = {}

    def __init__(self):
        self.logger = Log().logger
        self.scheduler = BlockingScheduler(logger=self.logger)
        self.parser = DateTimeParser()

    def get_lives(self):
        return {job.name: job for job in self.scheduler.get_jobs()}

    def add_job(self,
                name,
                trigger,
                trigger_args,
                func_args=None,
                func_kwargs=None):
        if name not in self.get_lives():
            func = self.get_functions(name)
            self.scheduler.add_job(func,
                                   trigger=trigger,
                                   args=func_args,
                                   kwargs=func_kwargs,
                                   id=name,
                                   name=name,
                                   **trigger_args)
            self.logger.info('[%s][%s] job added and started' % (trigger, name))

    def add_date_job(self, name, time, func_args=None, func_kwargs=None):
        time = self.parser.set_date(time).set_time(time).datetime
        self.add_job(name, 'date', {'run_date': time}, func_args, func_kwargs)

    def add_interval_job(self,
                         name,
                         weeks=0,
                         days=0,
                         hours=0,
                         minutes=0,
                         seconds=0,
                         func_args=None,
                         func_kwargs=None):
        self.add_job(
            name, 'interval', {
                'weeks': weeks,
                'days': days,
                'hours': hours,
                'minutes': minutes,
                'seconds': seconds
            }, func_args, func_kwargs)

    def add_cron_job(self,
                     name,
                     year=None,
                     month=None,
                     day=None,
                     week=None,
                     day_of_week=None,
                     hour=None,
                     minute=None,
                     second=None,
                     func_args=None,
                     func_kwargs=None):
        self.add_job(
            name, 'cron', {
                'year': year,
                'month': month,
                'day': day,
                'week': week,
                'day_of_week': day_of_week,
                'hour': hour,
                'minute': minute,
                'second': second
            }, func_args, func_kwargs)

    def add_started_job(self,
                        name,
                        after_seconds=1,
                        func_args=None,
                        func_kwargs=None):
        time = datetime.now() + timedelta(seconds=after_seconds)
        self.add_date_job(name, time, func_args, func_kwargs)

    def delete_job(self, name):
        jobs = self.get_lives()
        if name in jobs:
            self.scheduler.remove_job(jobs[name].id)
            self.logger.info('Job [%s] removed' % name)

    @classmethod
    def register(cls, name):
        def add_method(f):
            cls.__function_dict__[name.strip()] = f
            return f

        return add_method

    def get_functions(self, name):
        return self.__function_dict__.get(name.strip())

    def get_function_doc(self, name):
        return self.get_functions(name).__doc__

    def get_function_names(self):
        return sorted(self.__function_dict__.keys())
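Because JobScheduler resolves jobs through its registry, a function must be registered under a name before it can be scheduled. A short usage sketch follows, with an illustrative task name.

# 'heartbeat' is a made-up task name used only for illustration.
@JobScheduler.register('heartbeat')
def heartbeat():
    """Log a liveness message."""
    print('still alive')


scheduler = JobScheduler()
scheduler.add_interval_job('heartbeat', seconds=30)  # run the registered function every 30 seconds
scheduler.scheduler.start()  # BlockingScheduler blocks the calling thread until shutdown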