class Engine():
    def __init__(self):
        self.logger = get_logger('Core',
                                 True)  # core modules share the same logger

        self.Scheduler = Scheduler(self.logger)
        self.Downloader = Downloader(self.logger)
        self.Uploader = Uploader(self.logger)
        self.Monitor = Monitor(self.logger)

    def _do_register(self):
        user = GlobalConfig.Deploy_dict['user']
        password = GlobalConfig.Deploy_dict['password']
        self.logger.info('registering START: %s' % user)
        RegisterSuccess = do_register(user, password, self.logger)
        self.logger.info('registering END: %s' % str(RegisterSuccess))
        return RegisterSuccess

    def start(self):
        if self._do_register():
            self.logger.info('---engine START---')

            self.Scheduler.start_threads()
            self.Monitor.start_threads()
            # Downloader uses spiders, which use Status, so Monitor must start first
            self.Downloader.start_threads()
            self.Uploader.start_threads()

        else:
            self.logger.info('---engine START failed---')

    def stop(self):
        pass
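
A minimal usage sketch for the Engine above, assuming GlobalConfig.Deploy_dict is already populated with the 'user' and 'password' entries read during registration:

if __name__ == '__main__':
    engine = Engine()  # wires up the shared logger plus Scheduler, Downloader, Uploader, Monitor
    engine.start()     # registers first; the worker threads only start if registration succeeds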
Example #2
 def __init__(self, data_bus):
     self.endpoint = Endpoint(data_bus)
     self.phs = PhysicsSystem()
     self.em = EntityManager(self.phs)
     self.resources = ResourceStorage()
     self.scheduler = Scheduler()
     self.effects = EffectFactory()
     self.em.load_statics()
Example #4
 def __init__(self, executor=MultiprocessedExecutor):
     """Initializes a Controller object.
     
     Args:
         executor (cls, optional): The executor to use in the controller. Defaults to MultiprocessedExecutor
     """
     self.uid = 'controller'
     self.playbook_store = PlaybookStore()
     self.scheduler = Scheduler()
     self.executor = executor()
Example #5
 def __init__(self, start_monitor=True):
     self.init()
     self.number_dict = {core.constant.TOTAL_TASK: 0, core.constant.TOTAL_REQUEST: 0,
                         core.constant.TOTAL_RESPONSE: 0}
     self.color = core.constant.COLOR
     self.close = False
     self.loop = asyncio.get_event_loop()
     self.filter = core.bloomFilter.bloomFilterContext.get_filter(settings.PROJECT_NAME)
     self.scheduler = Scheduler(self)
     self.downloader = Downloader(self, settings.DOWNLOADER_WORKER)
     self.save = Save(self, settings.SAVE_WORKER)
     self.monitor = Monitor(self)
     self.start_monitor = start_monitor
Example #7
    def process(self):
        """
        TODO: handle requests.
        1] Open the websocket to RMS.
        2] Create waitingQueue and runningQueue and define their maximum sizes.
        3] Launch the consumer thread, then monitor runningQueue.
        4] The two threads do their work.
            How to use the queues?
            MainThread
            1] Creates waitingQueue and acts as a producer: whenever the main thread receives a client request, it puts the request into waitingQueue.
            2] Whenever the main thread receives a client release, it asks RMS to release the resources, then removes the resource from runningQueue.
                How to remove resources from runningQueue needs to be discussed with RMS.

            ConsumerThread
            1] Creates runningQueue and acts as a consumer; runningQueue has a maximum size.
                When runningQueue reaches its maximum size, the consumer thread sleeps 1 second, then polls again.
                When a slot in runningQueue frees up, the consumer thread takes a new resource from the waiting queue:
                    if the unit is reported BUSY by RMS, put it at the end of the waiting queue, then loop to the next one;
                    if the unit is reported Y/N by RMS, add it to runningQueue and return it to the client.
                    This needs to be discussed with RMS.
        """

        self.wsRms.connect()

        self.scheduler = Scheduler(self)
        self.scheduler.setDaemon(True)
        self.scheduler.start()

        while not self.stop:
            json = self.wsEngine.receive()
            if json is None:
                time.sleep(1)
                continue
            print "------->Receive from lib: %s" % json
            message = Message().restore(json)

            if message.getCmd() == Message.CMD_REGISTER:
                self.waitingQueue.append(message)

            elif message.getCmd() == Message.CMD_RELEASE:
                self.wsRms.release(message.getRes())
                self.runningQueue.remove(message)

        self.scheduler.stop()
Example #8
    def __init__(self, machine_configs, task_configs, algorithm, event_file):
        self.env = simpy.Environment()
        cluster = Cluster()
        cluster.add_machines(machine_configs)

        task_broker = Episode.broker_cls(self.env, task_configs)

        scheduler = Scheduler(self.env, algorithm)

        self.simulation = Simulation(self.env, cluster, task_broker, scheduler,
                                     event_file)
Example #9
def start_server(server_address: str):
    """
    Start the gRPC service.
    :param server_address: service address, ip:port
    :return:
    """

    # Create a thread to run the scheduled tasks
    scheduler = Scheduler([])
    scheduler.start()
    # gRPC currently only supports concurrent.futures.ThreadPoolExecutor
    # see https://grpc.io/grpc/python/grpc.html#create-server
    thread_pool = futures.ThreadPoolExecutor(max_workers=10)
    grpc_server = grpc.server(thread_pool, maximum_concurrent_rpcs=10)
    quote_service_pb2_grpc.add_QuoteServiceServicer_to_server(
        QuoteService(), grpc_server)
    grpc_server.add_insecure_port(server_address)
    grpc_server.start()

    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    # Release all resources before the process shuts down
    except (KeyboardInterrupt, SystemExit):
        # The process pool must be closed first; join() blocks the main thread
        _POOL.close()
        _POOL.join()
        print("POOL is closed!")
        # Then stop the gRPC server with a 20-second grace period so in-flight requests can return normally
        grpc_server.stop(20)
        print("grpc server is closed!")
        # Make sure all background resources are shut down cleanly
        scheduler.terminate()
        print("Service exits.")
Example #10
class Handler(object):

    def __init__(self, request):
        self.request = request
        self.wsEngine = request.META.get('wsgi.websocket', None)
        self.wsRms = RmsWsClient(self)

        self.stopped = False
        self.waitingQueue = MyQueue()
        self.runningQueue = MyQueue()


    def process(self):
        """
        TODO: handle requests.
        1] Open the websocket to RMS.
        2] Create waitingQueue and runningQueue and define their maximum sizes.
        3] Launch the consumer thread, then monitor runningQueue.
        4] The two threads do their work.
            How to use the queues?
            MainThread
            1] Creates waitingQueue and acts as a producer: whenever the main thread receives a client request, it puts the request into waitingQueue.
            2] Whenever the main thread receives a client release, it asks RMS to release the resources, then removes the resource from runningQueue.
                How to remove resources from runningQueue needs to be discussed with RMS.

            ConsumerThread
            1] Creates runningQueue and acts as a consumer; runningQueue has a maximum size.
                When runningQueue reaches its maximum size, the consumer thread sleeps 1 second, then polls again.
                When a slot in runningQueue frees up, the consumer thread takes a new resource from the waiting queue:
                    if the unit is reported BUSY by RMS, put it at the end of the waiting queue, then loop to the next one;
                    if the unit is reported Y/N by RMS, add it to runningQueue and return it to the client.
                    This needs to be discussed with RMS.
        """

        self.wsRms.connect()

        self.scheduler = Scheduler(self)
        self.scheduler.setDaemon(True)
        self.scheduler.start()

        while not self.stopped:
            json = self.wsEngine.receive()
            if json is None:
                time.sleep(1)
                continue
            print "------->Receive from lib: %s" % json
            message = Message().restore(json)

            if message.getCmd() == Message.CMD_REGISTER:
                self.waitingQueue.append(message)

            elif message.getCmd() == Message.CMD_RELEASE:
                self.wsRms.release(message.getRes())
                self.runningQueue.remove(message)

        self.scheduler.stop()

    def stop(self):
        self.stopped = True
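
MyQueue itself is not shown in this example. A minimal thread-safe sketch of the interface the Handler relies on (append and remove, plus a pop for the consumer side) could look like the following; the class name matches the snippet, but the implementation is an assumption:

import threading

class MyQueue(object):
    # list guarded by a lock, shared between the main thread and the scheduler thread
    def __init__(self):
        self._items = []
        self._lock = threading.Lock()

    def append(self, item):
        with self._lock:
            self._items.append(item)

    def remove(self, item):
        with self._lock:
            self._items.remove(item)

    def pop(self):
        # returns None when empty so callers can sleep and poll again
        with self._lock:
            return self._items.pop(0) if self._items else None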
Example #11
    def epoch(self, settings_flo):
        """After initialization, execution begins here.

        This is called by the `main` routine in `dia.py`.  It should
        not be necessary for this to be called by any other code.
        """

        # Delay further initialization until the system is fully available:
        self.__wait_until_system_ready()

        print "Core: initial garbage collection of %d objects." % (gc.collect())

        # Allow the core to stand in as the global SettingsBase instance:
        self.set_service("settings_base", self)

        # Load initial settings:
        self.load_settings(self.__settings_filename, settings_flo)
        settings_flo.close()

        try:
            print "Core: post-settings garbage " + \
                   "collection of %d objects." % (gc.collect())
            print "Core: Starting Tracing Manager...", # <- the ',' belongs there
            TracingManager(core_services=self)
            print "Core: Starting Scheduler..."
            Scheduler(core_services=self)
            print "Core: Starting Channel Manager..."
            ChannelManager(core_services=self)
            print "Core: Starting Device Driver Manager..."
            DeviceDriverManager(core_services=self)
            print "Core: Starting Presentation Manager..."
            PresentationManager(core_services=self)
            print "Core: Starting Services Manager..."
            ServiceManager(core_services=self)

            ##### DOCUMENTATION REMINDER: #########################
            # If you add objects as core services to the system,
            # please remember to add them to the get_service docstring.
            #######################################################

        except KeyboardInterrupt:  # pragma: no cover
            print "Core: Caught keyboard interrupt. Stopping..."
            self._shutdown()
            return  # don't print "Core services started."
        except:
            print "Core: Exception during core initialization:"
            traceback.print_exc()
            raise Exception("Fatal exception during initialization.")
        print "Core services started."
Example #12
 def start(self):
     cherrypy.config['tools.encode.on'] = True
     cherrypy.config['tools.encode.encoding'] = 'utf-8'
     cherrypy.config['tools.sessions.on'] = True
     cherrypy.config['server.environment'] = 'production'
     cherrypy.config['engine.autoreload_on'] = True
     cherrypy.config['engine.autoreload_frequency'] = 5
     
     configfile = get_absolute_path('web.ini')
     cherrypy.config.update(configfile)
     cherrypy.config.update(self.config)
     
     app = cherrypy.tree.mount(root=None, config=configfile)
     app.merge(self.config)
     
     cherrypy.engine.autoreload.files.add(get_absolute_path('core.ini'))
     cherrypy.engine.start()
     log.info('Server started...')
     
     # Start snatch scheduler
     scheduler = Scheduler()
     scheduler.start()
     
     cherrypy.engine.block()
Example #13
    def test(self):

        p = []
        pp = []
        init_queue()
        init_bloomFilter()
        for i in range(10):
            print('start TB process,%d' % i)
            # p.append(TB.new_process(TB,))
            # pp.start()
            p.append(Scheduler(None))
            p.append(TBB(None))
        for i in p:
            x = multiprocessing.Process(target=i.run)
            pp.append(x)
        for i in pp:
            i.start()
        for i in pp:
            i.join()
Example #14
def main():
    schedulers = None  # will reference a list of Scheduler instances

    def exit_dtrace(signum, stack):
        """ shuts down all schedulers (running threads) and exits"""
        map(lambda s: s.stop(), schedulers)
        print
        sys.exit()

    arg_parser = get_arg_parser()
    args = arg_parser.parse_args()  # get CLI arguments
    config_logger(args.e, args.d)  # configure Python logging facility
    signal.signal(signal.SIGINT, exit_dtrace)  # register Interrupt signal
    signal.signal(signal.SIGTERM, exit_dtrace)  # register Terminate signal
    signal.signal(signal.SIGHUP, exit_dtrace)  # register Terminal HangUp
    signal.signal(signal.SIGALRM, exit_dtrace)  # register alarm

    try:
        # Instantiate a Scheduler instance for each config file given at CLI.
        schedulers = [Scheduler(f, args.r) for f in args.session_config_file]
    except IOError as err:
        # Failed to open a system config file.
        sys.stderr.write('%s: error: %s- %s\n\n' %
                         (arg_parser.prog, err.strerror, err.filename))
        sys.exit(2)  # exit with error - 2 for CLI syntax errors
    except ValueError as err:
        # Content error in system config file.
        sys.stderr.write('%s: error: %s- %s\n\n' %
                         (arg_parser.prog, err.args[1], err.args[0]))
        sys.exit(1)  # exit with error
    except ImportError as err:
        # Failed to load fault injector file.
        sys.stderr.write('%s: error: %s\n\n' % (arg_parser.prog, err.args[0]))
        sys.exit(1)  # exit with error

    # Scheduler is derived from Thread.  This will start each Thread.
    map(lambda s: s.start(), schedulers)

    # Setup alarm for a fixed duration session if necessary.
    if args.time: signal.alarm(args.time)

    # All FaultSims (threads) are running, main thread now waits for OS signal.
    signal.pause()
Example #15
 def __init__(self, token: str):
     self.logger = logging.getLogger(self.__class__.__name__)
     self.token = token
     self.updater = Updater(token)
     self.scheduler = Scheduler(self.updater.job_queue)
     self.dispatcher = self.updater.dispatcher
Example #16
class Core():
    def __init__(self):  #, spider_group, task_getter):
        # self.spider_group = spider_group
        # self.task_getter = task_getter
        self.spiders = self._auto_import_cls(SPIDERS, True)
        self.pool = Pool()
        self.pipelines = self._auto_import_cls(PIPELINES)
        self.spider_mids = self._auto_import_cls(SPIDER_MIDDLEWARES)
        #self.downloader_mids = downloader_mids
        self.downloader_mids = self._auto_import_cls(DOWNLOADER_MIDDLEWARES)
        self.scheduler = Scheduler(ROLE, QUEUE_TYPE)
        self.downloader = Downloader()
        self.is_running = True
        self.total_response = 0
        self.executor = BaseThreadPoolExecutor(max_workers=ASYNC_COUNT)

    def _auto_import_cls(self, path_list=[], is_spider=False):
        if is_spider:
            instances = {}
        else:
            instances = []

        import importlib

        for path in path_list:

            if is_spider:
                module_name = 'crawlers.' + path[:path.rfind(".")]
                class_name = path[path.rfind(".") + 1:]
                result = importlib.import_module(module_name)
                cls = getattr(result, class_name)
                instances[cls.name] = cls()
                print(f'Spider "{cls.name}" loaded')

            else:
                module_name = path[:path.rfind(".")]
                class_name = path[path.rfind(".") + 1:]
                result = importlib.import_module(module_name)
                cls = getattr(result, class_name)
                instances.append(cls())
                print(f'"{cls.__name__}" loaded')
        return instances
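
The dotted paths handed to _auto_import_cls come from the project settings. A hypothetical settings module illustrating the convention the method expects (spider paths are resolved relative to the crawlers package, the others are absolute module paths); all values are illustrative only:

# settings.py -- illustrative values only
SPIDERS = ['baidu.BaiduSpider']  # imported as crawlers.baidu.BaiduSpider
PIPELINES = ['pipelines.ConsolePipeline']
SPIDER_MIDDLEWARES = ['middlewares.SpiderMiddleware']
DOWNLOADER_MIDDLEWARES = ['middlewares.DownloaderMiddleware']
ROLE = 'master'       # the master enqueues the start requests
QUEUE_TYPE = 'memory'
ASYNC_COUNT = 5       # batch size and thread pool width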

    def _start_engine(self):
        # The master only adds requests, so total_request keeps increasing,
        # but it does not send requests, so total_response does not
        if ROLE == "master" or ROLE is None:
            # The engine's work is split into two parts:
            # 1. process the start_requests and store them in the scheduler
            #self._execute_start_requests()
            self.pool.apply_async(self._execute_start_requests)

        while True:
            time.sleep(0.01)
            li_req = self.scheduler.get_batch_requests(ASYNC_COUNT)
            if not li_req:
                continue
            tasks = [
                self.executor.submit(self._execute_request_return_item, req)
                for req in li_req
            ]
            for fu in as_completed(tasks):
                fu.result()
            if self.scheduler.total_request == self.total_response and self.scheduler.total_request != 0:
                self.is_running = False
                break
        print("Main Thread is over!")

    # def _callback(self, _):
    #     if self.is_running:
    #         self.pool.apply_async(self._execute_request_response_item, callback=self._callback)
    def start(self):
        # start time
        start = datetime.now()
        print("Start time : {}".format(start))
        print("----" * 30)

        self._start_engine()

        # end time
        end = datetime.now()

        print("----" * 30)
        print("End time : {}".format(end))
        # total running time
        print("Elapsed time : {}".format((end - start).total_seconds()))

    def _execute_start_requests(self):
        # Put the requests from every spider's start_urls into the same scheduler
        # [("baidu", baidu_spider), ("douban", douban_spider)]
        for spider_name, spider in self.spiders.items():
            print(spider_name, spider)
            # 1. Get the first batch of requests from the spider and hand them to the scheduler
            #request = self.spider.start_requests()
            for request in spider.start_requests():
                # Tag the request with the spider name the first time it is handled,
                # so the name propagates to requests extracted later
                request.spider_name = spider_name
                # 1.1 Pass the request through the spider middlewares and get back the processed request
                for spider_mid in self.spider_mids:
                    request = spider_mid.process_request(request, spider)

                self.scheduler.add_request(request)

    def _execute_request_response_item(self):
        # Each loop iteration handles a single request belonging to one spider
        #while True:
        # 2. Take a request from the scheduler and hand it to the downloader;
        #    the downloader returns a response for the spider to parse
        request = self.scheduler.get_request()

        if not request:
            #break
            return

        # Get the spider object this request belongs to
        spider = self.spiders[request.spider_name]

        # 2.1 Pass the request through the downloader middlewares and get back the processed request
        for downloader_mid in self.downloader_mids:
            request = downloader_mid.process_request(request, spider)

        response = self.downloader.send_request(request)
        # 2.2 Pass the response through the downloader middlewares and get back the processed response
        for downloader_mid in self.downloader_mids:
            response = downloader_mid.process_response(response, spider)
        # Hand the response to the spider for parsing
        # parse_func = spider.parse(response)

        # one of the spider's parse methods, e.g. parse, parse_page
        # getattr(spider, "parse_page")
        # Dynamically look up the callback this request names on the spider and parse the response with it

        callback_func = getattr(spider, request.callback)
        parse_func = callback_func(response)

        for item_or_request in parse_func:
            # 3. Check the parse result: requests go back to the scheduler; items go to the pipelines
            if isinstance(item_or_request, LRequest):
                item_or_request.spider_name = spider.name

                for spider_mid in self.spider_mids:
                    item_or_request = spider_mid.process_request(
                        item_or_request, spider)

                self.scheduler.add_request(item_or_request)

            elif isinstance(item_or_request, Item):
                for spider_mid in self.spider_mids:
                    item_or_request = spider_mid.process_item(
                        item_or_request, spider)

                for pipeline in self.pipelines:
                    item_or_request = pipeline.process_item(
                        item_or_request, spider)
            else:
                raise Exception("Unsupported data type: <{}>".format(
                    type(item_or_request)))

        self.total_response += 1

    def _execute_request_return_item(self, request: LRequest):

        if not request:
            return

        spider = self.spiders[request.spider_name]

        for downloader_mid in self.downloader_mids:
            request = downloader_mid.process_request(request, spider)
        try:
            response = self.downloader.send_request(request)
        except Exception as e:
            spider.logger.error(f'Request to {request.url} failed: ' + str(e))
            return
        for downloader_mid in self.downloader_mids:
            response = downloader_mid.process_response(response, spider)

        callback_func = getattr(spider, request.callback)
        try:
            parse_func = callback_func(response)
            for item_or_request in parse_func:
                if isinstance(item_or_request, LRequest):
                    item_or_request.spider_name = spider.name

                    for spider_mid in self.spider_mids:
                        item_or_request = spider_mid.process_request(
                            item_or_request, spider)

                    self.scheduler.add_request(item_or_request)

                elif isinstance(item_or_request, Item):
                    for spider_mid in self.spider_mids:
                        item_or_request = spider_mid.process_item(
                            item_or_request, spider)

                    for pipeline in self.pipelines:
                        item_or_request = pipeline.process_item(
                            item_or_request, spider)
                else:
                    raise Exception("Unsupported data type: <{}>".format(
                        type(item_or_request)))
        except Exception as e:
            spider.logger.error(f'Failed to parse {request.url}: ' + str(e) +
                                f' (response code [{response.status_code}])')
            return
        self.total_response += 1
Example #17
def scheduler():
    return Scheduler(node=Node())
Example #18
class BasePipe(object):
    fetcher_class = BaseFetcher
    modeller_class = BaseModeller
    publisher_class = BasePublisher

    def __init__(self):
        self.post_interval = 10 * 60
        self.channel_id = None
        self.updater = None
        self.scheduler = None

    def set_up(self, channel_id: str, updater: Updater, **kwargs):
        self.channel_id = channel_id
        self.updater = updater
        self.scheduler = Scheduler(self.updater.job_queue)

    def start_posting_cycle(self):
        self.pre_cycle_hook()
        self.scheduler.run_once(self._fetch, 0)

    def pre_cycle_hook(self):
        pass

    # fetch
    def _fetch(self):
        self.pre_fetch_hook()
        data = self.fetch()
        self._pre_model_filter(data)

    def fetch(self):
        fetcher = self.get_fetcher()
        return fetcher.fetch()

    def get_fetcher(self) -> BaseFetcher:
        return self.fetcher_class()

    def pre_fetch_hook(self):
        pass

    # pre model filter
    def _pre_model_filter(self, data):
        data = self.pre_model_filter(data)
        self._model(data)

    def pre_model_filter(self, data):
        filters = self.get_pre_filters()
        for flr in filters:
            data = flr.filter(data)
        return data

    def get_pre_filters(self) -> List[BaseFilter]:
        return []

    # model
    def _model(self, data):
        posts = self.model(data)
        self._post_model_filter(posts)

    def model(self, data):
        modeller = self.get_modeller()
        return modeller.model(data)

    def get_modeller(self) -> BaseModeller:
        return self.modeller_class()

    # post model filter
    def _post_model_filter(self, posts):
        posts = self.post_model_filter(posts)
        self._publish(posts)

    def post_model_filter(self, posts):
        filters = self.get_post_filters()
        for flr in filters:
            posts = flr.filter(posts)
        return posts

    def get_post_filters(self) -> List[BaseFilter]:
        return []

    # publish
    def _publish(self, posts):
        publisher = self.get_publisher()
        self.scheduler.run_once(self.schedule_posts, 0, None, publisher, posts)

    def get_publisher(self) -> BasePublisher:
        return self.publisher_class(self.channel_id, self.updater)

    def schedule_posts(self, publisher: BasePublisher, posts: list):
        """
        Recursively post one item and schedule posting of the next.
        After everything is posted, schedule a new fetch-model-filter-post cycle.
        :param publisher:
        :param posts:
        """
        if posts:
            head, *tail = posts
            publisher.publish(head)
            self.scheduler.run_once(self.schedule_posts, 0, None, publisher, tail)
        else:
            self.scheduler.run_once(self.start_posting_cycle, self.post_interval)
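
A hypothetical subclass sketch showing how the fetch-model-filter-publish cycle above is meant to be specialized; RssFetcher and DuplicateFilter are invented names standing in for real fetcher and filter implementations:

class NewsPipe(BasePipe):
    fetcher_class = RssFetcher  # hypothetical fetcher

    def __init__(self):
        super().__init__()
        self.post_interval = 30 * 60  # post a batch every half hour instead of every ten minutes

    def get_post_filters(self) -> List[BaseFilter]:
        return [DuplicateFilter()]  # hypothetical filter dropping already-seen posts

# pipe = NewsPipe()
# pipe.set_up('@my_channel', updater)
# pipe.start_posting_cycle()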
Example #19
class Controller(object):
    def __init__(self, executor=MultiprocessedExecutor):
        """Initializes a Controller object.
        
        Args:
            executor (cls, optional): The executor to use in the controller. Defaults to MultiprocessedExecutor
        """
        self.uid = 'controller'
        self.playbook_store = PlaybookStore()
        self.scheduler = Scheduler()
        self.executor = executor()

    def initialize_threading(self, worker_environment_setup=None):
        """Initializes threading in the executor
        """
        self.executor.initialize_threading(
            worker_environment_setup=worker_environment_setup)

    def shutdown_pool(self, num_workflows=0):
        """Shuts down the executor

        Args:
            num_workflows (int, optional): Number of workflows to wait to complete before shutting down. Defaults to 0,
                meaning that it will immediately shut down the pool upon receiving this command.
        """
        self.executor.shutdown_pool(num_workflows=num_workflows)

    def pause_workflow(self, execution_uid):
        """Pauses a workflow.

        Args:
            execution_uid (str): The execution UID of the workflow to pause
        """
        self.executor.pause_workflow(execution_uid)

    def resume_workflow(self, workflow_execution_uid):
        """Resumes a workflow that has been paused.

        Args:
            workflow_execution_uid (str): The randomly-generated hexadecimal key that was returned from
                pause_workflow(). This is needed to resume a workflow for security purposes.

        Returns:
            (bool) True if successful, False otherwise.
        """
        return self.executor.resume_workflow(workflow_execution_uid)

    def load_workflow(self, resource, workflow_name):
        """Loads a workflow from a file.

        Args:
            resource (str): Path to the workflow.
            workflow_name (str): Name of the workflow to load.

        Returns:
            True on success, False otherwise.
        """
        return self.playbook_store.load_workflow(resource, workflow_name)

    def load_playbook(self, resource):
        """Loads playbook from a file.

        Args:
            resource (str): Path to the workflow.
        """
        return self.playbook_store.load_playbook(resource)

    def load_playbooks(self, resource_collection=None):
        """Loads all playbooks from a directory.

        Args:
            resource_collection (str, optional): Path to the directory to load from. Defaults to the
                configuration workflows_path.
        """
        return self.playbook_store.load_playbooks(resource_collection)

    def schedule_workflows(self, task_id, workflow_uids, trigger):
        """Schedules workflows to be run by the scheduler

        Args:
            task_id (str|int): Id of the task to run
            workflow_uids (list[str]): UIDs of the workflows to schedule
            trigger: The type of scheduler trigger to use
        """
        playbook_workflows = self.playbook_store.get_workflows_by_uid(
            workflow_uids)
        schedule_workflows = []
        for playbook_name, workflows in playbook_workflows.items():
            for workflow in workflows:
                schedule_workflows.append(
                    (playbook_name, workflow.name, workflow.uid))
        self.scheduler.schedule_workflows(task_id, self.execute_workflow,
                                          schedule_workflows, trigger)

    def create_workflow(self, playbook_name, workflow_name):
        """Creates a workflow from a workflow template.
        
        Args:
            playbook_name (str): The name of the new playbook. 
            workflow_name (str): The name of the new workflow.

        Returns:
            True on success, False otherwise.
        """
        return self.playbook_store.create_workflow(playbook_name,
                                                   workflow_name)

    def create_playbook(self, playbook_name, workflows=None):
        """Creates a playbook from a playbook template.

        Args:
            playbook_name (str): The name of the new playbook.
            workflows (list[Workflow], optional): An optional list of Workflows to be associated with this
                Playbook. Defaults to None.
        """
        return self.playbook_store.create_playbook(playbook_name, workflows)

    def remove_workflow(self, playbook_name, workflow_name):
        """Removes a workflow.
        
        Args:
            playbook_name (str): Playbook name under which the workflow is located.
            workflow_name (str): The name of the workflow to remove.
            
        Returns:
            True on success, False otherwise.
        """
        return self.playbook_store.remove_workflow(playbook_name, workflow_name)

    def remove_playbook(self, playbook_name):
        """Removes a playbook and all workflows within it.
        
        Args:
            playbook_name (str): The name of the playbook to remove.
            
        Returns:
            True on success, False otherwise.
        """
        return self.playbook_store.remove_playbook(playbook_name)

    def get_all_workflows(self, full_representation=False, reader=None):
        """Gets all of the currently loaded workflows.

        Args:
            full_representation (bool, optional): A boolean specifying whether or not to include the JSON representation
                of all the workflows, or just their names. Defaults to False.
            reader (cls): The reader to specify how to display the Workflows. Defaults to None, which will show
                basic JSON representation of the Workflows.
        
        Returns:
            A dict with key being the playbook, mapping to a list of workflow names for each playbook.
        """
        return self.playbook_store.get_all_workflows(full_representation,
                                                     reader=reader)

    def get_all_playbooks(self):
        """Gets a list of all playbooks.
        
        Returns:
            A list containing all currently loaded playbook names.
        """
        return self.playbook_store.get_all_playbooks()

    def is_workflow_registered(self, playbook_name, workflow_name):
        """Checks whether or not a workflow is currently registered in the system.
        
        Args:
            playbook_name (str): Playbook name under which the workflow is located.
            workflow_name (str): The name of the workflow.
            
        Returns:
            True if the workflow is registered, False otherwise.
        """
        return self.playbook_store.is_workflow_registered(
            playbook_name, workflow_name)

    def is_playbook_registered(self, playbook_name):
        """Checks whether or not a playbook is currently registered in the system.
        
        Args:
            playbook_name (str): The name of the playbook.
            
        Returns:
            True if the playbook is registered, False otherwise.
        """
        return self.playbook_store.is_playbook_registered(playbook_name)

    def update_workflow_name(self, old_playbook, old_workflow, new_playbook,
                             new_workflow):
        """Update the name of a workflow.
        
        Args:
            old_playbook (str): Name of the current playbook.
            old_workflow (str): Name of the current workflow.
            new_playbook (str): The new name of the playbook.
            new_workflow (str): The new name of the workflow.
        """
        self.playbook_store.update_workflow_name(old_playbook, old_workflow,
                                                 new_playbook, new_workflow)

    def update_playbook_name(self, old_playbook, new_playbook):
        """Update the name of a playbook.
        
        Args:
            old_playbook (str): Name of the current playbook.
            new_playbook (str): The new name of the playbook.
        """
        self.playbook_store.update_playbook_name(old_playbook, new_playbook)

    def execute_workflow(self,
                         playbook_name,
                         workflow_name,
                         start=None,
                         start_input=None):
        """Executes a workflow.

        Args:
            playbook_name (str): Playbook name under which the workflow is located.
            workflow_name (str): Workflow to execute.
            start (str, optional): The name of the first, or starting step. Defaults to None.
            start_input (dict, optional): The input to the starting step of the workflow. Defaults to None.

        Returns:
            The execution UID if successful, otherwise a (None, error message) tuple.
        """
        if self.playbook_store.is_workflow_registered(playbook_name,
                                                      workflow_name):
            workflow = self.playbook_store.get_workflow(
                playbook_name, workflow_name)
            return self.executor.execute_workflow(workflow, start, start_input)
        else:
            logger.error(
                'Attempted to execute playbook which does not exist in controller'
            )
            return None, 'Attempted to execute playbook which does not exist in controller'

    def get_waiting_workflows(self):
        return self.executor.get_waiting_workflows()

    def get_workflow(self, playbook_name, workflow_name):
        """Get a workflow object.
        
        Args:
            playbook_name (str): Playbook name under which the workflow is located.
            workflow_name (str): The name of the workflow.
            
        Returns:
            The workflow object if found, else None.
        """
        return self.playbook_store.get_workflow(playbook_name, workflow_name)

    def get_all_workflows_by_playbook(self, playbook_name):
        """Get a list of all workflow objects in a playbook.
        
        Args:
            playbook_name: The name of the playbook.
            
        Returns:
            A list of all workflow objects in a playbook.
        """
        return self.playbook_store.get_all_workflows_by_playbook(playbook_name)

    def get_playbook_representation(self, playbook_name, reader=None):
        """Returns the JSON representation of a playbook.

        Args:
            playbook_name: The name of the playbook.
            reader (cls, optional): An optional different way to represent the Playbook. Defaults to None,
                meaning that it will show basic JSON representation.

        Returns:
            The JSON representation of the playbook if the playbook has any workflows under it, else None.
        """
        return self.playbook_store.get_playbook_representation(playbook_name,
                                                               reader=reader)

    def copy_workflow(self, old_playbook_name, new_playbook_name,
                      old_workflow_name, new_workflow_name):
        """Duplicates a workflow into its current playbook, or a different playbook.
        
        Args:
            old_playbook_name (str): Playbook name under which the workflow is located.
            new_playbook_name (str): The new playbook name for the duplicated workflow.
            old_workflow_name (str): The name of the workflow to be copied.
            new_workflow_name (str): The new name of the duplicated workflow.
        """
        self.playbook_store.copy_workflow(old_playbook_name, new_playbook_name,
                                          old_workflow_name, new_workflow_name)

    def copy_playbook(self, old_playbook_name, new_playbook_name):
        """Copies a playbook.
        
        Args:
            old_playbook_name (str): The name of the playbook to be copied.
            new_playbook_name (str): The new name of the duplicated playbook.
        """
        self.playbook_store.copy_playbook(old_playbook_name, new_playbook_name)

    def send_data_to_trigger(self, data_in, workflow_uids, inputs=None):
        """Tries to match the data in against the conditionals of all the triggers registered in the database.

        Args:
            data_in (dict): Data to be used to match against the triggers for a Step awaiting data.
            workflow_uids (list[str]): A list of workflow execution UIDs to send this data to.
            inputs (dict, optional): An optional dict of inputs to update for a Step awaiting data for a trigger.
                Defaults to {}.

        Returns:
            Dictionary of {"status": <status string>}
        """
        inputs = inputs if inputs is not None else {}
        if workflow_uids is not None:
            self.executor.send_data_to_trigger(data_in, workflow_uids, inputs)

    def get_workflow_status(self, execution_uid):
        """Gets the status of an executing workflow

        Args:
            execution_uid (str): Execution UID of the executing workflow

        Returns:
            (int) Status code of the executing workflow
        """
        return self.executor.get_workflow_status(execution_uid)
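
A hedged end-to-end sketch of driving the Controller above; the playbook path and the playbook/workflow names are placeholders:

controller = Controller()
controller.initialize_threading()
controller.load_playbook('playbooks/example.playbook')  # placeholder path
uid = controller.execute_workflow('example', 'main')    # execution UID on success
controller.shutdown_pool(num_workflows=1)               # wait for one workflow to finish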
Example #20
 def set_up(self, channel_id: str, updater: Updater, **kwargs):
     self.channel_id = channel_id
     self.updater = updater
     self.scheduler = Scheduler(self.updater.job_queue)
Example #21
class Game:
    def __init__(self, data_bus):
        self.endpoint = Endpoint(data_bus)
        self.phs = PhysicsSystem()
        self.em = EntityManager(self.phs)
        self.resources = ResourceStorage()
        self.scheduler = Scheduler()
        self.effects = EffectFactory()
        self.em.load_statics()

    def exec_step(self, time_delta):
        self.scheduler.exec_all()
        self.phs.exec_next(time_delta)
        self.phs.collision_computer()
        self.em.remove_all_dead()

        if self.em.bot_count < config.BOTS_COUNT:
            self.em.create_ship('bot')

    def get_state(self):
        return {
            'entities': [pl.get_info() for pl in self.em.all()],
            'effects': self.effects.get_effects()
        }

    def run(self):
        last = time.time()
        curr_step_players = {}
        while True:
            curr = time.time()
            delta = float((curr - last))
            last = curr

            curr_step_players, new_players, expire_players = self.endpoint.scan_players(
                curr_step_players)

            for player in new_players:
                self.em.create_ship('player', player.get('player_id'),
                                    player.get('player_name'))

            for player in expire_players:
                self.em.remove_ship(player)

            for pl_id, pl_data in curr_step_players.items():
                player_obj: SpaceShip = self.em.players.get(pl_id)
                if player_obj:
                    self.scheduler.add(player_obj, SpaceShip.set_shooting,
                                       pl_data.get('shooting'))
                    self.scheduler.add(player_obj, SpaceShip.set_moving,
                                       pl_data.get('angle', 0),
                                       pl_data.get('direction', 0))
                else:
                    self.em.remove_ship(pl_id)

            self.exec_step(delta)
            self.endpoint.send_data_to_player(self.get_state())

            delay = config.RPS - (time.time() - curr)
            delay = 0 if delay < 0 else delay
            yield delay
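
Game.run is a generator that yields the remaining frame budget after each tick, so the caller is expected to pace the loop; a minimal driver sketch, where data_bus stands in for whatever transport Endpoint accepts:

import time

game = Game(data_bus)
for delay in game.run():
    time.sleep(delay)  # sleep off the time left in the frame, per config.RPS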
Example #22
def configure_extensions(app):
    from core.scheduler import Scheduler
    app.scheduler = Scheduler()
    app.scheduler.start()
Example #23
        "course_title": x["@CourseTitle"],
        "course_teacher": x["@Teacher"],
        "course_teacher_email": x["@TeacherEmail"],
        "course_room": x["@RoomName"]
    }


# schedule handling
async def on_schedule_event(**kwargs):
    #if kwargs["type"] in c_sorted_period_listing:  # period listing found!
    print(kwargs)
    if "url" in user_settings["schedule"][kwargs["type"]]:
        ms_open_url(url=user_settings["schedule"][kwargs["type"]]["url"])


s_scheduler = Scheduler()
# this list comprehension should never enter a codebase but...
s_jobs = [
    s_scheduler.object().add_job(on_schedule_event,
                                 "cron",
                                 kwargs={
                                     "schedule": {
                                         **y
                                     },
                                     "type": x
                                 },
                                 **on_first_index(
                                     without_keys(y, {"type", "url"})))
    for x, y in user_settings["schedule"].items()
]
log.debug(f"Added {len(s_jobs)} jobs!")
Example #24
from core.scheduler import Scheduler
from common.logger_help import Logger

logger = Logger(logger_name="main").get_logger()

if __name__ == "__main__":
    logger.info("==========================start===========================")
    root_url = "https://hot.cnbeta.com/articles/movie/914069.htm"
    save_url = "D:\\CodeSpace\\PycharmProjects\\cnBate\\file.txt"
    Spider = Scheduler(save_url, root_url, 50)
    Spider.run_spider()
    logger.info("==========================end===========================")
Example #25
class Spider(metaclass=abc.ABCMeta):

    start_request = []
    def __init__(self, start_monitor=True):
        self.init()
        self.number_dict = {core.constant.TOTAL_TASK: 0, core.constant.TOTAL_REQUEST: 0,
                            core.constant.TOTAL_RESPONSE: 0}
        self.color = core.constant.COLOR
        self.close = False
        self.loop = asyncio.get_event_loop()
        self.filter = core.bloomFilter.bloomFilterContext.get_filter(settings.PROJECT_NAME)
        self.scheduler = Scheduler(self)
        self.downloader = Downloader(self, settings.DOWNLOADER_WORKER)
        self.save = Save(self, settings.SAVE_WORKER)
        self.monitor = Monitor(self)
        self.start_monitor = start_monitor

    def start_work(self):
        logging.warning(settings.PROJECT_NAME + ' start work')
        self.scheduler.put_task(self.__class__.start_request)
        if self.start_monitor:
            self.monitor_process = Process(target=self.monitor.start_work, name="monitor")
            self.monitor_process.daemon = True
            self.monitor_process.start()

        self.__start_process()

    def update_number(self, key, value):
        self.number_dict[key] += value

    async def check_finish(self):
        while not self.close:
            await asyncio.sleep(5)
            # finished once all counters (tasks, requests, responses) are equal
            self.close = len(set(self.number_dict.values())) == 1


    def __start_process(self):
        try:
            self.loop.run_until_complete(
                asyncio.wait([self.downloader.do_work(), self.scheduler.do_work(),
                              self.save.do_work(), self.check_finish()]))
        except Exception as excep:
            logging.error(("%s start_work error: " % self.__class__.__name__), excep.args)
        finally:
            self.loop.stop()
            self.loop.run_forever()
            self.loop.close()
            logging.info(settings.PROJECT_NAME + " finished.")
            # monitor

            # self.downloader.process = multiprocessing.Process(target=self.downloader.run)
            # self.downloader.process.start()
            # self.save.process = multiprocessing.Process(target=self.save.run)
            # self.save.process.start()
            # self.scheduler.process = Process(target=self.scheduler.run)
            # self.scheduler.process.start()
            #
            # self.scheduler.process.join()
            # self.downloader.process.join()
            # self.save.process.join()

    def init(self):
        init_log()
        init_queue()
        init_middleware()
        init_bloomFilter()

    # @abc.abstractmethod
    def add_start_url(self) -> Request:
        pass

    def to_save(self, response):
        # append so earlier records are not overwritten on each call
        with open(settings.STORAGE_PATH + '/save_data.log', 'a') as f:
            f.write("\t".join([response.url, str(response.status), json.dumps(response.meta)]) + "\n")
            f.flush()
            logging.info("save data success. data=%r" % response.meta)

    def parse(self, response):
        pass
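
A hypothetical subclass of the Spider base above; only the return annotation of add_start_url names Request here, so the constructor arguments below are assumptions:

class ExampleSpider(Spider):
    # seed tasks handed to scheduler.put_task by start_work
    start_request = [Request(url='http://example.com')]  # hypothetical Request signature

    def parse(self, response):
        # extract data or follow-up requests from the response here
        pass

# ExampleSpider().start_work()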
Example #26
class Manager(object):
    def __init__(self, token: str):
        self.logger = logging.getLogger(self.__class__.__name__)
        self.token = token
        self.updater = Updater(token)
        self.scheduler = Scheduler(self.updater.job_queue)
        self.dispatcher = self.updater.dispatcher

    @log
    def activate(self):
        self.set_up_commands()
        self.set_up_strings()
        self.set_up_query()
        self.set_up_conversation()

        self.set_up_scheduler()

    def start_polling(self):
        self.activate()

        self.updater.start_polling()
        # Run the bot until the user presses Ctrl-C
        # or the process receives SIGINT,
        # SIGTERM or SIGABRT
        self.updater.idle()

    def start_webhook(self, webhook_setting):
        self.activate()

        self.updater.start_webhook(**webhook_setting)
        self.updater.idle()

    def set_up_scheduler(self):
        self.scheduler.run_repeating(tasks.ChartCreator.create_charts,
                                     interval=3600,
                                     first=1)

    def set_up_commands(self):
        callback_cmd = {
            'start': general.start,
            'help': about.fund,
            'admin': admin.panel,
            'test': general.test,
            'restart': general.restart,
            'menu': general.menu,
            'author': general.author,
            ('btc_deposit', 'bch_deposit', 'zec_deposit'): personal.deposit,
        }

        for name, callback_func in callback_cmd.items():
            self.dispatcher.add_handler(CommandHandler(name, callback_func), )
        self.dispatcher.add_error_handler(general.error)

    def set_up_strings(self):
        callback_str = {
            '❓  О крипто-фонде': about.fund,
            '📈  Трейдинг': trading.token,
            '💳  Обменник': exchanger.beta,
            '👨🏻‍💻  Личный кабинет': personal.area,
            '⌨️  Главное меню': general.menu
        }
        for name, callback_func in callback_str.items():
            self.dispatcher.add_handler(
                RegexHandler(f'^{name}$', callback_func))

    def set_up_query(self):
        callback_query = {
            # personal area
            'personal_wallet': personal.wallet,
            'personal_wallet_add': personal.wallet_add,
            'personal_wallet_make_\w+': personal.wallet_make,
            'personal_wallet_history': personal.wallet_history,
            'personal_wallet_autopay': personal.wallet_autopay,
            'personal_wallet_autopay_\w+': personal.wallet_autopay_addr,
            'personal_wallet_invoice': personal.wallet_invoice,
            'personal_wallet_back': personal.wallet,
            'personal_referral': personal.referral,
            'personal_portfolio': personal.portfolio,
            'personal_settings': personal.settings,
            'personal_logout': personal.logout,
            'personal_back': personal.back,
            # 'area_profit': personal.profit,

            # about
            'about_bot': about.bot,
            'about_dev': about.development,
            'about_meetup': about.meetup,
            'about_back': about.back,

            # trading
            'buy_sfi': trading.buy_token,
            'sell_tokens': trading.sell_tokens,
            'chart_\w+': trading.chart,
            'btc_payment': trading.payment_method_btc,
            'bch_payment': trading.payment_method_bch,
            'zec_payment': trading.payment_method_zec,
            'buy_token_btc': trading.pay_btc,
            'buy_token_bch': trading.pay_bch,
            'buy_token_zec': trading.pay_zec,
            'trading_back': trading.back

            # tools & plugins
        }

        for name, callback_func in callback_query.items():
            self.dispatcher.add_handler(
                CallbackQueryHandler(callback_func, pattern=f'^{name}$'))

    def set_up_conversation(self):
        # admin dispatch
        self.dispatcher.add_handler(
            # TODO: "cancel" and edit do not work here
            ConversationHandler(entry_points=[
                CallbackQueryHandler(admin.dispatch, pattern='^admin_dispatch')
            ],
                                states={
                                    TEXT: [
                                        MessageHandler(Filters.text,
                                                       admin.dispatch_text,
                                                       pass_user_data=True)
                                    ],
                                    SEND: [
                                        CallbackQueryHandler(
                                            admin.dispatch_send,
                                            pass_user_data=True,
                                            pattern='^admin_dispatch_send')
                                    ]
                                },
                                fallbacks=[
                                    MessageHandler(Filters.text,
                                                   admin.edit_dispatch_text,
                                                   edited_updates=True,
                                                   pass_user_data=True),
                                    CallbackQueryHandler(admin.back,
                                                         pattern='^back_admin')
                                ]))
        # login
        self.dispatcher.add_handler(
            ConversationHandler(
                entry_points=[RegexHandler(f'^🔑  Вход$', personal.login)],
                states={
                    EMAIL: [
                        MessageHandler(FilterEmail(),
                                       personal.login_email,
                                       pass_user_data=True),
                        MessageHandler(FilterInvalidValue(),
                                       personal.invalid_email)
                    ],
                    PASSWORD: [
                        MessageHandler(FilterPassword(),
                                       personal.login_password,
                                       pass_user_data=True),
                        MessageHandler(FilterInvalidValue(),
                                       personal.invalid_password)
                    ]
                },
                fallbacks=[
                    RegexHandler(f'^🚫  Отмена$',
                                 general.cancel,
                                 pass_user_data=True)
                ]))
        # registration
        self.dispatcher.add_handler(
            ConversationHandler(
                entry_points=[
                    RegexHandler(f'^📝  Регистрация$', personal.registration)
                ],
                states={
                    EMAIL: [
                        MessageHandler(FilterEmail(),
                                       personal.registration_email,
                                       pass_user_data=True),
                        MessageHandler(FilterInvalidValue(),
                                       personal.invalid_email)
                    ],
                    PASSWORD: [
                        MessageHandler(FilterPassword(),
                                       personal.registration_password,
                                       pass_user_data=True),
                        MessageHandler(FilterInvalidValue(),
                                       personal.invalid_password)
                    ]
                },
                fallbacks=[
                    RegexHandler(f'^🚫  Отмена$',
                                 general.cancel,
                                 pass_user_data=True)
                ]))
        # withdrawal
        self.dispatcher.add_handler(
            ConversationHandler(
                entry_points=[
                    CommandHandler(
                        ('btc_withdrawal', 'bch_withdrawal', 'zec_withdrawal'),
                        personal.withdrawal,
                        pass_user_data=True)
                ],
                states={
                    AMOUNT: [
                        MessageHandler(FilterCurrencyValue(),
                                       personal.withdrawal_amount,
                                       pass_user_data=True),
                        MessageHandler(FilterInvalidValue(),
                                       personal.withdrawal_invalid_amount,
                                       pass_user_data=True),
                    ],
                    ADDR: [
                        MessageHandler(FilterAddr(),
                                       personal.withdrawal_addr,
                                       pass_user_data=True),
                        MessageHandler(FilterInvalidValue(),
                                       personal.withdrawal_invalid_addr,
                                       pass_user_data=True)
                    ],
                    OK: [
                        MessageHandler(FilterСonfirmation(),
                                       personal.withdrawal_confirmation,
                                       pass_user_data=True),
                    ]
                },
                fallbacks=[
                    RegexHandler(f'^🚫  Отмена$',
                                 general.cancel,
                                 pass_user_data=True)
                ]))
        # sell token
        self.dispatcher.add_handler(
            ConversationHandler(
                entry_points=[
                    CommandHandler(('sfi_sell', 'sft_sell'),
                                   personal.sell_token,
                                   pass_user_data=True)
                ],
                states={
                    AMOUNT: [
                        MessageHandler(FilterCurrencyValue(),
                                       personal.sell_token_amount,
                                       pass_user_data=True),
                        MessageHandler(FilterInvalidValue(),
                                       personal.token_invalid_amount)
                    ],
                    QUOTED: [
                        MessageHandler(FilterQuotedCurrency(),
                                       personal.sell_token_quoted,
                                       pass_user_data=True),
                        MessageHandler(FilterInvalidValue(),
                                       personal.sell_token_invalid_quoted)
                    ],
                    ADDRESS: [
                        MessageHandler(FilterSellTokenAddress(),
                                       personal.sell_token_address,
                                       pass_user_data=True),
                        MessageHandler(FilterInvalidValue(),
                                       personal.sell_token_invalid_address,
                                       pass_user_data=True)
                    ],
                    CONFIRMATION: [
                        MessageHandler(FilterСonfirmation(),
                                       personal.sell_token_confirmation,
                                       pass_user_data=True),
                    ]
                },
                fallbacks=[
                    RegexHandler(f'^🚫  Отмена$',
                                 general.cancel,
                                 pass_user_data=True)
                ]))
        # buy token
        self.dispatcher.add_handler(
            ConversationHandler(
                entry_points=[
                    CommandHandler(('sfi_buy', 'sft_buy'),
                                   personal.buy_token,
                                   pass_user_data=True)
                ],
                states={
                    BUY_QUOTED: [
                        MessageHandler(FilterQuotedCurrency(),
                                       personal.buy_token_quoted,
                                       pass_user_data=True),
                        MessageHandler(FilterInvalidValue(),
                                       personal.buy_token_invalid_quoted)
                    ],
                    BUY_AMOUNT: [
                        MessageHandler(FilterCurrencyValue(),
                                       personal.buy_token_amount,
                                       pass_user_data=True),
                        MessageHandler(FilterInvalidValue(),
                                       personal.buy_token_invalid_amount)
                    ],
                    BUY_CONFIRMATION: [
                        MessageHandler(FilterСonfirmation(),
                                       personal.buy_token_confirmation,
                                       pass_user_data=True),
                    ]
                },
                fallbacks=[
                    RegexHandler(f'^🚫  Отмена$',
                                 general.cancel,
                                 pass_user_data=True)
                ]))