def main_process(my_id):
    """Read queue/cluster settings from the environment and run a Worker.

    :param my_id: identifier passed through to the Worker instance.
    """
    wanted = [
        "RECV_QUEUE",
        "SEND_QUEUE",
        "MASTER_SEND_QUEUE",
        "SEND_REQUEST_QUEUE",
        "STATUS_QUEUE",
        "DATA_CLUSTER_WRITE",
        "DATA_CLUSTER_READ",
    ]
    cfg = ConfigReader().parse_vars(wanted)

    worker = Worker(
        cfg["RECV_QUEUE"],
        [cfg["SEND_QUEUE"]],
        cfg["MASTER_SEND_QUEUE"],
        cfg["SEND_REQUEST_QUEUE"],
        cfg["STATUS_QUEUE"],
        cfg["DATA_CLUSTER_WRITE"],
        cfg["DATA_CLUSTER_READ"],
        my_id,
    )

    # NOTE(review): start() is invoked forever; presumably it returns on
    # failure and this acts as a restart loop -- confirm against Worker.
    while True:
        worker.start()
示例#2
0
def test_celery():
    """Push a tiny batch of ids through a celery-mode Worker and return the response."""
    data = [
        '1',
        '2',
        '3',
        '4',
    ]
    info = {
        'celery_worker': 'test.functional.test_celery_group_little.simple',
        'worker': 'test.functional.test_celery_group_little.worker_do_sth',
        'celery_max_workers': 1,
        'celery_chunk_size': 2,
        'chunk_size': 2,
        'queue': 'a1',
        'dummy': 'test.functional.test_celery.dummy',
    }

    worker = Worker(mode='celery')
    return worker.work(data, info)
示例#3
0
def main():
    """Wire a Worker from RECV/SEND/MASTER_SEND queue settings and start it."""
    cfg = ConfigReader().parse_vars(
        ["RECV_QUEUE", "SEND_QUEUE", "MASTER_SEND_QUEUE"])

    Worker(cfg["RECV_QUEUE"], [cfg["SEND_QUEUE"]],
           cfg["MASTER_SEND_QUEUE"]).start()
示例#4
0
def test(self, data, info):
    """Run *data* through a thread-mode Worker using sub-task settings.

    ``info`` must carry ``sub_chunk_size`` and ``sub_worker``; they are
    copied into a fresh dict so the caller's ``info`` is not mutated.

    :returns: the Worker's response.
    """
    print(data, '1111')
    print(info, '1111')
    sub_info = deepcopy(info)
    sub_info['chunk_size'] = info['sub_chunk_size']
    sub_info['worker'] = info['sub_worker']

    worker = Worker(mode='thread')
    resp = worker.work(data, sub_info)
    # Bug fix: resp was computed but never returned to the caller.
    return resp
def main_process():
    """Build a Worker from queue/cluster environment variables and start it."""
    needed = [
        "RECV_QUEUE", "SEND_QUEUE", "STATUS_QUEUE", "DATA_CLUSTER_WRITE",
        "DATA_CLUSTER_READ",
    ]
    cfg = ConfigReader().parse_vars(needed)

    worker = Worker(
        cfg["RECV_QUEUE"],
        cfg["SEND_QUEUE"],
        cfg["STATUS_QUEUE"],
        cfg["DATA_CLUSTER_WRITE"],
        cfg["DATA_CLUSTER_READ"],
    )
    worker.start()
示例#6
0
def xtest_it():
    """Crawl a doubled batch of dummy ids with a coroutine-mode Worker."""
    print('>>>>')
    base = [
        'u11', 'u22', 'u33', 'u44',
        'u21', 'u22', 'u23', 'u24',
        'u31', 'u32', 'u33', 'u34',
        'u41', 'u42', 'u43', 'u44',
        'u51', 'u52', 'u53', 'u54',
    ]
    data = base * 2
    info = {
        'worker': 'test.functional.test_worker_coroutine.worker_do_sth',
        'chunk_size': 4,
    }
    resp = Worker(mode='coroutine').work(data, info)
    print(resp)
def create_app():
    """Configure the VM worker and expose it through a Flask API.

    Reads WORKER_ID / MAIN_SERVER_URL / WORKER_HOST / WORKER_PORT from the
    environment, builds the Worker, schedules its jobs and registers the
    HTTP endpoints.

    :returns: the configured Flask application.
    """
    log = get_logger(__name__)
    log.info('Setting up worker...')

    worker_id = os.getenv('WORKER_ID')
    main_server_url = os.getenv('MAIN_SERVER_URL')
    worker_host = os.getenv('WORKER_HOST')
    worker_port = os.getenv('WORKER_PORT')
    log.info("""
        Parameters:
            WORKER_ID: {}
            MAIN_SERVER_URL: {},
            WORKER_HOST: {},
            WORKER_PORT: {}
    """.format(worker_id, main_server_url, worker_host, worker_port))

    worker = Worker(worker_id, worker_host, worker_port, LabsRepository(),
                    LabTemplatesRepository())

    log.info("Setting up scheduled jobs")
    setup_scheduler(worker)

    log.info("Setting up api")
    app = Flask('VMs worker')
    register_endpoints(Api(app), worker)

    return app
示例#8
0
def work():
    """Spawn worker and customer greenlets, wait for them, then report stats."""
    customer_queue = queue.Queue()
    done = event.Event()
    customers = []

    # Worker pool sized by configuration.
    pool = [Worker() for _ in range(CONFIG["workers"])]

    stats = QueueStats(pool, customers)

    greenlets = [
        gevent.spawn(w.execute_job, customer_queue, done, stats)
        for w in pool
    ]
    greenlets.append(
        gevent.spawn(insert_new_customer_to_queue, customer_queue,
                     customers, done))

    gevent.joinall(greenlets)

    stats.report()
示例#9
0
    def _setup_workers(self, num_workers):
        """Create *num_workers* Worker instances sharing this object's
        module name and args.

        Bug fix: the original ignored the ``num_workers`` argument and
        read ``self.num_workers`` instead; the parameter is now honored
        (callers passing ``self.num_workers`` see no change).
        """
        return [
            Worker(
                module_name=self.module_name,
                module_args=self.module_args,
            )
            for _ in range(num_workers)
        ]
示例#10
0
文件: netboy.py 项目: pingf/netboy2
    def run_remote(self, url, data, callback_data=None):
        """POST *data* plus this instance's info to a remote worker at *url*.

        When *callback_data* is given, a post-back trigger payload is
        appended to ``self.info['triggers']`` (creating the list if needed).
        """
        self.worker = Worker(mode=self.info.get('mode', 'thread'))
        if callback_data:
            trigger_payload = {'trigger': 'netboy.support.triggers.post_it'}
            trigger_payload.update(callback_data)
            existing = self.info.get('triggers')
            if existing:
                existing.append(trigger_payload)
            else:
                self.info['triggers'] = [trigger_payload]

        payload = {
            'url': url,
            'method': 'post',
            'postfields': {
                'info': copy(self.info),
                'data': data,
            },
        }

        return curl_work(payload, logger='netboy')
示例#11
0
def xtest_celery():
    """Run a batch of dummy ids through a celery-mode Worker and return the response."""
    data = [
        'u11', 'u22', 'u33', 'u44', 'u21', 'u22', 'u23', 'u24',
        'u31', 'u32',
        'u33', 'u34',
        'u41', 'u42', 'u43', 'u44', 'u51', 'u52', 'u53', 'u54',
    ]
    info = {
        'celery_worker': 'test.functional.test_celery.simple',
        'worker': 'test.functional.test_celery.worker_do_sth',
        'celery_max_workers': 1,
        'celery_chunk_size': 40,
        'chunk_size': 40,
        'dummy': 'test.functional.test_celery.dummy',
        'sync_callback': 'test.functional.test_celery.final_callback',
        'each_callback': 'test.functional.test_celery.callback',
    }

    worker = Worker(mode='celery')
    return worker.work(data, info)
def start_labs_job(worker: Worker):
    """Start every lab that is PREPARING and whose start date has passed.

    Labs whose start date fails to parse are logged and skipped.

    :param worker: Worker whose ``labs()`` are scanned.
    """
    log = get_logger(__name__)
    log.info('Starting start labs job')
    for lab in worker.labs():
        lab_status = lab.status(include_machines=False)
        status = LabStatus[lab_status.get('status')]
        if status != LabStatus.PREPARING:
            continue
        start_date_str = lab_status['start_date']
        try:
            start_date = parse_date(start_date_str)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit still propagate.
            log.warning('Lab with id {} has invalid start date'.format(lab.id))
            continue
        # parse_date may plausibly return None -- keep the original guard.
        if start_date is not None and datetime.datetime.now() > start_date:
            log.info('Starting lab {}'.format(lab.id))
            lab.start()

    log.info('Start labs job ended')
示例#13
0
from worker.settings import NEW_TASK_EVENT

from worker.worker import Worker

# Module-level pool of workers, each wired to the shared NEW_TASK_EVENT.
# Starting them is currently left to the importer (see the commented-out
# alternatives below).
WORKER_LIST = [Worker(NEW_TASK_EVENT)]

# map(lambda x: x.start(), WORKER_LIST)
# for x in WORKER_LIST:
#     x.start()
示例#14
0

def work_do_sth(data, info):
    """Fetch a picture for every id in *data*; *info* is unused here."""
    for item in data:
        get_picture(item)


if __name__ == "__main__":
    # a = get_picture()
    # set(a)

    # Start timestamp -- apparently intended for timing, but never read.
    a = time.time()
    data = [1, 2, 3, 4, 5, 6]
    info = {'worker': 'pachong.work_do_sth', 'chunk_size': 2}
    # Fan the ids out across threads, two per chunk.
    ww = Worker(mode="thread")
    resp = ww.work(data, info)

    # Crawl directly (serial version, kept for reference):
    # for i in data:
    #     get_picture(i)

    # for i in list(range(1,7)):
    #     th1 = threading.Thread(target=get_picture, args=(i,))
    #     th1.start()

    # th1 = threading.Thread(target=get_picture, args=(1,))
    #
    # th1.start()
    # th2 = threading.Thread(target=get_picture, args=(2,))
    # th2.start()
示例#15
0
文件: netboy.py 项目: pingf/netboy2
class NetBoy:
    """Fluent configuration facade around Worker-based crawling.

    Every ``use_*`` method stores a setting in ``self.info`` and returns
    ``self`` so calls can be chained; ``run``/``run_remote`` execute the
    crawl locally or against a remote endpoint.

    Bug fix: ``use_logger`` was defined twice (once with parameter
    ``log_name``, once with ``logger``); only the later definition was
    effective, so the dead duplicate has been removed.
    """

    def __init__(self, info=None):
        """Start from *info* (shared, not copied) or a fresh dict."""
        self.info = info if info else {}
        self.info['dummy'] = 'netboy.celery.tasks.dummy'
        self.info['log'] = 'netboy'

    def use_socks5_proxy(self, proxy):
        """Route traffic through a SOCKS5 proxy given as ``host:port``."""
        p = proxy.split(':')
        self.info['proxytype'] = 'socks5'
        self.info['proxy'] = p[0]
        self.info['proxyport'] = int(p[1])
        return self

    def use_http_proxy(self, proxy):
        """Route traffic through an HTTP proxy given as ``host:port``."""
        p = proxy.split(':')
        self.info['proxytype'] = 'http'
        self.info['proxy'] = p[0]
        self.info['proxyport'] = int(p[1])
        return self

    def use_queue(self, queue):
        """Send celery work to *queue*."""
        self.info['queue'] = queue
        return self

    def use_logger(self, logger):
        """Log through the named *logger* (single surviving definition)."""
        self.info['log'] = logger
        return self

    def use_filter(self, result_filter):
        """Restrict result fields to *result_filter*."""
        self.info['filter'] = result_filter
        return self

    def use_prepares(self, prepares):
        """Install *prepares* hooks."""
        self.info['prepares'] = prepares
        return self

    def use_triggers(self, triggers):
        """Install *triggers* hooks."""
        self.info['triggers'] = triggers
        return self

    def use_analysers(self, analysers):
        """Install *analysers* hooks."""
        self.info['analysers'] = analysers
        return self

    def use_auth(self, user, password, group='default'):
        """Attach user/password/group credentials to the crawl info."""
        self.info['auth'] = {
            'user': user,
            'password': password,
            'group': group
        }
        return self

    def use_useragent(self, useragent):
        """Send *useragent* as the User-Agent string."""
        self.info['useragent'] = useragent
        return self

    def use_timeout(self, timeout=None, connect=None, wait=None, script=None):
        """Set the various timeouts; falsy values (including 0) are ignored."""
        if timeout:
            self.info['timeout'] = timeout
        if connect:
            self.info['connecttimeout'] = connect
        if wait:
            self.info['wait'] = wait
        if script:
            self.info['script_timeout'] = script
        return self

    def use_info(self, info):
        """Replace the whole info dict with *info*."""
        self.info = info
        return self

    def use_final(self, final):
        """Set the final callback."""
        self.info['final'] = final
        return self

    def use_mode(self, mode):
        """Select the execution mode and (re)derive the spider setting.

        Coroutine mode defaults to the aiohttp spider when none is set.
        """
        self.info['mode'] = mode
        spider = self.info.get('spider')
        if spider:
            self.use_spider(spider)
        else:
            if mode == 'coroutine':
                self.use_spider('aiohttp')
        return self

    def use_spider(self, spider='pycurl'):
        """Pick the spider backend and wire the matching worker callables
        for the current mode (celery / coroutine / local)."""
        self.info['spider'] = spider
        mode = self.info.get('mode', 'thread')
        if mode == 'celery':
            if spider == 'pycurl':
                self.info[
                    'celery_worker'] = 'netboy.celery.tasks.pycurl_worker'
                self.info[
                    'worker'] = 'netboy.celery.tasks.multicurl_worker_do_crawl'
                self.info[
                    'final_callback'] = 'netboy.celery.tasks.final_callback'
            elif spider == 'chrome':
                self.info[
                    'celery_worker'] = 'netboy.celery.tasks.thread_worker'
                self.info[
                    'worker'] = 'netboy.celery.tasks.chrome_worker_do_crawl'
                self.info[
                    'final_callback'] = 'netboy.celery.tasks.final_callback'
        elif mode == 'coroutine' and spider == 'aiohttp':
            self.info[
                'worker'] = 'netboy.aio_http.aiohttp_handler.aiohttp_handler'
        else:
            if spider == 'pycurl':
                self.info[
                    'worker'] = 'netboy.multi_pycurl.multicurl_handler.curl_handler'
            elif spider == 'chrome':
                self.info[
                    'worker'] = 'netboy.selenium_chrome.chrome_driver_handler.chrome_driver_handler'
            elif spider == 'aiohttp':
                self.info[
                    'worker'] = 'netboy.aio_http.aiohttp_handler.aio_http_handler'
        return self

    def use_workers(self, workers=8, chunk_size1=40, chunk_size2=8):
        """Size the pools: celery gets the larger chunk size, local the smaller."""
        self.info['celery_max_workers'] = workers
        self.info['max_workers'] = workers
        self.info['celery_chunk_size'] = max(chunk_size1, chunk_size2)
        self.info['chunk_size'] = min(chunk_size1, chunk_size2)
        return self

    def use_chrome(self, chrome):
        """Configure the chrome binary/options and switch to the chrome spider."""
        self.info['chrome'] = chrome
        self.use_spider('chrome')
        return self

    def use_window(self, window):
        """Set the browser window size and switch to the chrome spider."""
        self.info['window_size'] = window
        self.use_spider('chrome')
        return self

    def run(self, data):
        """Crawl *data* locally with a Worker in the configured mode."""
        self.worker = Worker(mode=self.info.get('mode', 'thread'))
        resp = self.worker.work(data, self.info)
        return resp

    def run_remote(self, url, data, callback_data=None):
        """POST *data* plus this instance's info to a remote worker at *url*.

        When *callback_data* is given, a post-back trigger payload is
        appended to ``self.info['triggers']`` (creating the list if needed).
        """
        self.worker = Worker(mode=self.info.get('mode', 'thread'))
        triggers = self.info.get('triggers')
        trigger_payload = {'trigger': 'netboy.support.triggers.post_it'}
        if callback_data:
            trigger_payload.update(callback_data)
            if triggers:
                self.info['triggers'].append(trigger_payload)
            else:
                self.info['triggers'] = [trigger_payload]

        payload = {
            'url': url,
            'method': 'post',
            'postfields': {
                'info': copy(self.info),
                'data': data
            }
        }

        resp = curl_work(payload, logger='netboy')
        return resp

    def register_remote(self, url, user, password, group='default'):
        """Register credentials with a remote endpoint via POST."""
        payload = {
            'url': url,
            'method': 'post',
            'postfields': {
                'user': user,
                'password': password,
                'group': group
            }
        }

        resp = curl_work(payload, logger='netboy')
        return resp
示例#16
0
def init_api(api, app):
    """Build a Worker from the app config and attach it to *api*."""
    # NOTE(review): 'workder' (and Worker's 'overide_logger' kwarg) look
    # like typos, but other code may depend on these exact names --
    # confirm before renaming.
    api.workder = Worker(app.config.get("APP_CONFIG", {}),
                         overide_logger=app.logger)
示例#17
0
        'netboy.celery.tasks.pycurl_worker',
        'worker':
        'netboy.celery.tasks.multicurl_worker_do_crawl',
        'celery_max_workers':
        4,
        'celery_chunk_size':
        10,
        'chunk_size':
        5,
        'queue':
        'worker',
        'dummy':
        'netboy.celery.tasks.dummy',
        'filter': ['url', 'cookielist'],
        'triggers': [
            {
                'hello': 'world'
            },
            {
                'hello2': 'world2'
            },
            {
                'trigger': 'netboy.support.trigger.trig_it'
            },
        ],
        'analysers': ['netboy.support.analysers.analyse_it']
    }
    worker = Worker(mode='celery')
    resp = worker.work(data, info)
    print(resp)
eel.initJs(width)  # Call the initJs Javascript function in the page, passing the maze width


def webServer():
    """Serve main.html through eel and keep this thread alive."""
    eel.start('main.html', block=False)  # block=False: returns immediately; we loop below
    while True:
        eel.sleep(1.0)  # Use eel.sleep(), not time.sleep()


# Launch the web server in a background thread; the main thread continues
# on to drive the maze actors below.
try:
    _thread.start_new_thread(webServer, ())  # launch the server in parallel
    print("webServer started !")
except Exception:
    # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
    # still propagate.
    print("Error: unable to start thread")

if __name__ == "__main__":
    # We create our senders
    maze = PyramidalMaze(width)
    # Credentials below are masked placeholders left by the source scrape.
    scout1 = Scout("*****@*****.**", "woweygiowa96")
    engineer1 = Engineer("*****@*****.**", "woweygiowa96")
    worker1 = Worker("*****@*****.**", "woweygiowa96")

    # Each actor is bound to the maze and the eel UI, then started
    # (presumably as a thread -- confirm against the actor classes).
    scout1.constructor(maze, eel)
    scout1.start()

    engineer1.constructor(maze, eel)
    engineer1.start()

    worker1.constructor(maze, eel)
    worker1.start()
示例#19
0
def worker(session, redis, wf):
    """Build a Worker bound to the given redis connection, workflow and session."""
    return Worker(redis=redis, workflow=wf, session=session)
示例#20
0
def main():
    """Start a Worker configured from the ./config/ directory."""
    Worker("./config/").startWork()
示例#21
0
from flask import Flask
from flask import render_template
import logging

from utils.config import Config
from utils.common import create_folder, get_folder, init_usign

app = Flask(__name__)

# Imported for side effects (presumably registers routes on ``app``) --
# confirm against server/views.py.
import server.views

config = Config()
# Ensure the faillogs folder exists under the download directory.
create_folder("{}/{}".format(get_folder("downloaddir"), "faillogs"))
if config.get("sign_images"):
    print("sign images")
    init_usign()

# In dev mode, run a background Worker inside this process.
if config.get("dev"):
    from worker.worker import Worker
    worker = Worker()
    worker.start()
    #app.debug = True
示例#22
0
def main(config):
    """Initialise logging from *config* and run a Worker against the server."""
    log.init(config.get('worker', 'log_level'))
    Worker(config.get('worker', 'server_at')).run()
示例#23
0
文件: netboy.py 项目: pingf/netboy2
 def run(self, data):
     """Crawl *data* with a Worker in the configured mode (default 'thread')."""
     self.worker = Worker(mode=self.info.get('mode', 'thread'))
     resp = self.worker.work(data, self.info)
     return resp
示例#24
0
 def add_worker(self, worker_id, worker_type):
     """Register a new Worker of *worker_type* under *worker_id*."""
     new_worker = Worker(worker_id, worker_type)
     self.workers.append(new_worker)
示例#25
0
def main(config):
    """Set up logging, then run a Worker pointed at the configured server."""
    level = config.get('worker', 'log_level')
    log.init(level)

    server_at = config.get('worker', 'server_at')
    Worker(server_at).run()
示例#26
0
    def remove_worker(self, worker_id):
        """Remove and return the worker registered under *worker_id*,
        or None when no such worker exists.

        Bug fix: the original referenced an undefined ``worker_type``
        (guaranteed NameError) and returned ``list.remove``'s None.
        Matching is done on a ``worker_id`` attribute -- presumably set
        by Worker(worker_id, worker_type); confirm against the Worker
        class.
        """
        for idx, candidate in enumerate(self.workers):
            if getattr(candidate, 'worker_id', None) == worker_id:
                return self.workers.pop(idx)
        return None
示例#27
0
#!/usr/bin/env python3
"""Entry point: run a Worker until SIGINT/SIGTERM, then destroy it."""
import signal
import logging

from worker.worker import Worker

logging.basicConfig(level=logging.DEBUG)
w = Worker()
# Tear the worker down cleanly on Ctrl-C or a service stop.
signal.signal(signal.SIGINT, w.destroy)
signal.signal(signal.SIGTERM, w.destroy)
w.run()