Example #1
def main():
    args = get_args()
    conn_config = ConnectionConfig.from_file(args.config)
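    # Options here mirror `celery worker` CLI flags; Celery expands '%h'
    # in the hostname to the actual machine hostname at startup.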
    kwargs = {
        'concurrency': 2,
        'events': True,
        'hostname': '%h.worker.{}'.format(args.number),
        'loglevel': 'info',
    }

    tasks = Tasks(conn_config.amqp_uri)
    worker(app=tasks.app).run(**kwargs)
Example #2
def runcelery(config=None):
    _init_app(config)
    # Fix for setuptools generated scripts, so that it will
    # work with multiprocessing fork emulation.
    # (see multiprocessing.forking.get_preparation_data())
    if __name__ != '__main__':  # pragma: no cover
        sys.modules['__main__'] = sys.modules[__name__]
    from billiard import freeze_support

    freeze_support()
    worker(app=celery).run_from_argv('vpnchooser', argv=[
        '-B'
    ])
Example #3
def main(args):
	if args.reset:
		engine.execute("TRUNCATE checks; TRUNCATE rounds; TRUNCATE celery_taskmeta;")


	round = args.round
	if args.resume:
		lastRound = engine.execute("SELECT MAX(number) FROM rounds").first()
		round = lastRound[0] + 1

	# ScoreEngine will automatically start at
	# round+1, so subtract 1 if we're given a round
	if round > 0:
		round -= 1

	if args.worker:
		celery_app.autodiscover_tasks(['scoring.worker'])

		worker = Worker.worker(app=celery_app)
		worker.run(**config.CELERY["WORKER"])
	else:
		if args.queue:
			from scoring.master2 import Master
		else:
			from scoring.master import Master
		
		master = Master(round=round)
		master.run()
Example #4
def main():
    usage = "usage: %prog [options] [broker-url]"
    epilog = """\
The worker needs Filetracker server configured. If no FILETRACKER_URL is
present in the environment, a sensible default is generated, using the same
host as the Celery broker uses, with default Filetracker port."""
    parser = OptionParser(usage=usage, epilog=epilog)
    parser.disable_interspersed_args()

    os.environ.setdefault('CELERY_CONFIG_MODULE', 'sio.celery.default_config')
    app = Celery()
    cmd = worker(app)
    for x in cmd.get_options():
        parser.add_option(x)

    options, args = parser.parse_args()

    if len(args) > 1:
        parser.error("Unexpected arguments: " + ' '.join(args[1:]))
    if args:
        broker_url = args[0]
        os.environ['CELERY_BROKER_URL'] = args[0]

    if 'FILETRACKER_URL' not in os.environ:
        default_filetracker_host = None
        if 'CELERY_BROKER_URL' in os.environ:
            default_filetracker_host = \
                    _host_from_url(os.environ['CELERY_BROKER_URL'])
        if not default_filetracker_host:
            default_filetracker_host = '127.0.0.1'
        os.environ['FILETRACKER_URL'] = 'http://%s:%d' \
                % (default_filetracker_host, DEFAULT_FILETRACKER_PORT)

    return cmd.run(**vars(options))
Example #5
def test_parse_options(self):
    cmd = worker()
    cmd.app = self.app
    opts, args = cmd.parse_options(
        'worker', ['--concurrency=512', '--heartbeat-interval=10'])
    self.assertEqual(opts.concurrency, 512)
    self.assertEqual(opts.heartbeat_interval, 10)
Example #6
def test_parse_options(self):
    cmd = worker()
    cmd.app = self.app
    opts, args = cmd.parse_options(
        'worker', ['--concurrency=512', '--heartbeat-interval=10'])
    assert opts['concurrency'] == 512
    assert opts['heartbeat_interval'] == 10
Example #7
def start():
    worker = w.worker(app=app.celery)
    worker.run(
        loglevel=app.config['CELERY_LOG_LEVEL'],
        traceback=True,
        pool_cls='eventlet',
    )
Example #8
def worker(args):
    env = os.environ.copy()
    env["AIRFLOW_HOME"] = settings.AIRFLOW_HOME

    # Celery worker
    from airflow.executors.celery_executor import app as celery_app
    from celery.bin import worker

    worker = worker.worker(app=celery_app)
    options = {"optimization": "fair", "O": "fair", "queues": args.queues, "concurrency": args.concurrency}

    if args.daemon:
        pid, stdout, stderr, log_file = setup_locations("worker", args.pid, args.stdout, args.stderr, args.log_file)
        handle = setup_logging(log_file)
        stdout = open(stdout, "w+")
        stderr = open(stderr, "w+")

        ctx = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pid, -1), files_preserve=[handle], stdout=stdout, stderr=stderr
        )
        with ctx:
            sp = subprocess.Popen(["airflow", "serve_logs"], env=env)
            worker.run(**options)
            sp.kill()

        stdout.close()
        stderr.close()
    else:
        signal.signal(signal.SIGINT, sigint_handler)
        signal.signal(signal.SIGTERM, sigint_handler)

        sp = subprocess.Popen(["airflow", "serve_logs"], env=env)

        worker.run(**options)
        sp.kill()
Example #9
    def run(self):  # pylint: disable=E0202
        from flask import current_app
        from celery.bin import worker
        from async.celery_helpers import CeleryFactory
        celery = CeleryFactory(current_app).celery
        worker = worker.worker(app=celery)

        worker.run(loglevel=logging.INFO, state_db="async/celery_state", autoreload=True)
Example #10
def runcelery():
    celery_worker = worker.worker(app=celery)

    options = {
        'loglevel': 'INFO'
    }

    celery_worker.run(**options)
Example #11
def celery():
    """
    Run celery worker.
    """
    from project.extensions import celery
    from celery.bin import worker
    worker = worker.worker(app=celery)
    worker.run()
Example #12
def test_maybe_detach(self):
    x = worker(app=self.app)
    with patch('celery.bin.worker.detached_celeryd') as detached:
        x.maybe_detach([])
        detached.assert_not_called()
        with pytest.raises(SystemExit):
            x.maybe_detach(['--detach'])
        detached.assert_called()
Example #13
def test_maybe_detach(self):
    x = worker(app=self.app)
    with patch('celery.bin.worker.detached_celeryd') as detached:
        x.maybe_detach([])
        self.assertFalse(detached.called)
        with self.assertRaises(SystemExit):
            x.maybe_detach(['--detach'])
        self.assertTrue(detached.called)
Example #14
def runtask(name=None):
    """Run task server"""
    from celery.bin.worker import worker
    from celery.bin.beat import beat

    log_level = app.config.get('CELERY_LOG_LEVEL')

    if name == 'celery':
        worker = worker(app=app.celery)
        worker.run(loglevel=log_level)
    elif name == 'beat':
        beat = beat(app=app.celery)
        beat.run(loglevel=log_level)
    elif name == 'all':
        worker = worker(app=app.celery)
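        # beat=True embeds the beat scheduler inside the worker process,
        # the equivalent of `celery worker -B`.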
        worker.run(loglevel=log_level, beat=True)
    else:
        print("Usage: python manager.py runtask -n [celery | beat | all]")
Example #15
def worker(workers):
    """Starts a Superset worker for async SQL query execution."""
    if workers:
        celery_app.conf.update(CELERYD_CONCURRENCY=workers)
    elif config.get("SUPERSET_CELERY_WORKERS"):
        celery_app.conf.update(
            worker_concurrency=config.get("SUPERSET_CELERY_WORKERS"))

    worker = celery_worker.worker(app=celery_app)
    worker.run()
Example #16
    def test_run_from_argv_basic(self):
        x = worker(app=self.app)
        x.run = Mock()
        x.maybe_detach = Mock()

        def run(*args, **kwargs):
            pass
        x.run = run
        x.run_from_argv('celery', [])
        self.assertTrue(x.maybe_detach.called)
Example #17
def start_celery_worker():
    from celery import current_app
    from celery.bin import worker

    celery_app = current_app._get_current_object()
    worker = worker.worker(app=celery_app)
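    # Each key corresponds to a `celery worker` CLI option
    # (--broker, --loglevel, --traceback).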
    options = {
        'broker': app.config['CELERY_BROKER_URL'],
        'loglevel': 'INFO',
        'traceback': True
    }
    worker.run(**options)
Example #18
def run():
    try:
        cmd = sys.argv[1]
    except IndexError:
        print("incorrect number of arguments\nUsage: %prog [crawl|schedule] [options] arg")
        sys.exit(1)

    if cmd == "crawl":
        task = worker.worker(app=app)
        task.execute_from_commandline(sys.argv[1:])
    elif cmd == "schedule":
        schedule()
Example #19
def handle(self, *args, **options):
    if settings.DEBUG:
        from celery import platforms
        # print('root: %s' % (C_FORCE_ROOT,))
        from celery.bin import worker
        module = importlib.import_module(settings.DJENGA_CELERY_MODULE)
        app = module.app
        platforms.C_FORCE_ROOT = True
        w = worker.worker(app)
        w.run(loglevel='info', concurrency=1, queues=Command.queues())
    else:
        return "Sorry, I shouldn't be run in production mode (DEBUG=False)"
    return 'Done.'
Example #20
def worker(args):
    # Worker to serve static log files through this simple flask app
    env = os.environ.copy()
    env["AIRFLOW_HOME"] = settings.AIRFLOW_HOME
    sp = subprocess.Popen(["airflow", "serve_logs"], env=env)

    # Celery worker
    from airflow.executors.celery_executor import app as celery_app
    from celery.bin import worker

    worker = worker.worker(app=celery_app)
    options = {"optimization": "fair", "O": "fair", "queues": args.queues, "concurrency": args.concurrency}
    worker.run(**options)
    sp.kill()
Example #21
def worker():
    """Starts a Superset worker for async SQL query execution."""
    # celery -A tasks worker --loglevel=info
    print("Starting SQL Celery worker.")
    if config.get('CELERY_CONFIG'):
        print("Celery broker url: ")
        print(config.get('CELERY_CONFIG').BROKER_URL)

    application = celery.current_app._get_current_object()
    c_worker = celery_worker.worker(app=application)
    options = {
        'broker': config.get('CELERY_CONFIG').BROKER_URL,
        'loglevel': 'INFO',
        'traceback': True,
    }
    c_worker.run(**options)
Example #22
    def run_celery(self):

        if self.celery_started:
            return

        application = current_app._get_current_object()

        celery_worker = worker.worker(app=application)

        options = {
            "broker": self.config["CELERY_BROKER_URL"],
            "loglevel": "INFO",
            "traceback": True,
        }

        celery_worker.run(**options)
Example #24
    def __init__(self, config, task_modules: List[str]):
        config = from_object(config)

        self.broker_url = config['CELERY_BROKER_URL']
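        # Group 5 of the match is the bare transport name: the leading
        # alternation skips a 'py' prefix or a '<backend>+' composite scheme
        # (e.g. 'pyamqp://' -> 'amqp', 'sqla+mysql://' -> 'mysql').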
        m = re.match(r'((^py)|(^.*\+)|(^))(.+):\/\/', self.broker_url)
        self.broker_type = m.group(5)
        """
        broker_type:
            redis, sqlite, rabbitmq, ...
        """

        app = Flask(__name__)
        app.config.from_object(config)
        db.init_app(app)
        db.create_all(app=app)
        self.db = db
        self.migrate = Migrate(app=app, db=db)
        self.api = Api()
        CORS(app)

        wdir = os.path.abspath(config['TASK_WORKDIR'])

        if not os.path.exists(wdir) or not os.path.isdir(wdir):
            os.makedirs(wdir)
        self.base_directory = wdir

        formatter = logging.Formatter(config['TASK_FILE_FORMAT']
                                      or '%(message)s')
        self.session = create_connection(config['PRE_STATE_DB'])
        custom_task_cls = get_celery_task(wdir, formatter, self.session)

        class ContextTask(custom_task_cls):
            def __call__(_self, *args, **kwargs):
                with app.app_context():
                    return _self.run(*args, **kwargs)

        celery = Celery(backend=config['CELERY_RESULT_BACKEND'],
                        broker=config['CELERY_BROKER_URL'],
                        config_source=config,
                        task_cls=ContextTask)

        self.celery = celery

        self.app = app
        self.register(task_modules)
        self.api.init_app(app)
        self.worker = worker.worker(app=self.celery)
Example #25
def start_worker():
    from tasks import selftest_task_queue
    from tasks.helpers import create_mq_url
    from celery.bin import worker

    worker = worker.worker(app=selftest_task_queue)
    worker_options = {
        'broker': create_mq_url(options.mq_hostname,
                                options.mq_port,
                                username=options.mq_username,
                                password=options.mq_password),
        'loglevel': options.mq_loglevel,
        'traceback': options.debug,
    }
    worker.run(**worker_options)
Example #26
def worker(args):
    env = os.environ.copy()
    env['AIRFLOW_HOME'] = settings.AIRFLOW_HOME

    # Celery worker
    from airflow.executors.celery_executor import app as celery_app
    from celery.bin import worker

    worker = worker.worker(app=celery_app)
    options = {
        'optimization': 'fair',
        'O': 'fair',
        'queues': args.queues,
        'concurrency': args.concurrency,
    }

    if args.daemon:
        pid, stdout, stderr, log_file = setup_locations(
            "worker", args.pid, args.stdout, args.stderr, args.log_file)
        handle = setup_logging(log_file)
        stdout = open(stdout, 'w+')
        stderr = open(stderr, 'w+')

        ctx = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pid, -1),
            files_preserve=[handle],
            stdout=stdout,
            stderr=stderr,
        )
        with ctx:
            sp = subprocess.Popen(['airflow', 'serve_logs'], env=env)
            worker.run(**options)
            sp.kill()

        stdout.close()
        stderr.close()
    else:
        signal.signal(signal.SIGINT, sigint_handler)
        signal.signal(signal.SIGTERM, sigint_handler)

        sp = subprocess.Popen(['airflow', 'serve_logs'], env=env)

        worker.run(**options)
        sp.kill()
Example #27
def worker(args):
    env = os.environ.copy()
    env['AIRFLOW_HOME'] = settings.AIRFLOW_HOME

    # Celery worker
    from airflow.executors.celery_executor import app as celery_app
    from celery.bin import worker

    worker = worker.worker(app=celery_app)
    options = {
        'optimization': 'fair',
        'O': 'fair',
        'queues': args.queues,
        'concurrency': args.concurrency,
    }

    if not args.foreground:
        pid, stdout, stderr, log_file = setup_locations("worker", args.pid, args.stdout, args.stderr, args.log_file)
        handle = setup_logging(log_file)
        stdout = open(stdout, 'w+')
        stderr = open(stderr, 'w+')

        ctx = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pid, -1),
            files_preserve=[handle],
            stdout=stdout,
            stderr=stderr,
        )
        with ctx:
            sp = subprocess.Popen(['airflow', 'serve_logs'], env=env)
            worker.run(**options)
            sp.kill()

        stdout.close()
        stderr.close()
    else:
        signal.signal(signal.SIGINT, sigint_handler)
        signal.signal(signal.SIGTERM, sigint_handler)

        sp = subprocess.Popen(['airflow', 'serve_logs'], env=env)

        worker.run(**options)
        sp.kill()
Example #28
def worker(concurrency, broker, debug):
    """Run the celery worker."""
    from bel_commons.wsgi import flask_app
    from bel_commons.celery_worker import celery_app

    if celery_app is None:
        click.secho('Celery is not configured', fg='red')
        return sys.exit(1)

    with flask_app.app_context():
        from celery.bin import worker

        pybel_worker = worker.worker(app=celery_app)
        pybel_worker.run(
            broker=broker,
            loglevel=debug,
            traceback=True,
            concurrency=concurrency,
        )
Example #29
def start_job_manager():
    # Initializing the job manager app
    job_manager_app = init_job_manager()
    job_manager_app.steps['consumer'].add(GossipStepEvent)
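    # A custom consumer bootstep is registered on the app before the
    # worker command is built around it.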

    # creating the worker with the job manager app
    job_manager_worker = worker.worker(app=job_manager_app)
    

    # Creating the options
    job_manager_options = {
        "hostname": "job_manager",
        "queues": [job_manager_queue_name],
        "loglevel": "INFO",
        "traceback": True,
    }

    # Launching the worker
    job_manager_worker.run(**job_manager_options)
Example #30
def worker(args):
    # Worker to serve static log files through this simple flask app
    env = os.environ.copy()
    env['AIRFLOW_HOME'] = settings.AIRFLOW_HOME
    sp = subprocess.Popen(['airflow', 'serve_logs'], env=env)

    # Celery worker
    from airflow.executors.celery_executor import app as celery_app
    from celery.bin import worker

    worker = worker.worker(app=celery_app)
    options = {
        'optimization': 'fair',
        'O': 'fair',
        'queues': args.queues,
        'concurrency': args.concurrency,
    }
    worker.run(**options)
    sp.kill()
Example #32
def start_worker():
    application = current_app._get_current_object()
    celery_worker = worker.worker(app=application)
    celery_config = app.config.get("CELERY_CONFIG", None)
    options = {
        'broker': celery_config['broker_url']
        if celery_config['broker_url'] is not None else '0.0.0.0',
        'loglevel': celery_config['loglevel']
        if celery_config['loglevel'] is not None else 'INFO',
        'traceback': celery_config['traceback']
        if celery_config['traceback'] is not None else True,
        'concurrency': celery_config['concurrency']
        if celery_config['concurrency'] is not None else 2,
    }

    celery_worker.run(**options)
Example #33
def celery(queues, logfile, concurrency, worker_max_tasks_per_child):
    """Starts the celery worker."""
    config = deepcopy(current_config.CELERY_WORKER_CONFIG)

    if queues:
        config.update(queues=queues.split(','))
        logger.info("worker is listening to queues: {}".format(queues))
    else:
        logger.info("worker is listening to ALL queues")

    if logfile:
        config.update(logfile=logfile)
    if concurrency:
        config.update(concurrency=concurrency)
    if worker_max_tasks_per_child:
        config.update(worker_max_tasks_per_child=worker_max_tasks_per_child)

    application = current_celery_app._get_current_object()
    w = worker.worker(app=application)
    w.run(**config)
Example #34
def main(ctx, test, list_config, enable_messaging, log_minimal, version):
    """
    Use this tool to collect and broadcast data from configured coins
    or/and tokens from configured crypto-currencies exchanges.
    """
    if list_config:
        if list_config == "currencies":
            import pprint

            click.echo(pprint.pprint(s.SYMBOLS_PER_EXCHANGE))
        elif list_config == "exchanges":
            click.echo("\n".join(s.EXCHANGES))
        ctx.exit()

    beat_kwargs = dict(
        enable_messaging=enable_messaging, log_minimal=log_minimal,
    )

    if test:
        ticker.test(**beat_kwargs)
        ctx.exit()

    if version:
        from xtcryptosignals import __title__, __version__

        click.echo("{} {}".format(__title__, __version__))
        ctx.exit()

    from celery import current_app
    from celery.bin import worker

    app = current_app._get_current_object()

    app.config_from_object("xtcryptosignals.tasks.celeryconfig")

    # updates beat config dynamically
    app.conf.beat_schedule["ticker"].update(kwargs=beat_kwargs)

    worker = worker.worker(app=app)
    worker.run(beat=True, loglevel=ticker.logging.INFO)
Example #35
    def inner(propagate_traces=True, backend="always_eager", **kwargs):
        sentry_init(
            integrations=[CeleryIntegration(propagate_traces=propagate_traces)],
            **kwargs
        )
        celery = Celery(__name__)

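        # 'always_eager' runs tasks synchronously in-process; the 'redis'
        # backend spawns a real worker thread against a local Redis broker.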
        if backend == "always_eager":
            if VERSION < (4,):
                celery.conf.CELERY_ALWAYS_EAGER = True
            else:
                celery.conf.task_always_eager = True
        elif backend == "redis":
            # broken on celery 3
            if VERSION < (4,):
                pytest.skip("Redis backend broken for some reason")

            # this backend requires capture_events_forksafe
            celery.conf.worker_max_tasks_per_child = 1
            celery.conf.worker_concurrency = 1
            celery.conf.broker_url = "redis://127.0.0.1:6379"
            celery.conf.result_backend = "redis://127.0.0.1:6379"
            celery.conf.task_always_eager = False

            Hub.main.bind_client(Hub.current.client)
            request.addfinalizer(lambda: Hub.main.bind_client(None))

            # Once we drop celery 3 we can use the celery_worker fixture
            w = worker.worker(app=celery)
            t = threading.Thread(target=w.run)
            t.daemon = True
            t.start()
        else:
            raise ValueError(backend)

        return celery
Example #36
def worker(args):
    """start a celery worker"""
    import celery.signals

    @celery.signals.setup_logging.connect
    def on_celery_setup_logging(**kwargs):
        pass
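    # Connecting any handler to the setup_logging signal stops Celery from
    # configuring its own logging, leaving the host app's config intact.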

    # from celery import current_app
    from celery.bin import worker
    from minc.tasks import app

    # app = current_app._get_current_object()

    worker = worker.worker(app=app)

    options = {
        "broker": "amqp://*****:*****@localhost:5672//",
        "loglevel": args.loglevel,
        "traceback": True,
        "worker_hijack_root_logger": False,
    }

    worker.run(**options)
Example #37
def main(ctx, testing, list_config, enable_messaging, log_minimal, version):
    """
    Use this tool to collect and broadcast data from configured coins
    or/and tokens from configured crypto-currencies exchanges.
    """
    if list_config:
        if list_config == 'currencies':
            import pprint
            click.echo(pprint.pprint(s.SYMBOLS_PER_EXCHANGE))
        elif list_config == 'exchanges':
            click.echo('\n'.join(s.EXCHANGES))
        ctx.exit()

    if testing:
        test()
        ctx.exit()

    if version:
        from xtcryptosignals import __title__, __version__
        click.echo('{} {}'.format(__title__, __version__))
        ctx.exit()

    TickerSettings.enable_socket_io = enable_messaging
    TickerSettings.log_minimal = log_minimal

    from celery import current_app
    from celery.bin import worker

    app = current_app._get_current_object()
    app.config_from_object('xtcryptosignals.celeryconfig')

    worker = worker.worker(app=app)
    worker.run(
        beat=True,
        loglevel=logging.INFO,
    )
Example #38
def test_transport_shutdown(request, celery, capture_events_forksafe, tmpdir):
    events = capture_events_forksafe()

    celery.conf.worker_max_tasks_per_child = 1
    celery.conf.broker_url = "memory://localhost/"
    celery.conf.broker_backend = "memory"
    celery.conf.result_backend = "file://{}".format(
        tmpdir.mkdir("celery-results"))
    celery.conf.task_always_eager = False

    runs = []

    @celery.task(name="dummy_task", bind=True)
    def dummy_task(self):
        runs.append(1)
        1 / 0

    res = dummy_task.delay()

    w = worker.worker(app=celery)
    t = threading.Thread(target=w.run)
    t.daemon = True
    t.start()

    with pytest.raises(Exception):
        # Celery 4.1 raises a gibberish exception
        res.wait()

    event = events.read_event()
    exception, = event["exception"]["values"]
    assert exception["type"] == "ZeroDivisionError"

    events.read_flush()

    # if this is nonempty, the worker never really forked
    assert not runs
Example #39
def main():
    usage = "usage: %prog [options] [broker-url]"
    epilog = """\
The worker needs Filetracker server configured. If no FILETRACKER_URL is
present in the environment, a sensible default is generated, using the same
host as the Celery broker uses, with default Filetracker port."""
    parser = OptionParser(usage=usage, epilog=epilog)
    parser.disable_interspersed_args()

    os.environ.setdefault('CELERY_CONFIG_MODULE', 'sio.celery.default_config')
    app = Celery()
    cmd = worker(app)
    for x in cmd.get_options():
        parser.add_option(x)

    options, args = parser.parse_args()

    if len(args) > 1:
        parser.error("Unexpected arguments: " + ' '.join(args[1:]))
    if args:
        broker_url = args[0]
        os.environ['CELERY_BROKER_URL'] = args[0]

    return cmd.run(**vars(options))
Example #40
def test_no_loglevel(self):
    self.app.Worker = Mock()
    worker(app=self.app).run(loglevel=None)
Example #41
def test_invalid_loglevel_gives_error(self):
    x = worker(app=self.app)
    with self.assertRaises(SystemExit):
        x.run(loglevel='GRIM_REAPER')
Example #42
def run_crawler():
    worker_celery = worker.worker(app=app)
    option = {'loglevel': 'INFO'}
    worker_celery.run(**option)
Example #43
def worker_start():
    from celery.bin import worker as celery_worker
    worker = celery_worker.worker(app=cel_app)
    worker.run(concurrency=4, traceback=False, loglevel='INFO', P="eventlet")
Example #44
                              meta={'msg': 'Uploading all files to aws...'})
            item = store.save(item, data)
            item['paid'] = 2
            self.update_state(state='PROGRESS',
                              meta={'msg': 'Saving into database...'})
            g = sync(item)
            if g:
                g.join()
                store.redis.hset('app_record', appid, item['version_code'])
        else:
            self.update_state(state='PROGRESS',
                              meta={'msg': 'This app has been up to date...'})
    except socket.error as e:
        self.update_state(state='PROGRESS',
                          meta={'msg': 'Have some error happened...'})
        self.retry(exc=e)

    return item['appid']


if __name__ == "__main__":
    from celery.bin import worker

    worker = worker.worker(app=c)
    options = {
        'concurrency': 4,
        'loglevel': 'INFO',
        'traceback': True,
    }
    worker.run(**options)
Example #46
def test_unknown_loglevel(self):
    with self.assertRaises(SystemExit):
        worker(app=self.app).run(loglevel='ALIEN')
    worker1 = self.Worker(app=self.app, loglevel=0xFFFF)
    self.assertEqual(worker1.loglevel, 0xFFFF)
Example #47
"""

Start the celery daemon from the Django management command.

"""
from __future__ import absolute_import, unicode_literals

from celery.bin import worker

from djcelery.app import app
from djcelery.management.base import CeleryCommand

worker = worker.worker(app=app)
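# Built at import time so the command class below can reuse the worker's
# option list.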


class Command(CeleryCommand):
    """Run the celery daemon."""
    help = 'Old alias to the "celery worker" command.'
    requires_model_validation = True
    options = (CeleryCommand.options + worker.get_options() +
               worker.preload_options)

    def handle(self, *args, **options):
        worker.run(*args, **options)
Example #48
from webapp import app, celeryconfig
from celery import current_app
from celery.bin import worker

application = current_app._get_current_object()

worker = worker.worker(app=application)
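# Importing this module starts the worker: run(**options) below blocks
# until the worker is stopped.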

options = {
    'broker': app.config['CELERY_BROKER_URL'],
    'loglevel': 'INFO',
    'traceback': True,
}

#app.run(host='0.0.0.0', port=80, debug=True)
worker.run(**options)

Example #49
def test_windows_B_option(self):
    self.app.IS_WINDOWS = True
    with self.assertRaises(SystemExit):
        worker(app=self.app).run(beat=True)
Example #50
def test_unknown_loglevel(self):
    with mock.stdouts():
        with pytest.raises(SystemExit):
            worker(app=self.app).run(loglevel='ALIEN')
        worker1 = self.Worker(app=self.app, loglevel=0xFFFF)
        assert worker1.loglevel == 0xFFFF
Example #53
def test_invalid_loglevel_gives_error(self):
    with mock.stdouts():
        x = worker(app=self.app)
        with pytest.raises(SystemExit):
            x.run(loglevel='GRIM_REAPER')
Example #54
def test_windows_B_option(self):
    with mock.stdouts():
        self.app.IS_WINDOWS = True
        with pytest.raises(SystemExit):
            worker(app=self.app).run(beat=True)
Example #56
def test_setup_concurrency_very_early(self):
    x = worker()
    x.run = Mock()
    with self.assertRaises(ImportError):
        x.execute_from_commandline(['worker', '-P', 'xyzybox'])
Example #57
def test_setup_concurrency_very_early(self):
    x = worker()
    x.run = Mock()
    with pytest.raises(ImportError):
        x.execute_from_commandline(['worker', '-P', 'xyzybox'])
Example #58
def worker(args):
    """Starts Airflow Celery worker"""
    if not settings.validate_session():
        raise SystemExit(
            "Worker exiting, database connection precheck failed.")

    autoscale = args.autoscale
    skip_serve_logs = args.skip_serve_logs

    if autoscale is None and conf.has_option("celery", "worker_autoscale"):
        autoscale = conf.get("celery", "worker_autoscale")

    # Setup locations
    pid_file_path, stdout, stderr, log_file = setup_locations(
        process=WORKER_PROCESS_NAME,
        pid=args.pid,
        stdout=args.stdout,
        stderr=args.stderr,
        log=args.log_file,
    )

    if hasattr(celery_app.backend, 'ResultSession'):
        # Pre-create the database tables now, otherwise SQLA via Celery has a
        # race condition where one of the subprocesses can die with "Table
        # already exists" error, because SQLA checks for which tables exist,
        # then issues a CREATE TABLE, rather than doing CREATE TABLE IF NOT
        # EXISTS
        try:
            session = celery_app.backend.ResultSession()
            session.close()
        except sqlalchemy.exc.IntegrityError:
            # At least on postgres, trying to create a table that already exist
            # gives a unique constraint violation or the
            # "pg_type_typname_nsp_index" table. If this happens we can ignore
            # it, we raced to create the tables and lost.
            pass

    # Setup Celery worker
    worker_instance = worker_bin.worker(app=celery_app)
    options = {
        'optimization': 'fair',
        'O': 'fair',
        'queues': args.queues,
        'concurrency': args.concurrency,
        'autoscale': autoscale,
        'hostname': args.celery_hostname,
        'loglevel': conf.get('logging', 'LOGGING_LEVEL'),
        'pidfile': pid_file_path,
        'without_mingle': args.without_mingle,
        'without_gossip': args.without_gossip,
    }

    if conf.has_option("celery", "pool"):
        pool = conf.get("celery", "pool")
        options["pool"] = pool
        # Celery pools of type eventlet and gevent use greenlets, which
        # requires monkey patching the app:
        # https://eventlet.net/doc/patching.html#monkey-patch
        # Otherwise task instances hang on the workers and are never
        # executed.
        maybe_patch_concurrency(['-P', pool])

    if args.daemon:
        # Run Celery worker as daemon
        handle = setup_logging(log_file)

        with open(stdout, 'w+') as stdout_handle, open(stderr,
                                                       'w+') as stderr_handle:
            if args.umask:
                umask = args.umask

            ctx = daemon.DaemonContext(
                files_preserve=[handle],
                umask=int(umask, 8),
                stdout=stdout_handle,
                stderr=stderr_handle,
            )
            with ctx:
                sub_proc = _serve_logs(skip_serve_logs)
                worker_instance.run(**options)
    else:
        # Run Celery worker in the same process
        sub_proc = _serve_logs(skip_serve_logs)
        worker_instance.run(**options)

    if sub_proc:
        sub_proc.terminate()
Example #59
        task_default_exchange='job_exchange',
        task_default_routing_key='job_routing_key',
    )
    return job_app


job_app = init()
node_id = "no_id"
#def start(container):
if __name__ == '__main__':
    print(" ----------- I'm starting the Job Worker for the container ")
    node_id = sys.argv[1]
    container = ast.literal_eval(sys.argv[2])
    log_file = "./log/" + container['hostname'] + ".log"
    with open(log_file, "a") as myfile:
        myfile.write("=====================================================\n")
        myfile.write(str(container) + "\n")
        myfile.write("=====================================================\n")
    worker_id = node_id + "##" + container['service_name'] + "##" + container[
        'id_long']

    job_app = init()
    job_worker = worker.worker(app=job_app)
    job_options = {
        'hostname': worker_id,
        'queues': [job_queue_name],
        'loglevel': 'INFO',
        'traceback': True,
    }
    job_worker.run(**job_options)