Example #1
def process_celery_flower_command(command_line_values):
    """
    :param _CommandLineValues command_line_values:
    """
    celery_app = get_celery_app()

    # Flower is launched through the Celery CLI; this argv mirrors
    # running "celery flower --port=<port>" on the command line.
    command_args = ['celery', 'flower', f'--port={command_line_values.port}']
    celery_app.start(command_args)
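Both examples on this page call a shared get_celery_app() factory. Below is a minimal sketch of what such a factory might look like; the app name and the Redis broker URL are assumptions for illustration (Redis is only implied by the worker comments in Example #2), not taken from the original codebase.

from celery import Celery

def get_celery_app() -> Celery:
    # Assumed: the real project reads broker/backend settings from
    # configuration; a local Redis broker is used here for illustration.
    return Celery('app', broker='redis://localhost:6379/0')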
Example #2
def process_celery_worker_command(command_line_values):
    """
    :param _CommandLineValues command_line_values:
    """
    celery_app = get_celery_app()

    celery_worker_args = [
        'worker',
        '--pool=gevent',
        '--autoscale=100,30',
        # We once brought a Redis instance to near-death with the
        # large amount of network IO these features generate when
        # many workers are present, so we contemplated disabling them:
        # '--without-heartbeat',
        # '--without-mingle',
        # '--without-gossip',
        # However:
        # - Flower does not work without them.
        # - While researching this, we found a number of bugs filed for obscure
        #   "result management" failures when these are disabled. Because these are
        #   not commonly used switches, the majority of the world runs systems with
        #   them on, and, as a result, the majority of the result-store kinks are
        #   caught with this stuff enabled. After trying to chase an explanation for
        #   why, in random scenarios, task.delay().join() never resolves (the result
        #   is never returned), we are not leaving any weirdness on the table:
        #   keeping these switches commented out removes weirdness.
        #   (Yes, this is superstition at work :) )
        # Temporarily ignoring prescribed worker_type values
        # and assigning all possible routing keys to all workers.
        # Notice that we purposefully still keep two separate queues,
        # even though they are processed by the same workers.
        # One of them is effectively a "high priority" line: even with the
        # same workers serving both, items added to the high-priority line
        # reach a worker sooner because there are very few competitors in
        # that line, while the other line may hold thousands more tasks.
        '--queues',
        ','.join(pad_with_build_id(routing_key) for routing_key in RoutingKey.ALL),
        # '--queues', pad_with_build_id(command_line_values.worker_type)
    ]
    celery_app.worker_main(celery_worker_args)
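The helpers used to build the --queues value are project-specific. Below are hypothetical sketches of pad_with_build_id and RoutingKey, consistent with their usage above (the queue names and the build-id scheme are assumptions):

import os

# Assumed: queues are namespaced by a build/deploy identifier so that
# workers from different deployments do not consume each other's tasks.
BUILD_ID = os.environ.get('BUILD_ID', 'dev')

class RoutingKey:
    default = 'default'
    longrunning = 'longrunning'
    ALL = [default, longrunning]

def pad_with_build_id(routing_key: str) -> str:
    return f'{routing_key}-{BUILD_ID}'

With these stubs, the joined value passed to --queues would be 'default-dev,longrunning-dev'.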
Example #3
import logging

from common.celeryapp import get_celery_app
from common.enums.entity import Entity
from common.id_tools import generate_universal_id
from common.measurement import Measure
from common.tokens import PlatformTokenManager
from oozer.common.cold_storage.batch_store import NormalStore
from oozer.common.facebook_api import PlatformApiContext, get_default_fields
from oozer.common.helpers import extract_tags_for_celery_fb_task
from oozer.common.job_context import JobContext
from oozer.common.job_scope import JobScope
from oozer.common.sweep_running_flag import sweep_running
from oozer.common.vendor_data import add_vendor_data
from oozer.entities.feedback_entity_task import feedback_entity_task
from oozer.reporting import reported_task

app = get_celery_app()
logger = logging.getLogger(__name__)


@app.task
@Measure.timer(__name__,
               function_name_as_metric=True,
               extract_tags_from_arguments=extract_tags_for_celery_fb_task)
@Measure.counter(__name__,
                 function_name_as_metric=True,
                 count_once=True,
                 extract_tags_from_arguments=extract_tags_for_celery_fb_task)
@reported_task
@sweep_running
def collect_adaccount_task(job_scope: JobScope, _: JobContext):
    logger.info(f'{job_scope} started')
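Note the decorator order in Example #3: @app.task is outermost so that Celery registers the fully wrapped callable, while @sweep_running sits closest to the body so its guard runs inside the metrics and reporting wrappers. Below is a minimal sketch of what a guard decorator like @sweep_running might look like; the real implementation lives in oozer.common.sweep_running_flag, and both the sweep_id attribute and the flag check here are assumptions:

import functools
import logging

logger = logging.getLogger(__name__)

def is_sweep_running(sweep_id: str) -> bool:
    # Hypothetical flag check; the real project would consult a shared
    # store (e.g. Redis) keyed by sweep_id.
    return True

def sweep_running(func):
    @functools.wraps(func)
    def wrapper(job_scope, job_context, *args, **kwargs):
        if not is_sweep_running(job_scope.sweep_id):  # assumed attribute
            logger.info(f'{job_scope} skipped: sweep is no longer running')
            return None
        return func(job_scope, job_context, *args, **kwargs)
    return wrapper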