# Example #1 (score: 0)
# CELERY_ACKS_LATE and CELERYD_PREFETCH_MULTIPLIER settings help evenly
# distribute tasks across the cluster. This configuration is intended to
# make worker processes reserve only a single task at any given time.
# (The default settings for prefetching define that each worker process will
# reserve 4 tasks at once. For long running calculations with lots of long,
# heavy tasks, this greedy prefetching is not recommended and can result in
# performance issues with respect to cluster utilization.)
# CELERY_MAX_CACHED_RESULTS disables the cache on the results: this means
# that map_reduce will not leak memory by keeping the intermediate results
CELERY_ACKS_LATE = True
CELERYD_PREFETCH_MULTIPLIER = 1
CELERY_MAX_CACHED_RESULTS = 1

# NOTE(review): accepting 'pickle' content lets any client that can reach
# the broker execute arbitrary code on the workers -- confirm the broker
# is restricted to trusted hosts before deploying.
CELERY_ACCEPT_CONTENT = ['pickle', 'json']

# Modules every celery worker must import at startup: the engine core
# calculator modules, the hazard bootstrap module and the lite calculators.
CELERY_IMPORTS = get_core_modules(engine) + [
    "openquake.engine.calculators.hazard.general",
    "openquake.calculators.classical",
    "openquake.calculators.classical_risk",
    "openquake.calculators.classical_damage",
    "openquake.calculators.event_based",
    "openquake.calculators.event_based_risk",
    "openquake.calculators.scenario_risk",
    "openquake.calculators.scenario_damage",
]

# Tell django which settings module the engine's ORM should load.
os.environ["DJANGO_SETTINGS_MODULE"] = "openquake.engine.settings"

# NOTE(review): this ``try`` statement is truncated -- the matching
# ``except``/``finally`` clause is missing from this snippet, so the code
# is not syntactically valid as shown; recover the original handler from
# the upstream source before using it.
try:
    from openquake.engine.utils import tasks
    # as a side effect, this import replaces the litetask with oqtask
# BROKER_POOL_LIMIT enables a connections pool so Celery can reuse
# a single connection to RabbitMQ. Value 10 is the default from
# Celery 2.5 where this feature is enabled by default.
# Actually disabled because it's not stable in production.
# See https://bugs.launchpad.net/oq-engine/+bug/1250402
BROKER_POOL_LIMIT = None

# Store task results in the AMQP broker (RabbitMQ) itself.
CELERY_RESULT_BACKEND = "amqp"

# CELERY_ACKS_LATE and CELERYD_PREFETCH_MULTIPLIER settings help evenly
# distribute tasks across the cluster. This configuration is intended to
# make worker processes reserve only a single task at any given time.
# (The default settings for prefetching define that each worker process will
# reserve 4 tasks at once. For long running calculations with lots of long,
# heavy tasks, this greedy prefetching is not recommended and can result in
# performance issues with respect to cluster utilization.)
# CELERY_MAX_CACHED_RESULTS disables the cache on the results: this means
# that map_reduce will not leak memory by keeping the intermediate results
CELERY_ACKS_LATE = True
CELERYD_PREFETCH_MULTIPLIER = 1
CELERY_MAX_CACHED_RESULTS = 1

# Calculator core modules the workers need, split by calculation type.
HAZARD_MODULES = get_core_modules(hazard)
RISK_MODULES = get_core_modules(risk)

# Workers import the hazard and risk calculators plus the test-only tasks.
CELERY_IMPORTS = (
    HAZARD_MODULES
    + RISK_MODULES
    + ["openquake.engine.tests.utils.tasks"]
)

# Point django at the engine's settings module.
os.environ["DJANGO_SETTINGS_MODULE"] = "openquake.engine.settings"
# Example #3 (score: 0)
# RabbitMQ connection credentials, read from the amqp configuration
# section (``amqp`` is presumably a dict-like config section defined
# elsewhere in this module -- TODO confirm against the caller).
BROKER_USER = amqp.get("user")
BROKER_PASSWORD = amqp.get("password")
BROKER_VHOST = amqp.get("vhost")

# Store task results in the AMQP broker (RabbitMQ) itself.
CELERY_RESULT_BACKEND = "amqp"

# CELERY_ACKS_LATE and CELERYD_PREFETCH_MULTIPLIER settings help evenly
# distribute tasks across the cluster. This configuration is intended to
# make worker processes reserve only a single task at any given time.
# (The default settings for prefetching define that each worker process will
# reserve 4 tasks at once. For long running calculations with lots of long,
# heavy tasks, this greedy prefetching is not recommended and can result in
# performance issues with respect to cluster utilization.)
CELERY_ACKS_LATE = True
CELERYD_PREFETCH_MULTIPLIER = 1

# Calculator core modules the workers need, split by calculation type.
HAZARD_MODULES = get_core_modules(hazard)

RISK_MODULES = get_core_modules(risk)

CELERY_IMPORTS = HAZARD_MODULES + RISK_MODULES

# Add the test-only tasks module when it is importable (development
# checkouts); production installs do not ship the tests, hence the guard.
# NOTE(review): the ``imp`` module is deprecated (removed in Python 3.12);
# migrate to ``importlib.util.find_spec`` when supported interpreters allow.
try:
    # Bug fix: build the search path portably with os.path.join segments
    # instead of hard-coding the "/" separator in "tests/utils".
    imp.find_module("tasks", [os.path.join(x, "tests", "utils")
                              for x in sys.path])
    CELERY_IMPORTS.append("tests.utils.tasks")
except ImportError:
    pass

os.environ["DJANGO_SETTINGS_MODULE"] = "openquake.engine.settings"
# Example #4 (score: 0)
# CELERY_ACKS_LATE and CELERYD_PREFETCH_MULTIPLIER settings help evenly
# distribute tasks across the cluster. This configuration is intended to
# make worker processes reserve only a single task at any given time.
# (The default settings for prefetching define that each worker process will
# reserve 4 tasks at once. For long running calculations with lots of long,
# heavy tasks, this greedy prefetching is not recommended and can result in
# performance issues with respect to cluster utilization.)
# CELERY_MAX_CACHED_RESULTS disables the cache on the results: this means
# that map_reduce will not leak memory by keeping the intermediate results
CELERY_ACKS_LATE = True
CELERYD_PREFETCH_MULTIPLIER = 1
CELERY_MAX_CACHED_RESULTS = 1

# NOTE(review): accepting 'pickle' content lets any client that can reach
# the broker execute arbitrary code on the workers -- confirm the broker
# is restricted to trusted hosts before deploying.
CELERY_ACCEPT_CONTENT = ['pickle', 'json']

# Modules every celery worker must import at startup: the engine core
# modules, the hazard bootstrap helper, the test tasks and the commonlib
# calculators.
CELERY_IMPORTS = get_core_modules(engine) + [
    "openquake.engine.calculators.hazard.general",
    "openquake.engine.tests.utils.tasks",
    "openquake.commonlib.calculators.classical",
    "openquake.commonlib.calculators.classical_risk",
    "openquake.commonlib.calculators.classical_damage",
    "openquake.commonlib.calculators.event_based",
    "openquake.commonlib.calculators.event_based_risk",
    "openquake.commonlib.calculators.scenario_risk",
    "openquake.commonlib.calculators.scenario_damage",
]

# Tell django which settings module the engine's ORM should load.
os.environ["DJANGO_SETTINGS_MODULE"] = "openquake.engine.settings"

try: