# Example 1
    app.iniconfig.get('redis', 'host'),
    app.iniconfig.get('redis', 'port', fallback=6379),
    app.iniconfig.get('redis', 'db', fallback=0),
    app.iniconfig.get('redis', 'password', fallback=None), False)

# settings.ini must declare taskfile_dir, and it has to name an existing directory.
assert app.iniconfig.get('default', 'taskfile_dir'), "missing taskfile_dir from settings.ini"
taskfile_dir = app.iniconfig.get('default', 'taskfile_dir')
assert os.path.isdir(taskfile_dir), "taskfile_dir (%s) must be a directory" % taskfile_dir

# Poll redis (up to 10 attempts, 1s apart) for the stored default configuration.
# NOTE(review): redis_db and default_config are defined earlier in this file,
# outside this chunk.
retry = 0
while retry < 10:
    try:
        # exists() guards hget: the expression is falsy when the hash is absent
        current_default_config = redis_db.exists("default") \
                                 and redis_db.hget("default", "configuration")
        break
    except (ConnectionError, AssertionError) as e:
        retry += 1
        time.sleep(1)

assert retry < 10, "Cannot connect to redis DB - aborting"

# Push the local default configuration to redis when it differs from the stored
# one, stamping the update time so other processes can detect the change.
if current_default_config != default_config:
    redis_db.hset("default", "configuration", default_config)
    redis_db.hset("default", "timestamp", time.time())


def append_version(v):
    """Append ``v`` to the module-level VERSION string, separated by a colon."""
    global VERSION
    VERSION = VERSION + ":" + v
# Example 2
for service in services:

    # Clear every transient marker this service left behind in a previous run:
    # busy resources, reserved resources, and queued-task markers.
    for key_pattern in ('busy:%s:*', 'reserved:%s:*', 'queued:%s'):
        for stale_key in redis.keys(key_pattern % service):
            redis.delete(stale_key)

    # On startup, push every active task back onto the appropriate queue:
    # still-pending tasks go to their service queue, running ones to the work queue.
    for task_id in task.list_active(redis, service):
        with redis.acquire_lock(task_id):
            status = redis.hget('task:'+task_id, 'status')
            if status == 'queued' or status == 'allocating' or status == 'allocated':
                task.service_queue(redis, task_id, redis.hget('task:'+task_id, 'service'))
                task.set_status(redis, 'task:'+task_id, 'queued')
            else:
                task.work_queue(redis, task_id, service)
        # Integrity check: ensure priority/queued_time fields exist on the task
        # hash. Use the 'task:' key prefix — the unprefixed keys the original
        # read/wrote here are never written anywhere else (see the parallel
        # variant later in this file), so the checks were ineffective.
        if redis.hget('task:'+task_id, 'priority') is None:
            redis.hset('task:'+task_id, 'priority', 0)
        if redis.hget('task:'+task_id, 'queued_time') is None:
            redis.hset('task:'+task_id, 'queued_time', time.time())

    # Desallocate all resources that are not anymore associated to a running task
    resources = services[service].list_resources()

    for resource in resources:
# Example 3
# Register the service under the admin hash: active configuration name,
# the full configuration set (JSON), and the pickled service definition.
keys = 'admin:service:%s' % service
redis.hset(keys, "current_configuration", current_configuration)
redis.hset(keys, "configurations", json.dumps(configurations))
# NOTE(review): pickle is only safe because this value is produced and consumed
# by the same application; never unpickle data from untrusted sources.
redis.hset(keys, "def", pickle.dumps(services[service]))

# Drop stale reserved-resource and queued-task markers left for this service
# by a previous run.
for key_pattern in ('reserved:%s:*', 'queued:%s'):
    for key in redis.keys(key_pattern % service):
        redis.delete(key)

# On startup, push every active task back onto the appropriate queue:
# still-pending tasks return to their service queue, running ones to the
# work queue. Then make sure the bookkeeping fields exist on each task hash.
for task_id in task.list_active(redis, service):
    task_key = 'task:' + task_id
    with redis.acquire_lock(task_id):
        status = redis.hget(task_key, 'status')
        if status in ('queued', 'allocating', 'allocated'):
            task.service_queue(redis, task_id, redis.hget(task_key, 'service'))
            task.set_status(redis, task_key, 'queued')
        else:
            task.work_queue(redis, task_id, service)
    # integrity check: backfill priority / queued_time when missing
    if redis.hget(task_key, 'priority') is None:
        redis.hset(task_key, 'priority', 0)
    if redis.hget(task_key, 'queued_time') is None:
        redis.hset(task_key, 'queued_time', time.time())

# Deallocate every resource that is no longer associated with a running task
# (resources and servers come from the service definition loaded above).
resources = services[service].list_resources()
servers = services[service].list_servers()
# Example 4
# Retry the redis connection up to 10 times, 1s apart.
while retry < 10:
    try:
        # make sure notify events are set
        redis.config_set('notify-keyspace-events', 'Klgx')
        break
    except ConnectionError:
        # exception object unused, so it is not bound; logger.warn is a
        # deprecated alias — use warning() with lazy %-style arguments
        retry += 1
        logger.warning("cannot connect to redis DB - retrying (%d)", retry)
        time.sleep(1)

assert retry < 10, "Cannot connect to redis DB - aborting"

# load default configuration from database (up to 10 attempts, 5s apart)
retry = 0
while retry < 10:
    default_config = redis.hget('default', 'configuration')
    default_config_timestamp = redis.hget('default', 'timestamp')
    if default_config:
        break
    # Count this failed attempt. Without the increment the loop spins forever
    # when the config never appears, and the retry-limit assert below can
    # never fire.
    retry += 1
    time.sleep(5)

assert retry < 10, "Cannot retrieve default config from redis DB - aborting"

# default_config is the JSON string fetched from redis above
base_config = json.loads(default_config)

# Load the service configuration; each worker serves exactly one service.
services = config.load_service_config(args.config, base_config)
assert len(services) == 1, "workers are now dedicated to one single service"
# grab the single service name (the mapping's only key)
service = next(iter(services))

current_configuration = None
configurations = {}
# Example 5
# Retry the redis connection up to 10 times, 1s apart.
while retry < 10:
    try:
        # make sure notify events are set
        redis.config_set('notify-keyspace-events', 'Klgx')
        break
    except ConnectionError:
        # exception object unused, so it is not bound
        retry += 1
        logger.warning("cannot connect to redis DB - retrying (%d)", retry)
        time.sleep(1)

assert retry < 10, "Cannot connect to redis DB - aborting"

# load default configuration from database (up to 10 attempts, 5s apart)
retry = 0
while retry < 10:
    default_config = redis.hget('default', 'configuration')
    default_config_timestamp = redis.hget('default', 'timestamp')
    if default_config:
        break
    # Count this failed attempt. Without the increment the loop spins forever
    # when the config never appears, and the retry-limit assert below can
    # never fire.
    retry += 1
    time.sleep(5)

assert retry < 10, "Cannot retrieve default config from redis DB - aborting"

# default_config is the JSON string fetched from redis above
base_config = json.loads(default_config)

# Load the service configuration merged with the base config; each worker
# serves exactly one service.
services, merged_config = config.load_service_config(args.config, base_config)
assert len(services) == 1, "workers are now dedicated to one single service"
# grab the single service name (the mapping's only key)
service = next(iter(services))

current_configuration = None
configurations = {}
# Example 6
# Configure the application logger level from settings.ini (default ERROR).
app.logger.setLevel(logging.getLevelName(
                    app.iniconfig.get('default', 'log_level', fallback='ERROR')))

# Open the redis connection described by the [redis] section of settings.ini.
redis = RedisDatabase(app.iniconfig.get('redis', 'host'),
                      app.iniconfig.get('redis', 'port', fallback=6379),
                      app.iniconfig.get('redis', 'db', fallback=0),
                      app.iniconfig.get('redis', 'password', fallback=None))

# taskfile_dir is mandatory and must name an existing directory.
assert app.iniconfig.get('default', 'taskfile_dir'), "missing taskfile_dir from settings.ini"
taskfile_dir = app.iniconfig.get('default', 'taskfile_dir')
assert os.path.isdir(taskfile_dir), "taskfile_dir (%s) must be a directory" % taskfile_dir

# Poll redis (up to 10 attempts, 1s apart) for the stored default configuration.
retry = 0
while retry < 10:
    try:
        # exists() guards hget: the expression is falsy when the hash is absent
        current_default_config = redis.exists("default") and redis.hget("default", "configuration")
        break
    except (ConnectionError, AssertionError) as e:
        retry += 1
        time.sleep(1)

assert retry < 10, "Cannot connect to redis DB - aborting"

# Write the local default configuration back when it differs from the stored
# one, stamping the update time. NOTE(review): default_config is defined
# elsewhere in this file, outside this chunk.
if current_default_config != default_config:
    redis.hset("default", "configuration", default_config)
    redis.hset("default", "timestamp", time.time())


def append_version(v):
    """Extend the module-level VERSION string with a colon-separated suffix."""
    global VERSION
    VERSION = VERSION + ":" + v
# Example 7
# make sure notify events are set ('Klgx' enables keyspace-channel events for
# list, generic and expired commands — see redis keyspace-notification docs)
redis.config_set('notify-keyspace-events', 'Klgx')

# Load every service definition from the configured directory together with
# the shared base configuration.
services, base_config = config.load_services(cfg.get('default', 'config_dir'))

# Clear all transient busy/reserved resource markers left by a previous run.
for key_pattern in ('busy:*', 'reserved:*'):
    for key in redis.keys(key_pattern):
        redis.delete(key)

# On startup, add all active tasks in the work queue.
for task_id in task.list_active(redis):
    with redis.acquire_lock(task_id):
        status = redis.hget('task:' + task_id, 'status')
        # 'queued', not 'queue': task.set_status below writes 'queued', and the
        # other variants of this loop in this file compare against 'queued' —
        # with 'queue' pending tasks wrongly fell through to the work queue.
        if status == 'queued' or status == 'allocating' or status == 'allocated':
            task.service_queue(redis, task_id,
                               redis.hget('task:' + task_id, 'service'))
            task.set_status(redis, 'task:' + task_id, 'queued')
        else:
            task.work_queue(redis, task_id)

# Desallocate all resources that are not anymore associated to a running task
for service in services:
    resources = services[service].list_resources()
    for resource in resources:
        keyr = 'resource:%s:%s' % (service, resource)
        running_tasks = redis.hgetall(keyr)
        for g, task_id in six.iteritems(running_tasks):
            with redis.acquire_lock(task_id):