Example #1
def action_over_worker_job_type(workflow_instance_id, worker_id, option, redis_score):
    connection = get_connection(CONFIG.get('all'))
    workflow_instance_data = _get_workflow_instance(connection, workflow_instance_id)
    workflow_id = workflow_instance_data.get('type')
    workflow = _get_workflow(workflow_id)
    redis_key = "workflows:%s:workflow_instances:%s:workers:%s:errors" % (
        workflow_id, workflow_instance_id, worker_id
    )
    # 'all' reprocesses every stored error; otherwise only the entry whose
    # sorted-set score matches redis_score.
    if redis_score == 'all':
        errors_list = connection.zrange(redis_key, 0, -1, withscores=True)
    else:
        errors_list = connection.zrangebyscore(redis_key, redis_score, redis_score, withscores=True)
    if not errors_list:
        abort(404, "No such error id: %s" % redis_score)
    for raw_data, score in errors_list:  # 'score' avoids shadowing the redis_score argument
        if option == 'retry':
            # Re-queue the failed job on the worker it came from.
            meta_worker = workflow.get('workers', {}).get(worker_id, {}).get('meta_worker')
            add_data_to_next_worker(connection, meta_worker, raw_data)
        else:
            # Discard: archive the job in the worker's discarded sorted set.
            connection.zadd(
                REDIS_KEY_INSTANCE_WORKER_DISCARDED % (workflow_id, workflow_instance_id, worker_id),
                {raw_data: time.time()}  # redis-py 3.x mapping signature
            )
        connection.zremrangebyscore(redis_key, score, score)
    return jsonify({})
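The handler above treats a sorted-set score as an error id. A minimal sketch of that key layout, assuming a local Redis, redis-py 3.x, and placeholder ids:

import json
import time

import redis

r = redis.Redis(decode_responses=True)
errors_key = "workflows:%s:workflow_instances:%s:workers:%s:errors" % (
    "wf_type", "instance_1", "worker_a"  # placeholder ids
)
# Each failed job is stored as raw JSON scored by its failure timestamp,
# so a single score doubles as the "error id" the handler selects on.
r.zadd(errors_key, {json.dumps({"job": 1}): time.time()})
score = r.zrange(errors_key, 0, -1, withscores=True)[0][1]
print(r.zrangebyscore(errors_key, score, score, withscores=True))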
Example #2
def get_workflow_instance_outputs(workflow_instance_id):
    connection = get_connection(CONFIG.get('all'))
    workflow_instance_data = _get_workflow_instance(connection, workflow_instance_id)
    workflow_id = workflow_instance_data.get('type')
    workflow = _get_workflow(workflow_id)
    # TODO: Manage multiple final nodes
    worker_id = _find_last_worker_id(workflow, workflow.get('start_worker_id'))[0]
    target_key = REDIS_KEY_INSTANCE_WORKER_FINALIZED_JOBS % (workflow_id, workflow_instance_id, worker_id)
    return to_json([
        json_loads(data).get('workers_output', {}).get(worker_id) for data in connection.zrange(target_key, 0, -1)
    ])
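_find_last_worker_id is defined elsewhere in the module. A plausible sketch, assuming a hypothetical next_workers list on each worker entry (a field name not confirmed by the source), walks the graph from the start worker and collects nodes with no successors, which is consistent with the TODO above and the [0] indexing:

def _find_last_worker_id(workflow, worker_id, found=None):
    # Hypothetical sketch: 'next_workers' is an assumed field name.
    if found is None:
        found = []
    next_ids = workflow.get('workers', {}).get(worker_id, {}).get('next_workers', [])
    if not next_ids:
        found.append(worker_id)  # no successors: a final node
    for next_id in next_ids:
        _find_last_worker_id(workflow, next_id, found)
    return found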
Example #3
def get_workflow_instance_status(workflow_instance_id):
    # TODO: refactor this code
    # TODO: support discarded
    connection = get_connection(CONFIG.get('all'))
    workflow_instance_data = _get_workflow_instance(connection, workflow_instance_id)
    if not workflow_instance_data.get('start_time'):
        return jsonify({'status': 'pending'})

    errors_keys = connection.keys(
        REDIS_KEY_INSTANCE_WORKER_ERRORS % (
            workflow_instance_data.get('type'), workflow_instance_id, '*'
        )
    )
    if errors_keys:
        return jsonify({'status': 'errors'})

    total_jobs_keys = connection.keys(
        REDIS_KEY_INSTANCE_WORKER_JOBS % (
            workflow_instance_data.get('type'), workflow_instance_id, '*', 'total'
        )
    )

    done_jobs_keys = connection.keys(
        REDIS_KEY_INSTANCE_WORKER_JOBS % (
            workflow_instance_data.get('type'), workflow_instance_id, '*', 'done'
        )
    )

    # pending = union(all total-job sets) minus union(all done-job sets),
    # computed server-side in two throwaway keys that are deleted afterwards.
    pipe = connection.pipeline()
    temp_union = 'temp:%s:%s' % (datetime.now().isoformat().split('T')[0], str(uuid.uuid4()))
    pipe.sunionstore(temp_union, *total_jobs_keys)

    temp_diff = 'temp:%s:%s' % (datetime.now().isoformat().split('T')[0], str(uuid.uuid4()))
    pipe.sdiffstore(temp_diff, temp_union, *done_jobs_keys)
    pipe.delete(temp_diff)
    pipe.delete(temp_union)
    execute_result = pipe.execute()
    # SDIFFSTORE (the pipeline's second command) returns the cardinality of
    # the difference, i.e. how many jobs are still pending.
    pending_jobs = execute_result[1]

    status = 'running' if pending_jobs > 0 else 'done'
    runned_for = ''
    if workflow_instance_data.get('start_time') and workflow_instance_data.get('update_time'):
        try:
            runned_for = float(workflow_instance_data.get('update_time')) - \
                float(workflow_instance_data.get('start_time'))
        except (TypeError, ValueError):
            pass
    return jsonify({'status': status, 'runned_for': runned_for})
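The pending count is plain Redis set algebra: the union of every worker's total-jobs set, minus the union of the done sets. The same computation in a standalone sketch, assuming a local Redis:

import redis

r = redis.Redis(decode_responses=True)
r.sadd('demo:worker_a:total', 'job1', 'job2', 'job3')
r.sadd('demo:worker_b:total', 'job4')
r.sadd('demo:worker_a:done', 'job1', 'job2')
r.sunionstore('demo:union', 'demo:worker_a:total', 'demo:worker_b:total')
pending = r.sdiffstore('demo:diff', 'demo:union', 'demo:worker_a:done')
print(pending)  # 2 -> job3 and job4 are still pending
r.delete('demo:union', 'demo:diff')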
Example #4
def get_workflow_instance_stats(workflow_instance_id):
    connection = get_connection(CONFIG.get('all'))
    workflow_instance_data = _get_workflow_instance(connection, workflow_instance_id)

    workers_prefix = "workflows:%s:workflow_instances:%s:workers:" % (
        workflow_instance_data.get('type'), workflow_instance_id
    )
    workers_data = {}

    for status_type in ('done', 'total'):
        pipe = connection.pipeline()
        workers_keys = connection.keys(REDIS_KEY_INSTANCE_WORKER_JOBS % (
            workflow_instance_data.get('type'), workflow_instance_id, '*', status_type
        ))
        for key in workers_keys:
            pipe.scard(key)
        workers_data[status_type] = _workers_dict_data(workers_prefix, workers_keys, pipe.execute())

    for status_type in ('errors', 'discarded', 'finalized'):
        pipe = connection.pipeline()
        workers_keys = connection.keys(workers_prefix+"*:"+status_type)
        for key in workers_keys:
            pipe.zcard(key)
        workers_data[status_type] = _workers_dict_data(workers_prefix, workers_keys, pipe.execute())

    workers_stats = {}
    for status_type, data in workers_data.items():
        for worker, worker_num in data.items():
            workers_stats.setdefault(worker, {})[status_type] = worker_num

    for key in connection.keys(workers_prefix+'*:timing'):
        worker = key.split(workers_prefix)[1].split(':')[0]
        key_values = [float(x) for x in connection.lrange(key, 0, -1)]
        stats = workers_stats.setdefault(worker, {})
        stats['total_time'] = sum(key_values)
        stats['average_time'] = stats['total_time'] / len(key_values)

    for worker, values in workers_stats.items():
        # pending = total - done - errors - discarded
        workers_stats[worker]['pending'] = int(values.get('total', 0)) - int(
            values.get('done', 0)) - int(values.get('errors', 0)) - int(values.get('discarded', 0))
    return jsonify({
        'workers': workers_stats,
        'start_time': workflow_instance_data.get('start_time'),
        'creation_time': workflow_instance_data.get('creation_time'),
        'update_time': workflow_instance_data.get('update_time')
    })
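_workers_dict_data is also defined elsewhere. Judging by the key parsing in the timing loop above, a plausible sketch pairs each Redis key with its pipelined count and indexes the result by the worker id embedded in the key name:

def _workers_dict_data(prefix, keys, counts):
    # Plausible sketch (assumed, not confirmed by the source): the worker id
    # is the first path segment after the shared key prefix.
    return {
        key.split(prefix)[1].split(':')[0]: count
        for key, count in zip(keys, counts)
    }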
Example #5
def get_workflow_instance_worker_jobs_type(workflow_instance_id, worker_id, option):
    connection = get_connection(CONFIG.get('all'))
    workflow_instance_data = _get_workflow_instance(connection, workflow_instance_id)
    # Fetch the 20 most recent entries (highest scores) for this job type.
    jobs = connection.zrange(
        "workflows:%s:workflow_instances:%s:workers:%s:%s" % (
            workflow_instance_data.get('type'), workflow_instance_id, worker_id, option
        ),
        -20, -1, withscores=True
    )
    # Replace each (raw_json, score) pair with the decoded job, keeping the
    # score around as its 'redis_score' identifier.
    for i, (raw_data, score) in enumerate(jobs):
        jobs[i] = json.loads(raw_data)
        jobs[i]['redis_score'] = score
    return Response(
        json.dumps(jobs), mimetype='application/json'
    )
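The loop rewrites each (member, score) pair into the decoded job plus a redis_score field. The same transformation in isolation, with illustrative values:

import json

pairs = [('{"job": 42}', 1700000000.0)]  # illustrative (raw_json, score) pair
jobs = [dict(json.loads(raw_data), redis_score=score) for raw_data, score in pairs]
print(jobs)  # [{'job': 42, 'redis_score': 1700000000.0}]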
Example #6
def create_workflow_workflow_instance(workflow_id):
    redis_con = get_connection(CONFIG.get('all'))
    workflow = _get_workflow(workflow_id)
    data = request.get_json()
    start_worker_id = workflow.get('start_worker_id')
    # Look up the meta worker configured for the start worker, if any.
    meta_worker = workflow.get('workers', {}).get(start_worker_id, {}).get('meta_worker')
    if meta_worker:
        workflow_instance_id = start_workflow_instance(
            redis_con, workflow_id, meta_worker, start_worker_id,
            data.get('name'), data.get('data', {})
        )
        return Response(json.dumps({'id': workflow_instance_id}), status=201)
    return Response(status=404)
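The handler expects a JSON body carrying a name and an optional data payload. A client-side sketch, assuming the requests package and a hypothetical URL rule (route registration is not shown in the source):

import requests

# Hypothetical URL; the actual route is not part of these examples.
resp = requests.post(
    'http://localhost:5000/workflows/my_workflow/workflow_instances',
    json={'name': 'nightly-run', 'data': {'input': 'value'}}
)
print(resp.status_code)  # 201 with {"id": ...} on success, 404 otherwise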
Example #7
def get_workflow_instance(workflow_instance_id):
    connection = get_connection(CONFIG.get('all'))
    workflow_instance = _get_workflow_instance(connection, workflow_instance_id)
    return jsonify(workflow_instance)
Example #8
def get_workflow_workflow_instances(workflow_id):
    redis_con = get_connection(CONFIG.get('all'))
    # Return the 25 most recent instances of this workflow.
    workflow_instances = redis_con.zrange('workflows:%s:workflow_instances' % workflow_id, -25, -1)
    return_data = [json.loads(workflow_instance) for workflow_instance in workflow_instances]
    return Response(json.dumps(return_data), mimetype='application/json')
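None of the examples show how these view functions are mounted. A hypothetical wiring sketch with made-up URL rules, assuming a standard Flask app:

from flask import Flask

app = Flask(__name__)

# Hypothetical URL rules; the real ones are not part of the source.
app.add_url_rule('/workflows/<workflow_id>/workflow_instances',
                 view_func=get_workflow_workflow_instances)
app.add_url_rule('/workflow_instances/<workflow_instance_id>',
                 view_func=get_workflow_instance)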