Example #1
def _add_workflow(data, user, id=0):
    logger.debug("workflow name = %s", data[NAME])

    if id:
        # get existing workflow
        workflow = Workflow.objects.get(pk=id)

        if not workflow.editable:
            raise IgniteException(ERR_WF_NOT_EDITABLE)

        # decrement ref counts of the tasks referenced by the old task list
        for task_const in workflow.task_list:
            task.update_ref_count(task_const[TASK_ID], -1)
    else:
        # create new workflow
        workflow = Workflow()

    workflow.name = data[NAME]
    workflow.submit = data[SUBMIT]
    workflow.task_list = data[TASK_LIST]
    workflow.updated_by = user
    workflow.save()

    # increment ref count of tasks used in this workflow
    for task_const in workflow.task_list:
        task.update_ref_count(task_const[TASK_ID], 1)

    return workflow
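A sketch of how this helper might be called; the key constants' values, the payload, and request_user are illustrative assumptions, not part of the example:

NAME, SUBMIT, TASK_LIST, TASK_ID = 'name', 'submit', 'task_list', 'task_id'  # assumed key values

payload = {
    NAME: 'deploy-fabric',
    SUBMIT: False,
    TASK_LIST: [{TASK_ID: 3}, {TASK_ID: 7}],
}

new_wf = _add_workflow(payload, user=request_user)                  # create
new_wf = _add_workflow(payload, user=request_user, id=new_wf.pk)    # update in place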
Example #2
def _create_workflow(self, name, tenant_id, content, env):
    status = 'CREATED'
    created_at = datetime.datetime.now().replace(microsecond=0)
    workflow = Workflow(name=name,
                        tenant_id=tenant_id,
                        content=content,
                        env=env,
                        env_result=env,
                        status=status,
                        created_at=created_at)
    self.db_session.add(workflow)
    return workflow
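Note that _create_workflow only stages the object on the session; nothing reaches the database until the surrounding unit of work commits. A sketch of the expected calling pattern (the service object and field values are illustrative):

workflow = service._create_workflow(name='nightly-etl',
                                    tenant_id='tenant-42',
                                    content='steps: []',
                                    env={'REGION': 'us-east-1'})
service.db_session.commit()  # flushes the INSERT and assigns the primary key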
Example #3
def post(self):
    workflow = Workflow(**json.decode(self.request.body))
    versions = Workflow.query(Workflow.name == workflow.name).order(-Workflow.version).fetch(1)
    if any(versions):  # bump version to one greater than the last known one
        workflow.version = versions[0].version + 1
    new_key = workflow.put()
    logging.info("Create/update: %s", new_key.id())
    if any(versions):  # replace the earlier version with this one in the relevant agents' workflow sets
        old_id = versions[0].key.id()
        for agent in Agent.query(Agent.trackVersion == True, Agent.workflows == old_id):
            agent.workflows.remove(old_id)
            agent.workflows.append(new_key.id())
            agent.put()  # persist the updated workflow list
    self.redirect('/workflows')
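The version bump relies on fetching the single highest-version entity for a given name; the same query is handy for reading the current version back. Note that this read-then-write is not transactional, so two concurrent posts could mint the same version number. A minimal sketch, reusing the query from the example:

latest = Workflow.query(Workflow.name == 'nightly-etl').order(-Workflow.version).fetch(1)
current_version = latest[0].version if latest else None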
Example #4
            def decider_remote_child_workflow(*args, **kwargs):
                """
                Treats this decision task as if it were a child workflow in remote mode.
                :param args:
                :param kwargs:
                :return:
                """

                decision_context = scanner.decision_context

                try:
                    cwf = next(decision_context.child_workflows_iter)

                    # Deserialize results before returning the promise
                    if cwf.result:
                        serialized_result = result_data_store.get(cwf.result)
                        cwf.result = result_data_serializer.deserialize_result(serialized_result)
                    return DistributedChildWorkflowPromise(cwf)

                # Do we have a new child workflow to schedule?
                except StopIteration:
                    # We have one of two branches here:  either we have a function, or we have a method.  We have to be
                    # careful with methods because we don't want to serialize `self`.
                    if inspect.ismethod(f):
                        # Ok, it's a method.  So, where's self?  Is it in kwargs or args?
                        if 'self' in kwargs:
                            # Save the self somewhere just in case
                            method_self = kwargs['self']
                            del kwargs['self']
                        else:
                            # args is a tuple, so copy it to a list before popping off self
                            args = list(args)
                            method_self = args.pop(0)

                    # By this point, we've ensured that args and kwargs are safe for serialization
                    serialized_input = input_data_serializer.serialize_input(args, kwargs)
                    task_id = decision_context.get_next_id()
                    key = '{}-cwf-{}'.format(swf_workflow_type, task_id)
                    swf_input = input_data_store.put(serialized_input, key)
                    workflow_id = 'wf-{}'.format(uuid.uuid4())
                    decision_context.decisions.start_child_workflow_execution(workflow_type_name=swf_workflow_type,
                                                                              workflow_type_version=swf_workflow_version,
                                                                              input=swf_input, task_list=swf_task_list,
                                                                              workflow_id=workflow_id)
                    cwf = Workflow(state='SCHEDULED')
                    return DistributedChildWorkflowPromise(cwf)
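The try/except above is the replay pattern used by SWF-style deciders: on each replay, previously recorded child workflows are consumed from an iterator, and StopIteration means the current call has no recorded result yet and must be scheduled. A toy, self-contained sketch of that pattern (the class and its helpers are hypothetical, not the SWF API):

class ReplayingDecider:
    def __init__(self, recorded_children):
        self._children = iter(recorded_children)
        self.scheduled = []

    def child_workflow(self, name):
        try:
            # On replay, return the recorded state for this call position.
            return next(self._children)
        except StopIteration:
            # No recorded state yet: record a scheduling decision instead.
            self.scheduled.append(name)
            return {'state': 'SCHEDULED'}

decider = ReplayingDecider([{'state': 'COMPLETED', 'result': 42}])
print(decider.child_workflow('resize_images'))  # replayed -> COMPLETED
print(decider.child_workflow('send_report'))    # new -> SCHEDULED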
Example #5
def add_workflow():
    name = request.json['name']

    # Check if the name already exists
    existing_workflow = (Workflow.query.filter(
        Workflow.name == name).one_or_none())

    if existing_workflow is None:
        # Create a new workflow
        schema = WorkflowSchema()
        new_workflow = Workflow(name=name)

        # Add the workflow to the database
        db.session.add(new_workflow)
        db.session.commit()

        # Serialize and return the newly created data in the response
        data = schema.dump(new_workflow)

        return jsonify(data), 201

    # Otherwise, the workflow already exists
    else:
        abort(400, "Workflow {name} already exists".format(name=name))
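A quick way to exercise this endpoint with Flask's test client, assuming the function is registered on an app as POST /workflows (the route and app module are hypothetical):

from my_app import app  # hypothetical application module

with app.test_client() as client:
    resp = client.post('/workflows', json={'name': 'nightly-etl'})
    print(resp.status_code)   # 201 on first create, 400 on a duplicate name
    print(resp.get_json())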
Example #6
def run_pipeline(pipeline_id):
    engine = create_engine(DATABASE_URI)
    Session = sessionmaker(bind=engine)

    session = Session()
    # reset the schema so each run starts from a clean database
    Base.metadata.drop_all(engine, checkfirst=True)
    CeleryTask.__table__.drop(engine, checkfirst=True)
    CeleryTask.__table__.create(engine, checkfirst=True)
    Base.metadata.create_all(engine)

    if pipeline_id == 0:
        for i in range(8):
            session.add(Task(sleep=random.randint(2, 7)))  # sleep for 2-7 secs

        session.add(Workflow(dag_adjacency_list={
            1: [3], 2: [4], 3: [5], 4: [5], 5: [6, 7], 6: [8], 7: [8]}))
    elif pipeline_id == 1:
        for i in range(15):
            session.add(Task(sleep=random.randint(2, 7)))  # sleep for 2-7 secs

        session.add(Workflow(dag_adjacency_list={
            1: [2], 2: [3], 3: [10], 4: [5], 5: [6], 6: [10], 7: [8],
            8: [9], 9: [12], 10: [11], 11: [14], 12: [13], 13: [14],
            14: [15]}))
    elif pipeline_id == 2:
        for i in range(42):
            session.add(Task(sleep=random.randint(2, 7)))  # sleep for 2-7 secs

        session.add(Workflow(dag_adjacency_list={
            1: [2], 2: [3], 3: [25], 4: [5], 5: [6], 6: [25],
            7: [8], 8: [9], 9: [25], 10: [11], 11: [12], 12: [26],
            13: [14], 14: [15], 15: [26], 16: [17], 17: [18], 18: [27],
            19: [20], 20: [21], 21: [27], 22: [23], 23: [24], 24: [27],
            25: [28], 26: [29, 30], 27: [31, 32, 33], 28: [35],
            29: [35], 30: [34], 31: [34], 32: [37], 33: [38],
            34: [40], 35: [36], 36: [40], 38: [39], 39: [40],
            40: [41, 42]}))

    session.commit()

    # grab the most recently added workflow
    workflow = session.query(Workflow).all()[-1]

    print("WORKFLOW", workflow.id)
    session.flush()
    session.close()
    task = celery.send_task('mytasks.pipeline',
                            args=(workflow.id, ),
                            kwargs={})
    task_info[task.id] = ["Task submitted"]
    return task
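The dag_adjacency_list dictionaries above map each task index to the indices of its downstream tasks. A self-contained sketch of how such a mapping can be put into an executable order with Kahn's algorithm (pure Python, independent of the Celery machinery above):

from collections import defaultdict, deque

def topological_order(adjacency):
    # Count incoming edges for every node appearing in the DAG.
    indegree = defaultdict(int)
    for node, children in adjacency.items():
        indegree.setdefault(node, 0)
        for child in children:
            indegree[child] += 1

    # Repeatedly emit nodes whose parents have all been emitted.
    ready = deque(sorted(n for n, d in indegree.items() if d == 0))
    order = []
    while ready:
        node = ready.popleft()
        order.append(node)
        for child in adjacency.get(node, []):
            indegree[child] -= 1
            if indegree[child] == 0:
                ready.append(child)
    return order

print(topological_order({1: [3], 2: [4], 3: [5], 4: [5], 5: [6, 7], 6: [8], 7: [8]}))
# -> [1, 2, 3, 4, 5, 6, 7, 8]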
Example #7
def test_workflow_init_defaults(self):
    workflow = Workflow(name="Richard's WF")
    self.assertEqual(1, workflow.version)
Example #8
    {
        "workflow_id": 2,
        "image_id": 5
    },
    {
        "workflow_id": 2,
        "image_id": 6
    },
    {
        "workflow_id": 2,
        "image_id": 7
    },
]

# Insert the test data in the database
for workflow in WORKFLOW:
    wf = Workflow(name=workflow.get("name"))
    db.session.add(wf)

for image in IMAGE:
    img = Image(url=image.get("url"))
    db.session.add(img)

for progression in PROGRESSION:
    pg = Progression(
        image_id=progression.get("image_id"),
        workflow_id=progression.get("workflow_id"),
    )
    db.session.add(pg)

db.session.commit()  # commit the changes to the db