Code example #1
File: test.py Project: acrespo/Simulaciones
    def test_lots_of_awesome(self):
        p1 = Project(2 * 480, 1, 2)
        p2 = Project(2 * 480, 1, 2)

        awesome = Project(160, 1, 1)
        awesome.is_awesome = True
        awesome.extra_devs = 1

        not_awesome = Project(2 * 160, 1, 1)
        not_awesome.is_awesome = True
        not_awesome.extra_devs = 1

        w = Workflow(7, 3, [p1, p2, awesome, not_awesome])
        self.assertTrue(w.is_deliverable())

        more_awesome = Project(6 * 160, 1, 1)
        more_awesome.periods_to_delivery = 3
        more_awesome.is_awesome = True
        more_awesome.extra_devs = 2

        w2 = w.add_project(more_awesome)
        self.assertFalse(w2.is_deliverable(), msg="Fails, it needs one more dev")

        more_awesome.extra_devs = 3
        self.assertTrue(w2.is_deliverable())
Code example #2
File: test.py Project: acrespo/Simulaciones
    def test_add(self):

        w = Workflow(20, 0, [])
        self.assertEqual(w.projects, [])

        p = Project(20, 1, 3)
        new_w = w.add_project(p)

        self.assertNotEqual(w, new_w)
        self.assertEqual(new_w.projects, [p])
        self.assertEqual(w.projects, [])
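
test_add pins down a detail worth calling out: add_project does not mutate the workflow it is called on, it returns a new one. Below is a minimal sketch of that contract, with the Workflow(resources, reserved_resources, projects) signature inferred from these tests rather than taken from the project source:

# A sketch only: the real Workflow class has more behaviour (is_deliverable,
# average_workload); this shows just the non-mutating add_project contract.
class Workflow(object):
    def __init__(self, resources, reserved_resources, projects):
        self.resources = resources
        self.reserved_resources = reserved_resources
        self.projects = list(projects)

    def add_project(self, project):
        # return a new Workflow; self.projects is left untouched
        return Workflow(self.resources, self.reserved_resources,
                        self.projects + [project])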
Code example #3
    def post(self):
        workflow = Workflow(**json.decode(self.request.body))
        versions = Workflow.query(Workflow.name == workflow.name).order(-Workflow.version).fetch(1)
        if any(versions):  # bump version to one greater than the last known one
            workflow.version = versions[0].version + 1
        new_key = workflow.put()
        logging.info("Create/update: %s", new_key.id())
        if any(versions):  # replace the earlier version in relevant agent workflow sets
            old_id = versions[0].key.id()
            for agent in Agent.query(Agent.trackVersion == True, Agent.workflows == old_id):
                agent.workflows.remove(old_id)
                agent.workflows.append(new_key.id())
                agent.put()  # persist the updated workflow list
        self.redirect('/workflows')
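
The handler finds the newest stored version by ordering on -Workflow.version and fetching a single row. The same rule, isolated as a sketch (next_version is a hypothetical helper assuming the ndb model above, not part of the handler):

def next_version(name):
    # latest is None when no workflow with this name exists yet
    latest = Workflow.query(Workflow.name == name).order(-Workflow.version).get()
    return latest.version + 1 if latest else 1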
Code example #4
File: test.py Project: acrespo/Simulaciones
    def test_deliverable_with_reserved(self):
        p1 = Project(300, 1, 3)
        p2 = Project(300, 1, 3)

        w = Workflow(4, 2, [p1, p2])

        self.assertFalse(w.is_deliverable())

        w2 = w.add_project(Project(300, 1, 3))
        self.assertFalse(w2.is_deliverable())

        w.resources = 6
        self.assertTrue(w.is_deliverable())
Code example #5
File: workflow.py Project: whchoi98/ignite
def _add_workflow(data, user, id=0):
    logger.debug("workflow name = %s", data[NAME])

    if id:
        # get existing workflow
        workflow = Workflow.objects.get(pk=id)

        if not workflow.editable:
            raise IgniteException(ERR_WF_NOT_EDITABLE)

        for task_const in workflow.task_list:
            task.update_ref_count(task_const[TASK_ID], -1)
    else:
        # create new workflow
        workflow = Workflow()

    workflow.name = data[NAME]
    workflow.submit = data[SUBMIT]
    workflow.task_list = data[TASK_LIST]
    workflow.updated_by = user
    workflow.save()

    # increment ref count of tasks used in this workflow
    for task_const in workflow.task_list:
        task.update_ref_count(task_const[TASK_ID], 1)

    return workflow
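
The reference counts stay symmetric here: every task in the old task_list is decremented before the list is replaced, and every task in the new list is incremented afterwards. A hypothetical update_ref_count, just to make the bookkeeping concrete (the real task module in ignite will differ):

def update_ref_count(task_id, delta):
    # delta is +1 when a workflow starts using the task, -1 when it stops
    t = Task.objects.get(pk=task_id)
    t.ref_count = max(0, t.ref_count + delta)
    t.save()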
Code example #6
File: test.py Project: acrespo/Simulaciones
    def test_undeliverable_awesome(self):
        p1 = Project(300, 1, 3)
        p2 = Project(300, 1, 3)

        awesome = Project(300, 1, 3)
        awesome.is_awesome = True
        awesome.extra_devs = 2

        w = Workflow(6, 2, [p1, p2, awesome])
        self.assertTrue(w.is_deliverable())

        not_awesome = Project(300, 1, 3)
        not_awesome.is_awesome = True
        not_awesome.extra_devs = 2

        w2 = w.add_project(not_awesome)
        self.assertFalse(w2.is_deliverable())
Code example #7
    def get(self):
        agent = find_agent(self.request)
        if agent is None:  # get 'em all
            wf_list = {"workflows": [build_uri(self, key.id()) for key in Workflow.query().iter(keys_only=True)]}
        else:  # just those for the agent
            wf_list = {"workflows": [build_uri(self, key_id) for key_id in agent.workflows]}
        self.response.headers['Content-Type'] = 'application/json'
        self.response.write(json.encode(wf_list))
Code example #8
File: test.py Project: acrespo/Simulaciones
    def test_parallel_awesome(self):
        p1 = Project(2 * 480, 1, 2)
        p2 = Project(2 * 480, 1, 2)

        awesome = Project(160, 1, 1)
        awesome.is_awesome = True
        awesome.extra_devs = 1

        w = Workflow(6, 2, [p1, p2, awesome])
        self.assertTrue(w.is_deliverable())

        not_awesome = Project(160, 1, 1)
        not_awesome.is_awesome = True
        not_awesome.extra_devs = 1

        w2 = w.add_project(not_awesome)
        self.assertTrue(w2.is_deliverable())
Code example #9
File: test.py Project: acrespo/Simulaciones
    def test_sequential_awesome(self):
        p1 = Project(2 * 480, 1, 2)
        p2 = Project(2 * 480, 1, 2)

        awesome = Project(320, 1, 3)
        awesome.is_awesome = True
        awesome.extra_devs = 2

        w = Workflow(6, 2, [p1, p2, awesome])
        self.assertTrue(w.is_deliverable())

        not_awesome = Project(320, 1, 1)
        not_awesome.is_awesome = True
        not_awesome.extra_devs = 2

        w2 = w.add_project(not_awesome)
        self.assertTrue(w2.is_deliverable())
Code example #10
File: test.py Project: acrespo/Simulaciones
    def test_deliverable_awesome(self):
        p1 = Project(300, 1, 3)
        p2 = Project(300, 1, 3)

        w = Workflow(6, 2, [p1, p2])
        self.assertTrue(w.is_deliverable())

        awesome = Project(300, 1, 3)
        awesome.is_awesome = True

        w2 = w.add_project(awesome)
        self.assertFalse(w2.is_deliverable())

        awesome.extra_devs = 1
        self.assertFalse(w2.is_deliverable())

        awesome.extra_devs = 2
        self.assertTrue(w2.is_deliverable())
Code example #11
    def _create_workflow(self, name, tenant_id, content, env):
        status = 'CREATED'
        created_at = datetime.datetime.now().replace(microsecond=0)
        workflow = Workflow(name=name,
                            tenant_id=tenant_id,
                            content=content,
                            env=env,
                            env_result=env,
                            status=status,
                            created_at=created_at)
        self.db_session.add(workflow)
        return workflow
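
Note that the helper only stages the new row; nothing is flushed or committed. A hedged usage sketch (service and yaml_text are placeholders for whatever object and content the caller actually has):

wf = service._create_workflow('nightly-etl', 'tenant-42', yaml_text, env={})
service.db_session.commit()  # the caller decides when to commit
print(wf.status)  # 'CREATED'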
Code example #12
File: test.py Project: acrespo/Simulaciones
    def test_workload(self):

        p1 = Project(30, 1, 3)
        p2 = Project(30, 1, 3)

        w = Workflow(20, 0, [p1, p2])

        self.assertEqual(w.average_workload(), 3)

        w.reserved_resources = 4
        self.assertAlmostEqual(w.average_workload(), 3.75)

        p3 = Project(30, 1, 3)
        p3.hours_left = 5

        w2 = w.add_project(p3)
        self.assertAlmostEqual(w2.average_workload(), 4.0625)

        w3 = Workflow(20, 20, [])
        self.assertEqual(w3.average_workload(), 0)

        w4 = Workflow(20, 20, [p1, p2])
        self.assertEqual(w4.average_workload(), 0)
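
The expected values pin down the arithmetic: two 30-hour projects against 20 unreserved resources give 60 / 20 = 3, reserving 4 resources raises that to 60 / 16 = 3.75, and adding a project with 5 hours left gives 65 / 16 = 4.0625. A sketch of average_workload consistent with those numbers, assuming the first constructor argument is the project's total hours (attribute names are guesses from the tests, not the project source):

def average_workload(self):
    available = self.resources - self.reserved_resources
    if available <= 0:
        # matches the tests: a fully reserved workflow reports zero load
        return 0
    # hours_left is assumed to default to the project's total hours
    return sum(p.hours_left for p in self.projects) / float(available)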
Code example #13
def parse_args(parser):
    """
    Runs the argument parser

    :returns: a workflow instance and kwargs parsed by argparse
    """
    parsed_args = parser.parse_args()
    kwargs = dict(parsed_args._get_kwargs())

    # extract wf_kwargs from kwargs
    wf_kwargs = dict((k, kwargs[k]) for k in
                     ['name', 'default_queue', 'root_output_dir', 'restart',
                      'delete_intermediates', 'prompt_confirm', 'dry_run'])
    wf_kwargs['comments'] = '$ ' + ' '.join([os.path.basename(sys.argv[0])] + sys.argv[1:])

    wf = Workflow.start(**wf_kwargs)

    wf.log.info('Parsed kwargs:\n{0}'.format(pprint.pformat(kwargs)))
    return wf, kwargs
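
parse_args assumes the parser already defines every flag it extracts. A hedged setup sketch; the real tool's flag types and defaults may differ:

import argparse

parser = argparse.ArgumentParser()
for flag in ['name', 'default_queue', 'root_output_dir']:
    parser.add_argument('--' + flag)
for flag in ['restart', 'delete_intermediates', 'prompt_confirm', 'dry_run']:
    parser.add_argument('--' + flag, action='store_true')
wf, kwargs = parse_args(parser)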
Code example #14
            def decider_remote_child_workflow(*args, **kwargs):
                """
                Treats this decision task as if it were a child workflow in remote mode.
                :param args:
                :param kwargs:
                :return:
                """

                decision_context = scanner.decision_context

                try:
                    cwf = decision_context.child_workflows_iter.next()

                    # Deserialize results before returning the promise
                    if cwf.result:
                        serialized_result = result_data_store.get(cwf.result)
                        cwf.result = result_data_serializer.deserialize_result(serialized_result)
                    return DistributedChildWorkflowPromise(cwf)

                # No recorded result yet: schedule a new child workflow.
                except StopIteration:
                    # We have one of two branches here: either we have a function, or we have a method.  We have to be
                    # careful with methods because we don't want to serialize `self`.
                    if inspect.ismethod(f):
                        # Ok, it's a method.  So, where's self?  Is it in kwargs or args?
                        if 'self' in kwargs:
                            # Save the self somewhere just in case
                            method_self = kwargs['self']
                            del kwargs['self']
                        else:
                            # args arrives as a tuple, so copy it to a list before popping self
                            args = list(args)
                            method_self = args.pop(0)

                    # By this point, we've ensured that args and kwargs are safe for serialization
                    serialized_input = input_data_serializer.serialize_input(args, kwargs)
                    task_id = decision_context.get_next_id()
                    key = '{}-cwf-{}'.format(swf_workflow_type, task_id)
                    swf_input = input_data_store.put(serialized_input, key)
                    workflow_id = 'wf-{}'.format(uuid.uuid4())
                    decision_context.decisions.start_child_workflow_execution(workflow_type_name=swf_workflow_type,
                                                                              workflow_type_version=swf_workflow_version,
                                                                              input=swf_input, task_list=swf_task_list,
                                                                              workflow_id=workflow_id)
                    cwf = Workflow(state='SCHEDULED')
                    return DistributedChildWorkflowPromise(cwf)
Code example #15
def add_workflow():
    name = request.json['name']

    # Check if the name already exists
    existing_workflow = (Workflow.query.filter(
        Workflow.name == name).one_or_none())

    if existing_workflow is None:
        # Create a new workflow
        schema = WorkflowSchema()
        new_workflow = Workflow(name=name)

        # Add the workflow to the database
        db.session.add(new_workflow)
        db.session.commit()

        # Serialize and return the newly created data in the response
        data = schema.dump(new_workflow)

        return jsonify(data), 201

    # Otherwise the workflow already exists
    else:
        abort(400, "Workflow {name} already exists".format(name=name))
Code example #16
File: views.py Project: dip-kush/CrawlerUI
def pathSourcetoSink(fsm, crawl):
    graph = fsm.graph
    criticalStates = fsm.criticalStates

    # sinks have no outgoing edges, sources no incoming ones
    sink_nodes = [node for node, outdegree
                  in graph.out_degree(graph.nodes()).items() if outdegree == 0]
    source_nodes = [node for node, indegree
                    in graph.in_degree(graph.nodes()).items() if indegree == 0]
    if not source_nodes:
        source_nodes.append(0)

    wflow_no = 1
    for sink in sink_nodes:
        for source in source_nodes:
            for path in nx.all_simple_paths(graph, source=source, target=sink):
                print(path)
                obj = Workflow(scan_id=crawl, wflow_no=wflow_no)
                obj.save()
                critical_path = False
                for i in range(len(path) - 1):
                    critical = False
                    link = graph.edge[path[i]][path[i + 1]][0]["event"].xpath
                    header = graph.edge[path[i]][path[i + 1]][0]["header"]
                    dom = graph.node[path[i + 1]]['nodedata'].domString
                    wflow = Workflow.objects.get(scan_id=crawl, wflow_no=wflow_no)
                    if path[i + 1] in criticalStates:
                        critical = True
                        critical_path = True
                    linkobj = Link(wflow_id=wflow, link=link, order_id=i + 1,
                                   header=header, response_dom=dom,
                                   critical_node=critical)
                    linkobj.save()
                # flag the workflow if any node on the path was critical
                if critical_path:
                    obj.critical = True
                    obj.save()
                wflow_no += 1

    start_url_header = fsm.start_header
    login_url_header = fsm.login_header
    login_dom = fsm.login_dom
    headerObj = StartHeader(scan_id=crawl, start_url_header=start_url_header,
                            login_url_header=login_url_header, login_dom=login_dom)
    headerObj.save()
Code example #17
File: test.py Project: acrespo/Simulaciones
    def test_empty(self):

        w = Workflow(20, 0, [])
        self.assertTrue(w.is_deliverable())
        self.assertEqual(w.average_workload(), 0)
Code example #18
    {
        "workflow_id": 2,
        "image_id": 5
    },
    {
        "workflow_id": 2,
        "image_id": 6
    },
    {
        "workflow_id": 2,
        "image_id": 7
    },
]

# Insert the test data in the database
for workflow in WORKFLOW:
    wf = Workflow(name=workflow.get("name"))
    db.session.add(wf)

for image in IMAGE:
    img = Image(url=image.get("url"))
    db.session.add(img)

for progression in PROGRESSION:
    pg = Progression(
        image_id=progression.get("image_id"),
        workflow_id=progression.get("workflow_id"),
    )
    db.session.add(pg)

db.session.commit()  # commit the changes to the db
Code example #19
File: test_model.py Project: MITLibraries/scads
    def test_workflow_init_defaults(self):
        workflow = Workflow(name="Richard's WF")
        self.assertEqual(1, workflow.version)
Code example #20
File: director.py Project: sanderegg/osparc-lab
def run_pipeline(pipeline_id):
    engine = create_engine(DATABASE_URI)
    Session = sessionmaker(bind=engine)

    session = Session()
    Base.metadata.drop_all(engine, checkfirst=True)
    CeleryTask.__table__.drop(engine, checkfirst=True)
    CeleryTask.__table__.create(engine, checkfirst=True)
    Base.metadata.create_all(engine)

    if pipeline_id == 0:
        for i in range(8):
            session.add(Task(sleep=random.randint(2, 7)))  # sleep for 2-7 secs

        # add the workflow once, not once per task
        session.add(Workflow(dag_adjacency_list={
            1: [3], 2: [4], 3: [5], 4: [5], 5: [6, 7], 6: [8], 7: [8]}))
    elif pipeline_id == 1:
        for i in range(15):
            session.add(Task(sleep=random.randint(2, 7)))  # sleep for 2-7 secs

        session.add(Workflow(dag_adjacency_list={
            1: [2], 2: [3], 3: [10], 4: [5], 5: [6], 6: [10], 7: [8],
            8: [9], 9: [12], 10: [11], 11: [14], 12: [13], 13: [14],
            14: [15]}))
    elif pipeline_id == 2:
        for i in range(42):
            session.add(Task(sleep=random.randint(2, 7)))  # sleep for 2-7 secs

        session.add(Workflow(dag_adjacency_list={
            1: [2], 2: [3], 3: [25], 4: [5], 5: [6], 6: [25],
            7: [8], 8: [9], 9: [25], 10: [11], 11: [12], 12: [26],
            13: [14], 14: [15], 15: [26], 16: [17], 17: [18], 18: [27],
            19: [20], 20: [21], 21: [27], 22: [23], 23: [24], 24: [27],
            25: [28], 26: [29, 30], 27: [31, 32, 33], 28: [35], 29: [35],
            30: [34], 31: [34], 32: [37], 33: [38], 34: [40], 35: [36],
            36: [40], 38: [39], 39: [40], 40: [41, 42]}))

    session.commit()

    workflow = session.query(Workflow).all()[-1]

    print("WORKFLOW", workflow.id)
    session.flush()
    session.close()
    task = celery.send_task('mytasks.pipeline',
                            args=(workflow.id, ),
                            kwargs={})
    task_info[task.id] = ["Task submitted"]
    return task
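
The dag_adjacency_list maps a 1-based task number to its downstream tasks. A small hypothetical helper, not part of the director, to sanity-check a pipeline before submitting it:

def roots(dag_adjacency_list):
    # tasks that never appear as someone's child are the entry points
    has_parent = {c for children in dag_adjacency_list.values() for c in children}
    return sorted(set(dag_adjacency_list) - has_parent)

print(roots({1: [3], 2: [4], 3: [5], 4: [5], 5: [6, 7], 6: [8], 7: [8]}))  # -> [1, 2]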