Example #1
 def setUp(self):
     self.test_snapshot = Snapshot.objects.create_snapshot({})
     self.test_snapshot.save()
     self.test_project = Project.objects.create(
         name="proj",
         creator="creator",
     )
     self.test_project.save()
     # prepare test data
     instance_id = uniqid()
     template_id = uniqid()
     self.instance_in_pipeline = InstanceInPipeline.objects.create(
         instance_id=instance_id,
         atom_total=0,
         subprocess_total=0,
         gateways_total=0)
     self.pipeline_template = PipelineTemplate.objects.create(
         template_id=template_id,
         creator="creator",
         snapshot=self.test_snapshot)
     self.pipeline_instance = PipelineInstance.objects.create(
         instance_id=instance_id,
         creator="creator",
         snapshot=self.test_snapshot,
         template=self.pipeline_template)
     self.task_template = TaskTemplate.objects.create(
         project=self.test_project,
         pipeline_template=self.pipeline_template)
     self.taskflow_instance = TaskFlowInstance.objects.create(
         project=self.test_project,
         pipeline_instance=self.pipeline_instance,
         template_id=template_id)
Example #2
 def setUp(self):
     self.name = "test"
     self.task_template_name = "task_template_name"
     self.creator = "tester"
     self.extra_info = {"extra_info": "val"}
     self.pipeline_tree = {
         "constants": {
             "key_1": {"value": "val_1", "show_type": "show"},
             "key_2": {"value": "val_2", "show_type": "hide"},
         },
         "activities": {},
         "start_event": {
             "id": "id1",
             "name": "",
             "type": "EmptyStartEvent",
             "incoming": None,
             "outgoing": "flow_id1",
         },
         "end_event": {"id": "id2", "name": "", "type": "EmptyEndEvent", "incoming": "flow_id1", "outgoing": None},
         "gateways": {},
         "flows": {"flow_id1": {"id": "flow_id1", "source": "id1", "target": "id2"}},
     }
     self.project = Project.objects.create(name="test_project", time_zone="Asia/Shanghai", creator="test", desc="")
     self.invalid_project = Project.objects.create(
         name="invalid_project", time_zone="Asia/Shanghai", creator="test", desc=""
     )
     self.snapshot = Snapshot.objects.create_snapshot({})
     self.pipeline_template = PipelineTemplate.objects.create(
         template_id=uniqid(), name=self.task_template_name, creator=self.creator, snapshot=self.snapshot
     )
     task_template = TaskTemplate(project=self.project, pipeline_template=self.pipeline_template,)
     task_template.save()
     self.template = task_template
     self.task = self.create_a_task()
Example #3
def build_tree(start_elem, id=None, data=None, replace_id=False):
    tree = copy.deepcopy(__skeleton)
    elem_queue = Queue.Queue()
    processed_elem = set()

    tree[__incoming] = {}
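    # __incoming is a temporary bookkeeping slot used while growing the tree; it is removed before the tree is returned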
    elem_queue.put(start_elem)

    while not elem_queue.empty():
        # get elem
        elem = elem_queue.get()

        # update node when we meet again
        if elem.id in processed_elem:
            __update(tree, elem)
            continue

        # add to queue
        for e in elem.outgoing:
            elem_queue.put(e)

        # mark as processed
        processed_elem.add(elem.id)

        # tree grow
        __grow(tree, elem)

    del tree[__incoming]
    tree[PE.id] = id or uniqid()
    user_data = data.to_dict() if isinstance(data, Data) else data
    tree[PE.data] = user_data or tree[PE.data]
    if replace_id:
        replace_all_id(tree)
    return tree
Example #4
 def __init__(self, **kwargs):
     self.pipeline = kwargs.get('pipeline', PipelineObject(data=kwargs.get('pipeline_data'),
                                                           context=kwargs.get('pipeline_context')))
     self.next = mock.MagicMock(return_value=kwargs.get('next', uniqid()))
     self.data = kwargs.get('data', MockData())
     self.prepare_rerun_data = mock.MagicMock()
     super(MockSubprocessActivity, self).__init__(kwargs.get('id'))
Example #5
def build_tree(start_elem, id=None, data=None):
    tree = copy.deepcopy(__skeleton)
    elem_queue = Queue.Queue()
    processed_elem = set()

    tree[__incoming] = {}
    elem_queue.put(start_elem)

    while not elem_queue.empty():
        # get elem
        elem = elem_queue.get()

        # update node when we meet again
        if elem.id in processed_elem:
            __update(tree, elem)
            continue

        # add to queue
        for e in elem.outgoing:
            elem_queue.put(e)

        # mark as processed
        processed_elem.add(elem.id)

        # tree grow
        __grow(tree, elem)

    del tree[__incoming]
    tree[PE.id] = id or uniqid()
    tree[PE.data] = data or tree[PE.data]
    return tree
Example #6
def forced_fail(node_id):
    """
    forced fail a node
    :param node_id:
    :return:
    """

    try:
        process = PipelineProcess.objects.get(current_node_id=node_id)
    except PipelineProcess.DoesNotExist:
        return False

    node = process.top_pipeline.node(node_id)
    if not isinstance(node, ServiceActivity):
        return False

    result = Status.objects.transit(node_id, to_state=states.FAILED)
    if not result:
        return False

    try:
        node.failure_handler(process.root_pipeline.data)
    except Exception:
        pass

    with transaction.atomic():
        s = Status.objects.get(id=node.id)
        ScheduleService.objects.delete_schedule(s.id, s.version)
        Data.objects.forced_failed(node_id)
        ProcessCeleryTask.objects.revoke(process.id)
        process.sleep(adjust_status=True)
        s.version = uniqid.uniqid()
        s.save()

    return True
Example #7
    def retry(self, process, node, inputs):
        if RERUN_MAX_LIMIT != 0 and self.get(id=node.id).loop > RERUN_MAX_LIMIT:
            return ActionResult(result=False, message='rerun times exceed max limit: {limit}, can not retry'.format(
                limit=RERUN_MAX_LIMIT
            ))

        action_res = self.transit(id=node.id, to_state=states.READY, appoint=True)
        if not action_res.result:
            return action_res

        # add retry times
        s = Status.objects.get(id=node.id)
        s.version = uniqid()
        history = History.objects.record(s)
        LogEntry.objects.link_history(node_id=node.id, history_id=history.id)
        s.retry += 1
        s.save()

        # update inputs
        if inputs:
            new_data = DataObject(inputs=inputs, outputs={})
            node.data = new_data
            Data.objects.write_node_data(node)

        # mark
        node.next_exec_is_retry()

        self.recover_from_block(process.root_pipeline.id, process.subprocess_stack)
        signals.node_retry_ready.send(sender=Status, process=process, node=node)

        # because node may be updated
        process.save()

        return action_res
Example #8
    def retry(self, process, node, inputs):
        action_res = self.transit(id=node.id,
                                  to_state=states.READY,
                                  appoint=True)
        if not action_res.result:
            return action_res

        # add retry times
        s = Status.objects.get(id=node.id)
        s.version = uniqid()
        history = History.objects.record(s)
        LogEntry.objects.link_history(node_id=node.id, history_id=history.id)
        s.retry += 1
        s.save()

        # update inputs
        if inputs:
            new_data = DataObject(inputs=inputs, outputs={})
            node.data = new_data
            Data.objects.write_node_data(node)

        self.recover_from_block(process.root_pipeline.id,
                                process.subprocess_stack)
        signals.node_retry_ready.send(sender=Status,
                                      process=process,
                                      node=node)

        # because node may be updated
        process.save()

        return action_res
Example #9
 def delete_model(self, template_ids):
     if not isinstance(template_ids, list):
         template_ids = [template_ids]
     qs = self.filter(template_id__in=template_ids)
     for template in qs:
         template.is_deleted = True
         template.name = uniqid()
         template.save()
Example #10
 def delete_model(self, instance_ids):
     if not isinstance(instance_ids, list):
         instance_ids = [instance_ids]
     qs = self.filter(instance_id__in=instance_ids)
     for instance in qs:
         instance.is_deleted = True
         instance.name = uniqid()
         instance.save()
Example #11
 def setUp(self):
     self.test_snapshot = Snapshot.objects.create_snapshot({})
     self.test_snapshot.save()
     self.test_project = Project.objects.create(
         name="proj",
         creator="creator",
     )
     self.test_project.save()
     # prepare test data
     instance_id = uniqid()
     template_id = uniqid()
     component_code = uniqid()
     node_id = uniqid()
     self.component_execute_data = ComponentExecuteData.objects.create(
         component_code=component_code,
         instance_id=instance_id,
         node_id=node_id,
         started_time=datetime(2021, 9, 18, 14, 57, 18, 609564, tzinfo=timezone.utc),
     )
     self.pipeline_template = PipelineTemplate.objects.create(
         template_id=template_id,
         creator="creator",
         snapshot=self.test_snapshot)
     self.pipeline_instance = PipelineInstance.objects.create(
         instance_id=instance_id,
         creator="creator",
         snapshot=self.test_snapshot,
         template=self.pipeline_template)
     self.taskflow_instance = TaskFlowInstance.objects.create(
         project=self.test_project,
         pipeline_instance=self.pipeline_instance,
         template_id=template_id)
     self.task_template = TaskTemplate.objects.create(
         project=self.test_project,
         pipeline_template=self.pipeline_template)
Example #12
 def __init__(self, id=None, **kwargs):
     self.id = id or ('%s%s' % (uniqid(), uniqid()))
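     # id is expected to be two 32-character uniqid() values back to back; split into activity_id and version below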
     self.activity_id = self.id[:32]
     self.version = self.id[32:]
     self.destroy = mock.MagicMock()
     self.service_act = ServiceActObject(interval=None,
                                         id=self.activity_id,
                                         schedule_return=kwargs.get('schedule_return'),
                                         schedule_exception=kwargs.get('schedule_exception'),
                                         timeout=kwargs.get('service_timeout'),
                                         error_ignorable=kwargs.get('service_err_ignore', False),
                                         is_schedule_done=kwargs.get('schedule_done', False),
                                         result_bit=kwargs.get('result_bit', True))
     self.callback_data = kwargs.get('callback_data', 'callback_data')
     self.wait_callback = kwargs.get('wait_callback', False)
     self.process_id = kwargs.get('process_id', uniqid())
     self.is_finished = kwargs.get('is_finished', False)
     self.schedule_times = 0
     self.finish = mock.MagicMock()
     self.set_next_schedule = mock.MagicMock()
     self.callback = mock.MagicMock()
Example #13
 def delete_model(self, template_ids):
     """
     Delete template objects.
     @param template_ids: a template ID or a list of template IDs
     @return:
     """
     if not isinstance(template_ids, list):
         template_ids = [template_ids]
     qs = self.filter(template_id__in=template_ids)
     for template in qs:
         template.is_deleted = True
         template.name = uniqid()
         template.save()
Example #14
 def delete_model(self, instance_ids):
     """
     Delete pipeline instance objects.
     @param instance_ids: an instance ID or a list of instance IDs
     @return:
     """
     if not isinstance(instance_ids, list):
         instance_ids = [instance_ids]
     qs = self.filter(instance_id__in=instance_ids)
     for instance in qs:
         instance.is_deleted = True
         instance.name = uniqid()
         instance.save()
Example #15
 def setUp(self):
     self.test_snapshot = Snapshot.objects.create_snapshot({})
     self.test_snapshot.save()
     self.test_project = Project.objects.create(
         name="proj",
         creator="creator",
     )
     self.test_project.save()
     # prepare test data
     template_id = uniqid()
     component_code = uniqid()
     node_id = uniqid()
     self.component_in_template = ComponentInTemplate.objects.create(
         component_code=component_code,
         template_id=template_id,
         node_id=node_id)
     self.pipeline_template = PipelineTemplate.objects.create(
         template_id=template_id,
         creator="creator",
         snapshot=self.test_snapshot)
     self.task_template = TaskTemplate.objects.create(
         project=self.test_project,
         pipeline_template=self.pipeline_template)
Example #16
 def setUp(self):
     self.name = 'test'
     self.creator = 'tester'
     self.extra_info = {'extra_info': 'val'}
     self.pipeline_tree = {
         'constants': {
             'key_1': {
                 'value': 'val_1',
                 'show_type': 'show',
             },
             'key_2': {
                 'value': 'val_2',
                 'show_type': 'hide',
             }
         }
     }
     self.business = Business.objects.create(
         cc_id=1,
         cc_name='mock business',
         cc_owner='tester',
         cc_company='',
         life_cycle='2',
         executor='',
     )
     self.invalid_business = Business.objects.create(
         cc_id=2,
         cc_name='mock business',
         cc_owner='tester',
         cc_company='',
         life_cycle='2',
         executor='',
     )
     self.snapshot, _ = Snapshot.objects.create_or_get_snapshot({})
     self.pipeline_template = PipelineTemplate.objects.create(
         template_id=uniqid(),
         name=self.name,
         creator=self.creator,
         snapshot=self.snapshot)
     task_template = TaskTemplate(
         business=self.business,
         pipeline_template=self.pipeline_template,
     )
     task_template.save()
     self.template = task_template
     self.task = self.create_a_task()
Example #17
    def __init__(self,
                 loop=0,
                 id=None,
                 state=None,
                 started_time=None,
                 archived_time=None,
                 retry=False,
                 skip=False):
        super(MockStatus, self).__init__(id=id)
        self.version = uniqid()
        self.loop = loop
        self.state = state
        self.started_time = started_time
        self.archived_time = archived_time
        self.retry = retry
        self.skip = skip

        self.save = MagicMock()
Example #18
 def __init__(self,
              inputs,
              parent_data,
              execute_assertion,
              schedule_assertion,
              name='',
              patchers=None,
              execute_call_assertion=None,
              schedule_call_assertion=None,
              service_id=None):
     self.inputs = inputs
     self.parent_data = parent_data
     self.execute_assertion = execute_assertion
     self.execute_call_assertion = execute_call_assertion or []
     self.schedule_call_assertion = schedule_call_assertion or []
     self.schedule_assertion = schedule_assertion
     self.name = name
     self.patchers = patchers or []
     self.service_id = service_id or uniqid()
Example #19
def forced_fail(node_id, kill=False, ex_data=''):
    """
    forced fail a node
    :param node_id:
    :param kill:
    :param ex_data:
    :return:
    """

    try:
        process = PipelineProcess.objects.get(current_node_id=node_id)
    except PipelineProcess.DoesNotExist:
        return ActionResult(
            result=False,
            message=
            'invalid operation, this node is finished or pipeline have been revoked'
        )

    node = process.top_pipeline.node(node_id)
    if not isinstance(node, ServiceActivity):
        return ActionResult(result=False,
                            message='can\'t not forced fail this type of node')

    action_result = Status.objects.transit(node_id, to_state=states.FAILED)
    if not action_result.result:
        return action_result

    try:
        node.failure_handler(process.root_pipeline.data)
    except Exception:
        pass

    with transaction.atomic():
        s = Status.objects.get(id=node.id)
        ScheduleService.objects.delete_schedule(s.id, s.version)
        Data.objects.forced_fail(node_id, ex_data)
        ProcessCeleryTask.objects.revoke(process.id, kill)
        process.sleep(adjust_status=True)
        s.version = uniqid.uniqid()
        s.save()

    return ActionResult(result=True, message='success')
Example #20
 def setUp(self):
     self.name = 'test'
     self.task_template_name = 'task_template_name'
     self.creator = 'tester'
     self.extra_info = {'extra_info': 'val'}
     self.pipeline_tree = {
         'constants': {
             'key_1': {
                 'value': 'val_1',
                 'show_type': 'show',
             },
             'key_2': {
                 'value': 'val_2',
                 'show_type': 'hide',
             }
         }
     }
     self.project = Project.objects.create(name='test_project',
                                           time_zone='Asia/Shanghai',
                                           creator='test',
                                           desc='')
     self.invalid_project = Project.objects.create(
         name='invalid_project',
         time_zone='Asia/Shanghai',
         creator='test',
         desc='')
     self.snapshot, _ = Snapshot.objects.create_or_get_snapshot({})
     self.pipeline_template = PipelineTemplate.objects.create(
         template_id=uniqid(),
         name=self.task_template_name,
         creator=self.creator,
         snapshot=self.snapshot)
     task_template = TaskTemplate(
         project=self.project,
         pipeline_template=self.pipeline_template,
     )
     task_template.save()
     self.template = task_template
     self.task = self.create_a_task()
Example #21
def convert_atom_from_v2_step_to_v3_act(step, constants):
    act_id = uniqid()
    v3_act = {
        'id': act_id,
        'incoming': '',
        'outgoing': '',
        'name': step['step_name'],
        'error_ignorable': bool(step['is_ignore']),
        'optional': bool(step['is_adjust']),
        'type': 'ServiceActivity',
        'loop': 1,
        'component': {
            'code': '',
            'data': {}
        }
    }

    tag_code = step['tag_code']
    component_code = component_code_v2_to_v3.get(tag_code)
    if not component_code:
        raise Exception("unknown tag code: %s" % tag_code)

    data = step['tag_data']['data']
    tag_data = {}

    if tag_code == 'requests':
        mount_constant(act_id, tag_code, data, constants)
        for key, val in data.items():
            hook = val['hook'] == 'on'
            tag_data[tag_v2_to_v3[tag_code][key]] = {
                'hook': hook,
                'value': val['constant'] if hook else val['value']
            }

    # TODO another tag

    v3_act['component']['code'] = component_code
    v3_act['component']['data'] = tag_data
    return v3_act
Example #22
    def retry(self, process, node, inputs):
        result = self.transit(id=node.id, to_state=states.READY, appoint=True)
        if not result:
            return result

        # add retry times
        s = Status.objects.get(id=node.id)
        s.version = uniqid()
        history = History.objects.record(s)
        LogEntry.objects.link_history(node_id=node.id, history_id=history.id)
        s.retry += 1
        s.save()

        # update inputs
        if inputs:
            new_data = DataObject(inputs=inputs, outputs={})
            node.data = new_data
            Data.objects.write_node_data(node)
            process.save()

        self.recover_from_block(process.root_pipeline.id,
                                process.subprocess_stack)
        return result
Example #23
    def create_task(self,
                    name,
                    template,
                    cron,
                    data,
                    creator,
                    timezone=None,
                    extra_info=None,
                    spread=False):
        snapshot, _ = Snapshot.objects.create_or_get_snapshot(data)
        schedule, _ = CrontabSchedule.objects.get_or_create(
            minute=cron.get('minute', '*'),
            hour=cron.get('hour', '*'),
            day_of_week=cron.get('day_of_week', '*'),
            day_of_month=cron.get('day_of_month', '*'),
            month_of_year=cron.get('month_of_year', '*'),
            timezone=timezone or 'UTC')
        _ = schedule.schedule  # noqa

        task = self.create(name=name,
                           template=template,
                           snapshot=snapshot,
                           cron=schedule.__str__(),
                           creator=creator,
                           extra_info=extra_info)

        kwargs = {'period_task_id': task.id, 'spread': spread}
        celery_task = DjCeleryPeriodicTask.objects.create(
            crontab=schedule,
            name=uniqid(),
            task='pipeline.contrib.periodic_task.tasks.periodic_task_start',
            enabled=False,
            kwargs=json.dumps(kwargs))
        task.celery_task = celery_task
        task.save()
        return task
Example #24
def main_test():
    pipe1 = {
        "id": uniqid(),
        "activities": {
            "d4b43834dbea0981f07db76d3c2d12ce": {
                "outgoing": "65085d5739b76b8c3c0152ca1e845a6f",
                "incoming": "30722fa74d29e61ce9289c5029a48a49",
                "name": "\u8282\u70b9_1",
                "error_ignorable": False,
                "component": {
                    "code": "job_fast_execute_script",
                    "data": {
                        "job_account": {
                            "hook": False,
                            "value": "root"
                        },
                        "job_script_timeout": {
                            "hook": False,
                            "value": "600"
                        },
                        "job_ip_list": {
                            "hook": False,
                            "value": "${ip}"
                        },
                        "job_content": {
                            "hook": False,
                            "value": "echo 1"
                        },
                        "job_script_type": {
                            "hook": False,
                            "value": "1"
                        },
                        "job_script_param": {
                            "hook": False,
                            "value": ""
                        }
                    }
                },
                "optional": False,
                "type": "ServiceActivity",
                "id": "d4b43834dbea0981f07db76d3c2d12ce",
                "loop": None
            }
        },
        "end_event": {
            "type": "EmptyEndEvent",
            "outgoing": "",
            "incoming": "65085d5739b76b8c3c0152ca1e845a6f",
            "id": "ad3d6364cbf1f3ff4b0a9d0c98a99d99",
            "name": ""
        },
        "outputs": [],
        "flows": {
            "65085d5739b76b8c3c0152ca1e845a6f": {
                "is_default": False,
                "source": "d4b43834dbea0981f07db76d3c2d12ce",
                "id": "65085d5739b76b8c3c0152ca1e845a6f",
                "target": "ad3d6364cbf1f3ff4b0a9d0c98a99d99"
            },
            "30722fa74d29e61ce9289c5029a48a49": {
                "is_default": False,
                "source": "3e8f33f335b6695201cc259fb5f365e0",
                "id": "30722fa74d29e61ce9289c5029a48a49",
                "target": "d4b43834dbea0981f07db76d3c2d12ce"
            }
        },
        "start_event": {
            "type": "EmptyStartEvent",
            "outgoing": "30722fa74d29e61ce9289c5029a48a49",
            "incoming": "",
            "id": "3e8f33f335b6695201cc259fb5f365e0",
            "name": ""
        },
        "constants": {
            "${ip}": {
                "source_tag": "var_ip_picker",
                "source_info": {},
                "name": "ip",
                "index": 0,
                "custom_type": "ip",
                "value": {
                    "var_ip_picker": {
                        "var_ip_custom_value": "",
                        "var_ip_select_module": ["111"],
                        "var_ip_input_set": "",
                        "var_ip_value_type": "ip",
                        "var_ip_select_set": ["2"],
                        "var_ip_input_module": "",
                        "var_ip_method": "select"
                    }
                },
                "show_type": "hide",
                "source_type": "custom",
                "key": "${ip}",
                "desc": ""
            }
        },
        "gateways": {}
    }
    parser_obj = WebPipelineAdapter(pipe1)
    run_pipeline(parser_obj.parser())
Example #25
    def transit(self,
                id,
                to_state,
                is_pipeline=False,
                appoint=False,
                start=False,
                name='',
                version=None):
        """
        Try to transition a node to another state.
        :param id: node ID
        :param to_state: target state
        :param is_pipeline: whether this node is a pipeline
        :param appoint: whether this action was initiated by a user (not an internal engine operation)
        :param start: whether to refresh the node's started time
        :param name: node name
        :param version: node version
        :return:
        """
        defaults = {'name': name, 'state': to_state, 'version': uniqid()}
        if start:
            defaults['started_time'] = timezone.now()
        _, created = self.get_or_create(id=id, defaults=defaults)

        # reservation or first creation
        if created:
            return True

        with transaction.atomic():
            kwargs = {'id': id}
            if version:
                kwargs['version'] = version

            try:
                status = self.select_for_update().get(**kwargs)
            except Status.DoesNotExist:
                return False

            if states.can_transit(from_state=status.state,
                                  to_state=to_state,
                                  is_pipeline=is_pipeline,
                                  appoint=appoint):
                # a pipeline's state can not be changed while the engine is frozen

                if is_pipeline:
                    subprocess_rel = SubProcessRelationship.objects.filter(
                        subprocess_id=id)
                    if subprocess_rel:
                        process = PipelineProcess.objects.get(
                            id=subprocess_rel[0].process_id)
                        if process.is_frozen:
                            return False

                    processes = PipelineProcess.objects.filter(
                        root_pipeline_id=id)
                    if processes and processes[0].is_frozen:
                        return False

                status.state = to_state
                if name:
                    status.name = name
                if start:
                    status.started_time = timezone.now()
                if to_state in states.ARCHIVED_STATES:
                    status.archived_time = timezone.now()
                status.save()
                return True
            else:
                return False
Example #26
 def __init__(self, id=None, node=None):
     self.next = mock.MagicMock(return_value=node or uniqid())
     super(StartEventObject, self).__init__(id=id)
Example #27
 def __init__(self, **kwargs):
     self.outgoing = kwargs.get('outgoing', MockSequenceCollection())
     self.converge_gateway_id = kwargs.get('converge_gateway_id', uniqid())
Example #28
    def transit(self,
                id,
                to_state,
                is_pipeline=False,
                appoint=False,
                start=False,
                name='',
                version=None,
                unchanged_pass=False):
        """
        Try to transition a node to another state.
        :param id: node ID
        :param to_state: target state
        :param is_pipeline: whether this node is a pipeline
        :param appoint: whether this action was initiated by a user (not an internal engine operation)
        :param start: whether to refresh the node's started time
        :param name: node name
        :param version: node version
        :param unchanged_pass: treat the transition as successful when to_state equals the node's current state
        :return:
        """
        defaults = {'name': name, 'state': to_state, 'version': uniqid()}
        if start:
            defaults['started_time'] = timezone.now()
        status, created = self.get_or_create(id=id, defaults=defaults)

        # reservation or first creation
        if created:
            return ActionResult(result=True, message='success', extra=status)

        with transaction.atomic():
            kwargs = {'id': id}
            if version:
                kwargs['version'] = version

            try:
                status = self.select_for_update().get(**kwargs)

            except Status.DoesNotExist:
                return ActionResult(
                    result=False,
                    message='node not exists or not be executed yet')

            if unchanged_pass and status.state == to_state:
                return ActionResult(result=True,
                                    message='success',
                                    extra=status)

            if states.can_transit(from_state=status.state,
                                  to_state=to_state,
                                  is_pipeline=is_pipeline,
                                  appoint=appoint):

                # a pipeline's state can not be changed while the engine is frozen
                if is_pipeline:
                    subprocess_rel = SubProcessRelationship.objects.filter(
                        subprocess_id=id)
                    if subprocess_rel:
                        process = PipelineProcess.objects.get(
                            id=subprocess_rel[0].process_id)
                        if process.is_frozen:
                            return ActionResult(
                                result=False,
                                message=
                                'engine is frozen, can not perform operation')

                    processes = PipelineProcess.objects.filter(
                        root_pipeline_id=id)
                    if processes and processes[0].is_frozen:
                        return ActionResult(
                            result=False,
                            message=
                            'engine is frozen, can not perform operation')

                if name:
                    status.name = name
                if start:
                    status.started_time = timezone.now()
                if to_state in states.ARCHIVED_STATES:
                    status.archived_time = timezone.now()

                # from FINISHED to RUNNING
                if states.is_rerunning(from_state=status.state,
                                       to_state=to_state):
                    history = History.objects.record(status, is_rerunning=True)
                    if history:
                        LogEntry.objects.link_history(node_id=status.id,
                                                      history_id=history.id)
                    status.loop += 1
                    status.skip = False
                    status.version = uniqid()

                status.state = to_state
                status.save()
                return ActionResult(result=True,
                                    message='success',
                                    extra=status)
            else:
                return ActionResult(
                    result=False,
                    message='can\'t transit state(%s) from %s to %s' %
                    (id, status.state, to_state),
                    extra=status)
Example #29
 def setUp(self):
     self.subprocess_id = uniqid()
     self.process_id = uniqid()
Example #30
 def __init__(self, id=None, name=''):
     self.id = id or uniqid()
     self.name = name
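
Throughout these examples uniqid() supplies unique identifiers: template and instance IDs, status versions, mock node IDs and Celery task names. Example #12 concatenates two uniqid() results and slices them into 32-character halves, which implies each call returns a 32-character hex string. A minimal sketch of such a helper, assuming a uuid4-based implementation rather than the library's actual code:

import uuid


def uniqid():
    # Assumption: return a 32-character hexadecimal identifier derived from
    # a random UUID. This is a sketch of the expected behaviour, not the
    # pipeline library's real implementation.
    return uuid.uuid4().hex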