Example #1
    def _handle_ref_node(self, node, widget, upstream=False, recursive=True):

        # get the reference snapshot (should maybe use the loader or
        # at least share the code)
        instance = Xml.get_attribute(node,"instance")
        search_type = Xml.get_attribute(node,"search_type")
        search_id = Xml.get_attribute(node,"search_id")
        context = Xml.get_attribute(node,"context")
        version = Xml.get_attribute(node,"version")
        # this is often the Maya file node name or XSI long clip name
        node_name = Xml.get_attribute(node, "node")
        my_name = Xml.get_node_name(node)

        # get the snapshot
        ref_snapshot = Snapshot.get_by_version(search_type, search_id,\
                    context, version)
        #ref_snapshot = Snapshot.get_latest(search_type,search_id, context)
        if ref_snapshot == None:
            widget.add("|---> <font color='red'>Error: No reference found for [%s, %s, %s]</font>" % \
                (search_type, search_id, context) )
            return

        toggle_id = self.generate_unique_id('toggle')
        widget.add(FloatDivWdg(), toggle_id)
        version = ref_snapshot.get_value("version")

        
        try: 
            sobject = ref_snapshot.get_sobject()
        except SObjectNotFoundException, e:
            widget.add('[%s|%s] may have been deleted or is not viewable.' % (ref_snapshot.get_value('search_type'),\
                ref_snapshot.get_value('search_id')))
            return
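A minimal sketch of the lookup this method performs, assuming a hypothetical <ref/> element whose attribute names match the ones read above (the element name and the sample values are illustrative, not from the original source):

ref_xml = Xml()
ref_xml.read_string(
    "<ref search_type='prod/asset' search_id='123' "
    "context='model' version='2' node='asset_file_node'/>")
node = ref_xml.get_root_node()

# same lookup as in _handle_ref_node()
snapshot = Snapshot.get_by_version(
    Xml.get_attribute(node, "search_type"),
    Xml.get_attribute(node, "search_id"),
    Xml.get_attribute(node, "context"),
    Xml.get_attribute(node, "version"))
# snapshot is None when no matching checkin exists, which is what
# produces the red error message in the widget above.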
Example #2
    def get_action_nodes(my, scope="dependent"):
        action_nodes = []
        nodes = Xml.get_children(my.node)
        for node in nodes:
            node_name = Xml.get_node_name(node)
            if node_name == "action":
                node_scope = Xml.get_attribute(node, "scope")
                if scope and node_scope != scope:
                    continue

                action_nodes.append(node)
        return action_nodes
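A hedged sketch of the same filtering done standalone: the <trigger> element name, the event values, and the scopes are assumptions for illustration; the Xml calls are the ones used in the method above.

xml = Xml()
xml.read_string("""
<trigger>
  <action scope="dependent" event="insert|prod/asset"/>
  <action scope="global" event="update|prod/asset"/>
</trigger>
""")
root = xml.get_root_node()

# keep only <action> children whose scope matches, as get_action_nodes() does
dependent_actions = [
    node for node in Xml.get_children(root)
    if Xml.get_node_name(node) == "action"
    and Xml.get_attribute(node, "scope") == "dependent"
]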
Example #3
    def get_action_node(my, event_name, scope="dependent"):
        nodes = Xml.get_children(my.node)
        for node in nodes:
            node_name = Xml.get_node_name(node)
            if node_name == "action":
                node_event = Xml.get_attribute(node, "event")
                if node_event != event_name:
                    continue

                node_scope = Xml.get_attribute(node, "scope")
                if scope and node_scope != scope:
                    continue

                return node
Example #4
    def get_action_options(my, event_name, scope="dependent"):
        options = {}
        action_node = my.get_action_node(event_name, scope=scope)
        if action_node is None:
            return options

        nodes = Xml.get_children(action_node)
        for node in nodes:
            name = Xml.get_node_name(node)
            if name == "#text":
                continue
            value = Xml.get_node_value(node)
            options[name] = value

        return options
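Taken together, get_action_node() selects the <action> element matching an event and scope, and get_action_options() flattens that element's children into a dict. A hedged sketch of the call pattern, where "handler" stands in for whatever object owns these methods and the XML content is assumed:

# assuming handler.node wraps something like:
#   <action event="insert|prod/asset" scope="dependent">
#     <script_path>triggers/on_insert</script_path>
#     <mode>same process</mode>
#   </action>
options = handler.get_action_options("insert|prod/asset", scope="dependent")
# options -> {'script_path': 'triggers/on_insert', 'mode': 'same process'}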
Example #5
    def add_xml_rules(my, xml):
        '''xml should be an XML object with the data in the form of
        <rules>
          <group type='sobject' default='<default>'>
            <rule key='<key>' access='<access>'/>
          </group>
        </rules>
        '''

        if isinstance(xml, basestring):
            xmlx = Xml()
            xmlx.read_string(xml)
            xml = xmlx


        my.xml = xml

        # parse shorthand rules
        rule_nodes = xml.get_nodes("rules/rule")
        if not rule_nodes:
            return


        if my.project_codes == None:
            search = Search('sthpw/project')
            projects = search.get_sobjects()
            my.project_codes = [x.get_code() for x in projects]
            my.project_codes.append('*')

        for rule_node in rule_nodes:
            # initiate the project_code here for each loop
            project_code = '*'
            group_type = Xml.get_attribute( rule_node, "group" )
            if not group_type:
                # category is the preferred name over group now
                # TODO: phase out the use of group completely
                group_type = Xml.get_attribute( rule_node, "category" )

            # get an existing rule set or create a new one
            if my.groups.has_key(group_type):
                rules = my.groups[group_type]
            else:
                rules = {}
                my.groups[group_type] = rules

            # set the default, if specified
            group_default = xml.get_attribute( rule_node, "default" )
            if group_default:
                rules['__DEFAULT__'] = group_default
                continue


            # generate the rule key
            #rule_key = xml.get_attribute(rule_node, 'key')
            attrs = xml.get_attributes(rule_node)
            attrs2 = {}
            count = 0
            for name, value in attrs.items():
                if name in ['access', 'group', 'category', 'project']:
                    continue
                # have to turn everything into strings
                attrs2[str(name)] = str(value)
                count += 1


            if count == 1 and attrs2.has_key('key'):
                # backwards compatibility
                rule_key = attrs2['key']
            else:
                #rule_key = str(attrs2)
                rule_key = str(Common.get_dict_list(attrs2))

            rule_project =  xml.get_attribute(rule_node, 'project')
            if rule_project:
                project_code = rule_project
                # special treatment for search_filter to enable
                # project-specific search
                if group_type=='search_filter':
                    attrs2['project'] = rule_project
            
            # if there is a value, then combine it with the key
            rule_value = xml.get_attribute(rule_node, 'value')
            if rule_value:
                rule_key = "%s||%s" % (rule_key, rule_value)

            # add a project code qualifier
            rule_keys = []
            if project_code == '*' and group_type != 'search_filter':
                for code in my.project_codes:
                    key = "%s?project=%s" % (rule_key, code)
                    rule_keys.append(key)
            else:
                key= "%s?project=%s" % (rule_key, project_code)

                #key = str(key) # may need to stringify unicode string
                rule_keys.append(key)

            rule_access = xml.get_attribute(rule_node, 'access')

            #if rule_access == "":
            #    raise AccessException("Cannot have empty 'access':\n%s" \
            #        % xml.to_string(rule_node) )

            # if no key is specified, it is considered a DEFAULT
            if not rule_keys and not rule_value:
                rule_keys = ['__DEFAULT__']
            for rule_key in rule_keys:
                # rule_access can be empty (e.g. for search_filter rules), but the
                # rule still has to go into the rules dict regardless.
                # If the rule already exists, keep the highest access level.
                if rule_access and rules.has_key(rule_key):
                    curr_access, cur_attrs = rules[rule_key]

                    try:
                        access_enum = my._get_access_enum(rule_access)
                        if my._get_access_enum(curr_access) > access_enum:
                            continue
                    except:
                        if group_type == "builtin":
                            continue
                        else:
                            raise


                rules[rule_key] = rule_access, attrs2


        #for rule, values in rules.items():
        #    print "rule: ", rule, values[0]


        # FIXME: this one doesn't support the multi-attr structure
        # convert this to a python data structure
        group_nodes = xml.get_nodes("rules/group")
        for group_node in group_nodes:

            group_type = Xml.get_attribute( group_node, "type" )

            # get an existing rule set or create a new one
            if my.groups.has_key(group_type):
                rules = my.groups[group_type]
            else:
                rules = {}
                my.groups[group_type] = rules

            # set the default, if specified
            group_default = xml.get_attribute( group_node, "default" )
            if group_default != "":
                rules['__DEFAULT__'] = group_default


            # get all of the rule nodes
            rule_nodes = Xml.get_children(group_node)
            for rule_node in rule_nodes:
                project_code='*'

                if Xml.get_node_name(rule_node) != 'rule':
                    continue

                rule_key = xml.get_attribute(rule_node, 'key')
                rule_access = xml.get_attribute(rule_node, 'access')

                rule_project =  xml.get_attribute(rule_node, 'project')
                if rule_project:
                    project_code = rule_project
                if rule_access == "":
                    raise AccessException("Cannot have empty 'access':\n%s" \
                        % xml.to_string(rule_node) )

                rule_keys = []
                attrs2 = {'key': rule_key}

                # add a project code qualifier
                if project_code == '*' and group_type != 'search_filter':
                    for code in my.project_codes:
                        key = "%s?project=%s" % (rule_key, code)
                        rule_keys.append(key)
                else:
                    key= "%s?project=%s" % (rule_key, project_code)
                    rule_keys.append(key)

                for rule_key in rule_keys:
                    rules[rule_key] = rule_access, attrs2
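The key-building logic above qualifies every rule with a project code: a shorthand rule that does not name a project is fanned out into one key per known project plus the '*' wildcard. A minimal sketch of that expansion, assuming a fixed project list and a rule like <rule group='search_type' key='prod/asset' access='allow'/>:

project_codes = ['vfx', 'games', '*']   # assumed project list
rule_key = 'prod/asset'                 # the rule's key attribute

rule_keys = ["%s?project=%s" % (rule_key, code) for code in project_codes]
# -> ['prod/asset?project=vfx', 'prod/asset?project=games', 'prod/asset?project=*']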
Example #6
    def execute(my):
        log = my.log
        rules = my.rules

        # Example rules: only notes will get through.
        # We need hierarchical rules to ensure that only notes
        # for project/asset sobjects will pass.
        # The second rule below is much more difficult to implement.
        rulesXXX = '''
        <rule group='heirarchy' key='project/asset.sthpw/note' access='allow'/>
        <rule group='heirarchy' key="project/asset.sthpw/note['assigned','beth']" access='allow'/>"
        '''

        access_manager = AccessManager()
        access_manager.add_xml_rules(rules)

        # filter out project
        namespace = log.get_value("namespace")
        key1 = {'code': namespace}
        key2 = {'code': '*'}
        keys = [key1, key2]
        if not access_manager.check_access(
                "project", keys, "allow", default="deny"):
            my.filtered_xml = Xml()
            my.filtered_xml.read_string("<transaction/>")
            my.message = "Transaction prevented due to project restriction"
            return

        # filter the transaction against the security model
        xml = log.get_xml_value("transaction")

        my.filtered_xml = Xml()
        my.filtered_xml.create_doc("transaction")
        root2 = my.filtered_xml.get_root_node()

        nodes = xml.get_nodes("transaction/*")
        num_nodes = len(nodes)
        count = 0

        for node in nodes:
            if Xml.get_node_name(node) == "sobject":
                search_type = xml.get_attribute(node, "search_type")
                parts = search_type.split("?")
                search_type = parts[0]

                # filter search types
                key1 = {'code': search_type}
                key2 = {'code': "*"}
                keys = [key1, key2]
                if not access_manager.check_access(
                        "search_type", keys, "allow", default="deny"):
                    continue

                # check hierarchical rule
                parent_type = xml.get_attribute(node, "parent_type")
                key = "%s.%s" % (parent_type, search_type)

                my.filtered_xml.append_child(root2, node)
                count += 1

            else:
                my.filtered_xml.append_child(root2, node)
                count += 1

        if len(nodes) != 0 and len(
                my.filtered_xml.get_nodes("transaction/*")) == 0:
            my.message = "All actions filtered due to security restrictions (%s actions)" % num_nodes
Example #7
    def add_xml_rules(self, xml, project_code=None):
        '''xml should be an XML object with the data in the form of
        <rules>
          <group type='sobject' default='<default>'>
            <rule key='<key>' access='<access>'/>
          </group>
        </rules>
        '''

        from pyasm.search import SObject
        if isinstance(xml, SObject):
            sobject = xml
            xml = sobject.get_xml_value("access_rules")
            if not project_code:
                project_code = sobject.get_value("project_code")

        project_override = project_code
        if isinstance(xml, basestring):
            xmlx = Xml()
            xmlx.read_string(xml)
            xml = xmlx


        # parse shorthand rules
        rule_nodes = xml.get_nodes("rules/rule")
        if not rule_nodes:
            return

        # store all of the project codes (this will only run once)
        if self.project_codes == None:
            search = Search('sthpw/project')
            projects = search.get_sobjects()
            self.project_codes = [x.get_code() for x in projects]
            self.project_codes.append('*')

        for rule_node in rule_nodes:
            # initiate the project_code here for each loop
            project_code = '*'
            group_type = Xml.get_attribute( rule_node, "group" )
            if not group_type:
                # category is the preferred name over group now
                # TODO: phase out the use of group completely
                group_type = Xml.get_attribute( rule_node, "category" )

            # get an existing rule set or create a new one
            if self.groups.has_key(group_type):
                rules = self.groups[group_type]
            else:
                rules = {}
                self.groups[group_type] = rules

            # set the default, if specified
            group_default = xml.get_attribute( rule_node, "default" )
            if group_default:
                rules['__DEFAULT__'] = group_default
                continue


            # generate the rule key
            #rule_key = xml.get_attribute(rule_node, 'key')
            attrs = xml.get_attributes(rule_node)
            attrs2 = {}
            count = 0
            for name, value in attrs.items():
                if name in ['access', 'group', 'category', 'project']:
                    continue
                # have to turn everything into strings
                attrs2[str(name)] = str(value)
                count += 1


            if count == 1 and attrs2.has_key('key'):
                # backwards compatibility
                rule_key = attrs2['key']
            else:
                #rule_key = str(attrs2)
                rule_key = str(Common.get_dict_list(attrs2))

            if project_override:
                rule_project = project_override
            else:
                rule_project =  xml.get_attribute(rule_node, 'project')

            if rule_project:
                project_code = rule_project
                # special treatment for search_filter to enable
                # project-specific search
                if group_type=='search_filter':
                    attrs2['project'] = rule_project
            
            # if there is a value, then combine it with the key
            rule_value = xml.get_attribute(rule_node, 'value')
            if rule_value:
                rule_key = "%s||%s" % (rule_key, rule_value)

            # add a project code qualifier
            rule_keys = []
         
            # project rule is special
            if group_type == 'project':
                key = str(rule_key)
                rule_keys.append(key)
            elif project_code == '*' and group_type != 'search_filter':
                for code in self.project_codes:
                    key = "%s?project=%s" % (rule_key, code)
                    rule_keys.append(key)
            else:
                key= "%s?project=%s" % (rule_key, project_code)

                #key = str(key) # may need to stringify unicode string
                rule_keys.append(key)
                    
                #key= "%s?project=*" % (rule_key)
                #rule_keys.append(key)

            rule_access = xml.get_attribute(rule_node, 'access')

            #if rule_access == "":
            #    raise AccessException("Cannot have empty 'access':\n%s" \
            #        % xml.to_string(rule_node) )

            # if no key is specified, it is considered a DEFAULT
            if not rule_keys and not rule_value:
                rule_keys = ['__DEFAULT__']
            for rule_key in rule_keys:
                # rule_access can be empty (e.g. for search_filter rules), but the
                # rule still has to go into the rules dict regardless.
                # If the rule already exists, keep the highest access level.
                if rule_access and rules.has_key(rule_key):
                    curr_access, cur_attrs = rules[rule_key]

                    try:
                        access_enum = self._get_access_enum(rule_access)
                        if self._get_access_enum(curr_access) > access_enum:
                            continue
                    except:
                        if group_type == "builtin":
                            continue
                        else:
                            raise


                rules[rule_key] = rule_access, attrs2
            

        # FIXME: this one doesn't support the multi-attr structure
        # convert this to a python data structure
        group_nodes = xml.get_nodes("rules/group")
        for group_node in group_nodes:

            group_type = Xml.get_attribute( group_node, "type" )

            # get an existing rule set or create a new one
            if self.groups.has_key(group_type):
                rules = self.groups[group_type]
            else:
                rules = {}
                self.groups[group_type] = rules

            # set the default, if specified
            group_default = xml.get_attribute( group_node, "default" )
            if group_default != "":
                rules['__DEFAULT__'] = group_default


            # get all of the rule nodes
            rule_nodes = Xml.get_children(group_node)
            for rule_node in rule_nodes:
                project_code='*'

                if Xml.get_node_name(rule_node) != 'rule':
                    continue

                rule_key = xml.get_attribute(rule_node, 'key')
                rule_access = xml.get_attribute(rule_node, 'access')

                if project_override:
                    rule_project = project_override
                else:
                    rule_project =  xml.get_attribute(rule_node, 'project')

                if rule_project:
                    project_code = rule_project
                if rule_access == "":
                    raise AccessException("Cannot have empty 'access':\n%s" \
                        % xml.to_string(rule_node) )

                rule_keys = []
                attrs2 = {'key': rule_key}

                # add a project code qualifier
                if project_code == '*' and group_type != 'search_filter':
                    for code in self.project_codes:
                        key = "%s?project=%s" % (rule_key, code)
                        rule_keys.append(key)
                else:
                    key= "%s?project=%s" % (rule_key, project_code)
                    rule_keys.append(key)

                for rule_key in rule_keys:
                    rules[rule_key] = rule_access, attrs2
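A hedged example of input this method accepts, following its docstring: shorthand <rule/> elements plus an older <group><rule/></group> block. The keys and access values are illustrative, and passing project_code scopes every rule to that project (the project_override branch above):

rules = '''
<rules>
  <rule group="search_type" key="prod/asset" access="allow"/>
  <group type="sobject" default="deny">
    <rule key="prod/shot" access="allow"/>
  </group>
</rules>
'''
access_manager = AccessManager()
access_manager.add_xml_rules(rules, project_code="vfx")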
Example #8
class Pipeline(SObject):
    '''Represents a pipeline of processes and their relationships'''
    SEARCH_TYPE = "sthpw/pipeline"

    def __init__(self,
                 search_type="sthpw/pipeline",
                 columns=None,
                 result=None,
                 fast_data=None):
        super(Pipeline, self).__init__(search_type,
                                       columns,
                                       result,
                                       fast_data=fast_data)

        self.processes = []
        self.recursive_processes = []
        self.connects = {}
        self.pipeline_dict = {}
        #if columns != None:
        # no_exception=True is used here to ensure that this can be put into
        # a select widget, which now uses distinct for the value column
        xml_str = self.get_value("pipeline", no_exception=True)

        # don't cache a potentially empty xml when Pipeline.create() is called
        if xml_str:
            self.set_pipeline(xml_str, cache=False)

        self.process_sobjects = None

    def get_defaults(self):
        '''The default, if not specified is to set the current project'''
        defaults = {}

        project_code = Project.get_project_code()
        defaults['project_code'] = project_code

        self.update_dependencies()

        return defaults

    def on_updateX(self):

        # initialize the triggers for the workflow
        """
        event = "change|sthpw/pipeline"
        trigger = SearchType.create("sthpw/trigger")
        trigger.set_value("event", event)
        trigger.set_value("class_name", ProjectPipelineTrigger)
        trigger.set_value("mode", "same process,same transaction")
        Trigger.append_static_trigger(trigger, startup=startup)
        """

        if self.SEARCH_TYPE == "config/pipeline":
            return

        code = self.get_value("code")
        search = Search("config/pipeline")
        search.add_filter("code", code)
        pipeline = search.get_sobject()

        if not pipeline:
            pipeline = SearchType.create("config/pipeline")

        items = self.data.items()

        for name, value in items:
            if name.startswith("__"):
                continue
            if name in ["id", "project_code"]:
                continue
            if not value:
                continue
            pipeline.set_value(name, value)

        pipeline.commit(triggers="none")

    def on_insertX(self):

        # Copy this to the config/pipeline table.  Currently this table
        # is not being used, however, pipelines really should be defined
        # there.  It is an unfortunate historical wart that pipelines
        # are stored in the sthpw database.  In some future release of
        # TACTIC, the pipeline table in the sthpw database will be deprecated
        # This copy will ensure that over time, the impact of this move over
        # will be minimized
        if self.SEARCH_TYPE == "config/pipeline":
            return
        search = Search("config/pipeline")
        search.add_filter("code", self.get_code())
        pipeline = search.get_sobject()

        if not pipeline:
            pipeline = SearchType.create("config/pipeline")
        for name, value in self.get_data().items():
            if name.startswith("__"):
                continue
            if name in ["id", "project_code"]:
                continue
            if not value:
                continue

            pipeline.set_value(name, value)

        pipeline.commit(triggers="none")

    def on_deleteX(self):
        if self.SEARCH_TYPE == "config/pipeline":
            return

        search = Search("config/pipeline")
        search.add_filter("code", self.get_code())
        pipeline = search.get_sobject()
        if pipeline:
            pipeline.delete()

    def update_dependencies(self):
        '''Function that should be run on insert/update. It is called automatically
        on insert; on update, the caller needs to call it explicitly. It checks the
        search type this pipeline is associated with, adds a pipeline_code column
        if one is missing, and updates the process table as well.'''
        search_type = self.get_value('search_type')
        self.update_process_table(search_type=search_type)

        # don't do anything for task sType
        if search_type == 'sthpw/task':
            return

        if not search_type:
            return

        if ProdSetting.get_value_by_key('autofill_pipeline_code') != 'false':
            try:
                columns = SearchType.get_columns(search_type)
                if not 'pipeline_code' in columns:
                    # add the pipeline code column
                    from pyasm.command import ColumnAddCmd
                    cmd = ColumnAddCmd(search_type, "pipeline_code", "varchar")
                    cmd.execute()
            except SqlException as e:
                print("Error creating column [pipeline_code] for %" %
                      search_type)
                pass

            # go through all of the sobjects and set all the empty ones
            # to the new pipeline
            search = Search(search_type)
            search.add_op("begin")
            search.add_filter("pipeline_code", "NULL", op='is', quoted=False)
            search.add_filter("pipeline_code", "")
            search.add_op("or")
            sobject_ids = search.get_sobject_ids()

            if sobject_ids:
                # this is much faster and memory efficient
                db_resource = SearchType.get_db_resource_by_search_type(
                    search_type)
                sql = DbContainer.get(db_resource)
                tbl = search.get_table()
                sobject_ids = [str(x) for x in sobject_ids]
                pipeline_code = self.get_value("code")
                sql.do_update(
                    '''UPDATE "%s" SET "pipeline_code" = '%s' WHERE id in (%s) '''
                    % (tbl, pipeline_code, ','.join(sobject_ids)))
            """
            for sobject in sobjects:    
                if not sobject.get_value("pipeline_code"):
                    sobject.set_value("pipeline_code", self.get_value("code") )
                    sobject.commit(triggers=False)
            """

    def update_process_table(self, search_type=None):
        ''' make sure to update process table'''

        template = self.get_template_pipeline()
        if template:
            if template.get_code() == self.get_code():
                template_processes = []
            else:
                template_processes = template.get_process_names()
        else:
            template_processes = []

        process_names = self.get_process_names()
        pipeline_code = self.get_code()

        search = Search("config/process")
        search.add_filter("pipeline_code", pipeline_code)
        process_sobjs = search.get_sobjects()
        existing_names = SObject.get_values(process_sobjs, 'process')

        pipeline_has_updates = False
        count = 1
        for process_name in process_names:

            exists = False
            for process_sobj in process_sobjs:
                # if it already exist, then update
                if process_sobj.get_value("process") == process_name:
                    exists = True
                    break

            if not exists:
                process_sobj = SearchType.create("config/process")

                # default to (main) for non-task status pipeline
                if search_type and search_type != 'sthpw/task':
                    process_sobj.set_value('subcontext_options', '(main)')
                process_sobj.set_value("pipeline_code", pipeline_code)
                process_sobj.set_value("process", process_name)

            # copy information over from the template
            if process_name in template_processes:
                template_attrs = template.get_process_attrs(process_name)
                process = self.get_process(process_name)
                for name, value in template_attrs.items():
                    if name in ['xpos', 'ypos', 'name']:
                        continue
                    process.set_attribute(name, value)
                    pipeline_has_updates = True

                search = Search("config/process")
                search.add_filter("process", process_name)
                # NEED ANOTHER FILTER for templates here
                search.add_filter("pipeline_code", "%/__TEMPLATE__", op="like")

                # copy certain values from the template
                template_process = search.get_sobject()
                for name, value in template_process.get_data().items():
                    if not value:
                        continue
                    if name in ['checkin_mode']:
                        process_sobj.set_value(name, value)

            attrs = self.get_process_attrs(process_name)
            color = attrs.get('color')
            if color:
                process_sobj.set_value("color", color)

            process_sobj.set_value("sort_order", count)
            process_sobj.commit()
            count += 1

        if pipeline_has_updates:
            self.set_value("pipeline", self.get_pipeline_xml().to_string())
            self.commit()

        # delete obsolete
        obsolete = set(existing_names) - set(process_names)
        if obsolete:
            for obsolete_name in obsolete:
                for process_sobj in process_sobjs:
                    if process_sobj.get_value("process") != obsolete_name:
                        continue
                    # FIXME: this node type is always None
                    process_obj = self.get_process(obsolete_name)
                    if process_obj:
                        node_type = process_obj.get_type()
                        try:
                            from pyasm.command import CustomProcessConfig
                            handler = CustomProcessConfig.get_delete_handler(
                                node_type, {})
                        except Exception as e:
                            handler = None

                        if handler:
                            handler.execute()

                    # delete it
                    process_sobj.delete()

    def get_name(self, long=False):
        '''this is the old function, kept for backward-compatibility'''
        #TODO: remove this function here
        return self.get_code()

    def set_pipeline(self, pipeline_xml, cache=True):
        '''set the pipeline externally'''
        # cache according to pipeline code, which will share the same xml object
        if self.is_insert():
            cache = False

        search_key = self.get_search_key()

        xml_dict = Container.get("Pipeline:xml")

        if xml_dict == None:
            xml_dict = {}
            Container.put("Pipeline:xml", xml_dict)

        self.xml = xml_dict.get(search_key)

        if self.xml == None:
            self.xml = Xml()
            if cache:
                xml_dict[search_key] = self.xml

            if not pipeline_xml:
                pipeline_xml = "<pipeline/>"

            try:
                self.xml.read_string(pipeline_xml)
            except XmlException as e:
                self.xml.read_string("<pipeline/>")

        # clear these again when set externally
        self.processes = []
        self.recursive_processes = []

        # create the process and pipelines
        process_nodes = self.xml.get_nodes(
            "pipeline/process | pipeline/pipeline")
        for node in process_nodes:
            node_name = self.xml.get_node_name(node)
            process = Process(node)
            process.set_parent_pipeline_code(self.get_code())
            self.processes.append(process)

            if node_name == "pipeline":
                name = Xml.get_attribute(node, "name")

                # prevent infinite loop
                if name == self.get_code():
                    continue

                child = Pipeline.get_by_code(name)
                if not child:
                    continue
                self.pipeline_dict[name] = child
                process.set_child_pipeline(child)

    def get_pipeline_xml(self):
        return self.xml

    def to_string(self):
        return self.xml.to_string()

    def get_process(self, name):
        '''returns a Process object'''
        if type(name) not in types.StringTypes:
            name = name.get_name()

        # first try the top level
        for process in self.processes:
            if process.get_name() == name:
                return process

        # Then iterate.  This may be slow
        processes = self.get_processes(recurse=True)

        for process in processes:
            if process.get_full_name() == name:
                return process
        return None
        #raise PipelineException( "Pipeline '%s' does not have process '%s'" % \
        #    (self.get_name(),name) )

    def get_processes(self, recurse=False, type=None):
        '''returns all the Process objects in this pipeline'''

        if type and isinstance(type, basestring):
            types = [type]
        else:
            types = type

        if recurse:
            if self.recursive_processes:
                return self.recursive_processes
            else:
                # add child processes
                for process in self.processes:

                    if types and process.get_type() not in types:
                        continue

                    self.recursive_processes.append(process)

                    child_pipeline = process.get_child_pipeline()
                    if not child_pipeline:
                        continue

                    child_processes = child_pipeline.get_processes(
                        recurse=recurse)
                    for x in child_processes:
                        x.set_sub_pipeline_process(True)
                    self.recursive_processes.extend(child_processes)
                return self.recursive_processes

        else:
            if types:
                ret_processes = []
                for process in self.processes:
                    if process.get_type() not in types:
                        continue
                    ret_processes.append(process)
                return ret_processes
            else:
                return self.processes

    def get_process_attrs(self, name):
        process = self.get_process(name)
        if process:
            return process.get_attributes()
        else:
            return {}

    def get_process_names(self, recurse=False, type=None):
        '''returns all the Process names in this pipeline'''

        if type and isinstance(type, basestring):
            types = [type]
        else:
            types = type

        processes = self.get_processes(recurse, type=types)
        if recurse:
            process_names = []
            for process in processes:
                if types and process.get_type() not in types:
                    continue

                if process.is_from_sub_pipeline():
                    process_names.append(process.get_full_name())
                else:
                    process_names.append(process.get_name())
            return process_names
        else:
            return [process.get_name() for process in processes]

    def get_process_sobject(self, process):
        # search all processes and cache all of the sobjects locally
        if self.process_sobjects == None:

            search = Search("config/process")
            search.add_filter("pipeline_code", self.get_code())
            sobjects = search.get_sobjects()

            self.process_sobjects = {}

            for process_sobject in sobjects:
                # use a separate name to avoid overwriting the process argument
                pcs = process_sobject.get("process")
                self.process_sobjects[pcs] = process_sobject

        process_sobject = self.process_sobjects.get(process)
        return process_sobject

    def get_process_sobjects(self):

        process_name = "dummy"
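        # "dummy" is arbitrary: any process name triggers get_process_sobject(),
        # which populates and caches self.process_sobjects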

        self.get_process_sobject(process_name)

        return self.process_sobjects

    def get_index(self, name):
        index = 0
        for process in self.processes:
            if process.get_name() == name:
                return index
            index += 1

    def _get_connects(self, process="", direction='from'):

        if direction == "from":
            opposite = "to"
        else:
            opposite = "from"

        if not process:
            connect_nodes = self.xml.get_nodes("pipeline/connect")
        else:
            connect_nodes = self.xml.get_nodes(\
                "pipeline/connect[@%s='%s' or @%s='*']" % (direction, process, direction))
        connects = []
        for node in connect_nodes:
            opposite_name = Xml.get_attribute(node, opposite)
            full_name = "%s/%s" % (self.get_name(), opposite_name)
            if process == opposite_name or process == full_name:
                continue
            connects.append(ProcessConnect(node))
        return connects
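    # Illustration (not part of the original source): for a pipeline defined as
    #   <pipeline><process name="model"/><process name="rig"/>
    #             <connect from="model" to="rig"/></pipeline>
    # _get_connects("model", direction="from") matches the connect whose "to"
    # side is "rig", so get_output_process_names("model") returns ["rig"] and
    # get_input_process_names("rig") returns ["model"].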

    def get_input_processes(self, process, type=None, to_attr=None):
        connects = self._get_connects(process, direction='to')
        processes = []
        for connect in connects:

            if to_attr:
                connect_to_attr = connect.get_attr("to_attr")
                if connect_to_attr != to_attr:
                    continue

            from_connect = connect.get_from()
            process = self.get_process(from_connect)
            if process:
                if type and process.get_type() != type:
                    continue
                processes.append(process)

        return processes

    def get_input_process_names(self, process, type=None, from_attr=None):
        input_processes = self.get_input_processes(process, type, from_attr)
        process_names = [x.get_name() for x in input_processes]
        return process_names

    def get_output_processes(self, process, type=None, from_attr=None):
        connects = self._get_connects(process, direction="from")
        if not connects:
            return []

        processes = []
        for connect in connects:
            # make sure there are no empty contexts
            to = connect.get_to()

            if from_attr:
                connect_from_attr = connect.get_attr("from_attr")
                if connect_from_attr != from_attr:
                    continue

            to_pipeline = connect.get_to_pipeline()
            if to_pipeline:
                pipeline = Pipeline.get_by_code(to_pipeline)
                process = pipeline.get_process(to)

                if type and process.get_type() != type:
                    continue

                if process:
                    processes.append(process)

            else:
                process = self.get_process(to)
                if process:
                    processes.append(process)

        return processes

    def get_output_process_names(self, process, type=None, from_attr=None):
        output_processes = self.get_output_processes(process, type, from_attr)
        process_names = [x.get_name() for x in output_processes]
        return process_names

    def get_output_contexts(self, process, show_process=False):
        connects = self._get_connects(process, direction="from")
        if not connects:
            if show_process:
                data = (None, process)
            else:
                data = process
            return [data]

        contexts = []
        for connect in connects:
            # make sure there are no empty contexts
            context = connect.get_context()
            if not context:
                context = connect.get_to()

            if show_process:
                data = (connect.get_to(), context)
            else:
                data = context
            contexts.append(data)

        return contexts

    def get_input_contexts(self, process, show_process=False):
        connects = self._get_connects(process, direction='to')
        contexts = []
        for connect in connects:
            # make sure there are no empty contexts
            context = connect.get_context()
            if not context:
                context = connect.get_from()
            if show_process:
                data = (connect.get_from(), context)
            else:
                data = context
            contexts.append(data)

        return contexts

    def get_group(self, process_name):
        process = self.get_process(process_name)
        return process.get_group()

    def get_input_connects(self, process):
        connects = self._get_connects(process, direction="to")
        if not connects:
            return []
        else:
            return connects

    def get_output_connects(self, process):
        connects = self._get_connects(process, direction="from")
        if not connects:
            return []
        else:
            return connects

    # DEPRECATED
    def get_forward_connects(self, process):
        connects = self._get_connects(process)
        process_names = []
        for connect in connects:
            process_names.append(connect.get_to())

        return process_names

    # DEPRECATED
    def get_backward_connects(self, process):
        connects = self._get_connects(process, direction='to')

        process_names = []
        for connect in connects:
            process_names.append(connect.get_from())

        return process_names

    def get_all_contexts(self):
        connects = self._get_connects()

        contexts = []
        for connect in connects:
            context = connect.get_context()
            if context not in contexts:
                contexts.append(context)

        return contexts

    #
    # support for new pipeline methods
    #
    def get_input_snapshots(self,
                            sobject,
                            process_name,
                            input_name,
                            version='latest'):
        '''gets the snapshots of the input'''
        assert version in ['latest', 'current']

        process_node = self.xml.get_node(
            "pipeline/process[@name='%s']/input[@name='%s']" %
            (process_name, input_name))

        search_type = Xml.get_attribute(process_node, "search_type")
        context = Xml.get_attribute(process_node, "context")
        filter = Xml.get_attribute(process_node, "filter")

        # get the sobjects
        sobjects = sobject.get_all_children(search_type)

        # get the snapshots
        search = Search("sthpw/snapshot")
        search.add_filter('context', context)
        #if version == 'latest':
        #    search.add_filter("is_latest", 1)
        #elif version == 'current':
        #    search.add_filter("is_current", 1)

        # build filters for search_type, search_id combinations
        filters = []
        for sobject in sobjects:
            filter = "(\"search_type\" = '%s' and \"search_id\" = %s)" % (
                sobject.get_search_type(), sobject.get_id())
            filters.append(filter)

        search.add_where("( %s )" % " or ".join(filters))

        snapshots = search.get_sobjects()
        return snapshots

    #
    # Static methods
    #

    def create(name, desc, search_type, xml=None, code=None, color=None):
        '''will only create the pipeline if it does not already exist; otherwise the existing one is returned'''

        if code:
            sobject = Pipeline.get_by_code(code)
        else:
            sobject = None

        if sobject == None:
            #sobject = Pipeline( Pipeline.SEARCH_TYPE )
            sobject = SearchType.create(Pipeline.SEARCH_TYPE)
        else:
            return sobject

        if not xml:
            xml = Xml()
            xml.create_doc('pipeline')

        if isinstance(xml, basestring):
            xml_string = xml
            xml = Xml()
            xml.read_string(xml_string)

        sobject.set_value("pipeline", xml.get_xml())
        sobject.set_pipeline(xml.to_string())

        sobject.set_value('timestamp',
                          Sql.get_default_timestamp_now(),
                          quoted=False)
        if code:
            sobject.set_value('code', code.strip())
        sobject.set_value('name', name.strip())
        sobject.set_value('search_type', search_type)
        sobject.set_value('description', desc)

        if color:
            sobject.set_value("color", color)

        sobject.commit()

        process_names = sobject.get_process_names()

        for i, process_name in enumerate(process_names):
            process = SearchType.create("config/process")
            process.set_value("pipeline_code", sobject.get_code())
            process.set_value("process", process_name)
            process.set_value("sort_order", i)
            process.set_value("subcontext_options", "(main)")
            process.commit()

        return sobject

    create = staticmethod(create)

    def get_by_code(cls, code, allow_default=False):
        '''it is fatal not to have a pipeline, so put a default'''
        if not code:
            return None

        # first look at project specific pipeline
        pipeline = Search.get_by_code("config/pipeline", code)

        if not pipeline:
            pipeline = super(Pipeline, cls).get_by_code(code)

        if not pipeline:
            if code == 'task':

                # Remap this to a default from projects settings
                task_code = ProjectSetting.get_by_key("task_pipeline")
                if not task_code:
                    task_code = "task"

                # Create a default task pipeline
                pipeline = SearchType.create("sthpw/pipeline")
                pipeline.set_value("code", task_code)
                from pyasm.biz import Task
                xml = Task.get_default_task_xml()
                pipeline.set_value("pipeline", xml)
                pipeline.set_pipeline(xml)
                pipeline.set_value("search_type", "sthpw/task")

            elif code == 'approval':
                # Create a default task approval pipeline
                pipeline = SearchType.create("sthpw/pipeline")
                pipeline.set_value("code", "approval")
                from pyasm.biz import Task
                xml = Task.get_default_approval_xml()
                pipeline.set_value("pipeline", xml)
                pipeline.set_pipeline(xml)
                pipeline.set_value("search_type", "sthpw/task")

            elif code == 'dependency':
                pipeline = SearchType.create("sthpw/pipeline")
                pipeline.set_value("code", "dependency")
                from pyasm.biz import Task
                xml = Task.get_default_dependency_xml()
                pipeline.set_value("pipeline", xml)
                pipeline.set_pipeline(xml)
                pipeline.set_value("search_type", "sthpw/task")

            elif code == 'progress':
                pipeline = SearchType.create("sthpw/pipeline")
                pipeline.set_value("code", "progress")
                from pyasm.biz import Task
                xml = Task.get_default_dependency_xml()
                pipeline.set_value("pipeline", xml)
                pipeline.set_pipeline(xml)
                pipeline.set_value("search_type", "sthpw/task")

            elif code == 'milestone':
                pipeline = SearchType.create("sthpw/pipeline")
                pipeline.set_value("code", "milestone")
                from pyasm.biz import Task
                xml = Task.get_default_milestone_xml()
                pipeline.set_value("pipeline", xml)
                pipeline.set_pipeline(xml)
                pipeline.set_value("search_type", "sthpw/milestone")

            elif code == 'snapshot':
                pipeline = SearchType.create("sthpw/pipeline")
                pipeline.set_value("code", "snapshot")
                from pyasm.biz import Task
                xml = Task.get_default_snapshot_xml()
                pipeline.set_value("pipeline", xml)
                pipeline.set_pipeline(xml)
                pipeline.set_value("search_type", "sthpw/snapshot")

        if not pipeline and allow_default:
            search = Search(cls)
            search.add_filter('code', 'default')
            pipeline = search.get_sobject()
            if not pipeline:

                pipeline = cls.create('default',  \
                    'default pipeline', '')

                xml = pipeline.get_xml_value("pipeline")

                # create a default process for the table
                root = xml.get_root_node()
                element = xml.create_element("process")
                Xml.set_attribute(element, "name", "default_process")
                Xml.append_child(root, element)

                pipeline.set_value('pipeline', xml.get_xml())
                pipeline.commit()

                # set the pipeline
                pipeline.set_pipeline(pipeline.get_value('pipeline'))
                Environment.add_warning("pipeline autogenerated", \
                    "[default] pipeline has just been created.")
        # Sometimes, a pipeline is instantiated without calling set_pipeline()
        # to be looked into
        if pipeline and not pipeline.get_processes():
            pipeline.set_pipeline(pipeline.get_value('pipeline'))
        return pipeline

    get_by_code = classmethod(get_by_code)

    # DEPRECATED
    def get_by_name(name):
        ''' for backward-compatibility, name has been renamed as code '''
        return Pipeline.get_by_code(name)

    get_by_name = staticmethod(get_by_name)

    def get_by_search_type(cls, search_type, project_code=''):
        # make sure this is a base search type
        assert search_type
        search_type_obj = SearchType.get(search_type)
        if not search_type_obj:
            return []
        search_type = search_type_obj.get_base_key()

        cache_key = "%s|%s" % (search_type, project_code)

        # commenting out until we have a full implementation of
        # project pipelines
        """
        search = Search("config/pipeline")
        if search_type:
            search.add_filter("search_type", search_type)
        search.add_project_filter(project_code)
        pipelines = cls.get_by_search(search, cache_key, is_multi=True)
        """

        search = Search("sthpw/pipeline")
        if search_type:
            search.add_filter("search_type", search_type)
        search.add_project_filter(project_code)
        pipelines = cls.get_by_search(search, cache_key, is_multi=True)
        if not pipelines:
            return []
        for pipe in pipelines:
            code = pipe.get_code()
            cls.cache_sobject('sthpw/pipeline|%s' % code, pipe)
        return pipelines

    get_by_search_type = classmethod(get_by_search_type)

    def get_process_name_dict(search_type,
                              project_code='',
                              is_group_restricted=False,
                              sobject=None):
        '''get process names for pipelines with a particular search type'''
        pipes = []
        if sobject:
            pipe_code = sobject.get_value('pipeline_code', no_exception=True)
            if pipe_code:
                pipe = Pipeline.get_by_code(pipe_code)
                if pipe:
                    pipes = [pipe]

        if not pipes:
            pipes = Pipeline.get_by_search_type(search_type, project_code)

        process_name_dict = {}

        my_group_names = LoginGroup.get_group_names()
        if pipes:
            for pipe in pipes:

                visible_process_names = []
                process_names = pipe.get_process_names(recurse=True)
                if is_group_restricted:
                    for process_name in process_names:
                        group_name = pipe.get_group(process_name)
                        if group_name and group_name not in my_group_names:
                            continue
                        else:
                            visible_process_names.append(process_name)
                else:
                    visible_process_names.extend(process_names)

                process_name_dict[pipe.get_code()] = visible_process_names

        return process_name_dict

    get_process_name_dict = staticmethod(get_process_name_dict)

    def get_default():
        return Pipeline.get_by_code("default")

    get_default = staticmethod(get_default)

    def get_process_select_data(search_type,
                                extra_process=[],
                                project_code='',
                                is_filter=False,
                                is_group_restricted=False,
                                sobject=None):
        '''get a tuple of data used for the SelectWdg'''
        context_dict = Pipeline.get_process_name_dict(search_type,
                                                      project_code,
                                                      is_group_restricted,
                                                      sobject=sobject)
        labels = []
        values = []
        keys = context_dict.keys()
        keys.sort()
        process_values = Common.sort_dict(context_dict)
        for idx, value in enumerate(process_values):
            key = keys[idx]
            labels.append('&lt;&lt; %s &gt;&gt;' % key)
            if is_filter:
                # add all the process names in this pipeline into the value
                values.append(','.join(value))
            else:
                values.append('')
            # extra process may not be needed
            if extra_process:
                value.extend(extra_process)

            if len(context_dict) > 1 and idx < len(context_dict) - 1:
                value.append('')

            values.extend(value)
            labels.extend(value)
        return labels, values

    get_process_select_data = staticmethod(get_process_select_data)

    def get_by_sobject(sobject, allow_default=False):
        ''' get the pipeline of the sobject'''
        pipeline_name = ''
        if not sobject:
            return None
        if sobject.has_value("pipeline_code"):
            pipeline_name = sobject.get_value("pipeline_code")
        elif sobject.has_value("pipeline"):
            pipeline_name = sobject.get_value("pipeline")
        pipeline = Pipeline.get_by_code(pipeline_name,
                                        allow_default=allow_default)
        return pipeline

    get_by_sobject = staticmethod(get_by_sobject)

    def get_template_pipeline(cls, search_type=None):
        search = Search("sthpw/pipeline")
        search.add_filter("name", "VFX Processes")
        pipeline = search.get_sobject()
        return pipeline

    get_template_pipeline = classmethod(get_template_pipeline)

    def create_pipeline_xml(cls,
                            statuses,
                            process_types=[],
                            process_xpos=[],
                            process_ypos=[]):
        '''create regular pipeline with process_types, xpos, ypos or plain task status pipeline'''
        if not statuses:
            statuses = []

        xml = []

        xml.append('''<pipeline>''')

        if process_types:

            for i, status in enumerate(statuses):

                if status == '':
                    continue

                process_type = process_types[i]

                if len(process_xpos) > i:
                    xpos = process_xpos[i]
                else:
                    xpos = None
                if len(process_ypos) > i:
                    ypos = process_ypos[i]
                else:
                    ypos = None

                if xpos and ypos:
                    xml.append(
                        '''  <process name="%s" type="%s" xpos="%s" ypos="%s"/>'''
                        % (status, process_type, xpos, ypos))
                else:
                    xml.append('''  <process name="%s" type="%s"/>''' %
                               (status, process_type))
        else:
            for status in statuses:
                if status == '':
                    continue
                xml.append('''  <process name="%s"/>''' % status)

        last_status = None
        for i, status in enumerate(statuses):
            if status == '':
                continue

            if i == 0 or last_status == None:
                last_status = status
                continue

            xml.append('''  <connect from="%s" to="%s"/>''' %
                       (last_status, status))
            last_status = status

        xml.append('''</pipeline>''')
        return "\n".join(xml)

    create_pipeline_xml = classmethod(create_pipeline_xml)
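A hedged end-to-end sketch of the static helpers above: build a pipeline XML from a list of statuses, create (or fetch) the pipeline, and walk its processes. The pipeline name, description, search type, and statuses are assumptions for illustration; the calls and their signatures are the ones defined in this class.

xml = Pipeline.create_pipeline_xml(['modeling', 'rigging', 'lighting'])
# -> <pipeline>
#      <process name="modeling"/> ...
#      <connect from="modeling" to="rigging"/> ...
#    </pipeline>

pipeline = Pipeline.create('asset_pipeline', 'Basic asset pipeline',
                           'prod/asset', xml=xml)

print(pipeline.get_process_names())                   # ['modeling', 'rigging', 'lighting']
print(pipeline.get_output_process_names('modeling'))  # ['rigging']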