def handle_include(my, node):
    '''Process an <include> node of a plugin manifest: execute the referenced
    python file and walk the manifest XML it returns, dispatching each child
    node to the matching handler (sobject / search_type / nested include).

    node: the <include> element; its "path" attribute is relative to the
    plugin directory.
    Raises TacticException when the "path" attribute is missing.
    Returns None; all effects happen inside the handle_* calls.
    '''
    path = my.xml.get_attribute(node, "path")
    if not path:
        raise TacticException("No path found for include in manifest")
    path = "%s/%s" % (my.plugin_dir, path)

    # Only python includes can produce a manifest.  Pre-initializing avoids
    # a NameError on non-.py paths (latent bug in the original).
    manifest = None
    if path.endswith(".py"):
        from tactic.command import PythonCmd
        cmd = PythonCmd(file_path=path)
        manifest = cmd.execute()

    if not manifest:
        print("No manifest discovered in [%s]" % path)
        return

    xml = Xml()
    xml.read_string(manifest)
    nodes = xml.get_nodes("manifest/*")

    sobjects = []
    # Loop variable renamed from "node" so it no longer shadows the method
    # parameter; the unused enumerate index was dropped.
    for child in nodes:
        name = my.xml.get_node_name(child)
        if name == 'sobject':
            dumped_sobjects = my.handle_sobject(child)
            if not dumped_sobjects:
                dumped_sobjects = []
            sobjects.extend(dumped_sobjects)
        elif name == 'search_type':
            my.handle_search_type(child)
        elif name == 'include':
            my.handle_include(child)
def get_file_paths(my, transaction, mode='lib'):
    '''Collect the paths of every file created by the given transaction.

    transaction: sobject whose "transaction" column holds the transaction XML
    mode: 'relative' keeps the source-relative paths; any other value
          prefixes the asset base directory.
    Returns a list of path strings (empty when there is no transaction XML).
    '''
    transaction_xml = transaction.get_xml_value("transaction")
    if not transaction_xml:
        return []

    from pyasm.common import Xml, Environment

    # the column may come back either as a raw string or an Xml object
    if isinstance(transaction_xml, basestring):
        doc = Xml()
        doc.read_string(transaction_xml)
    else:
        doc = transaction_xml

    asset_base = Environment.get_asset_dir()

    results = []
    for file_node in doc.get_nodes("transaction/file"):
        # only files that were created by the transaction are of interest
        if doc.get_attribute(file_node, "type") != 'create':
            continue
        src = doc.get_attribute(file_node, "src")
        if mode == 'relative':
            results.append(src)
        else:
            results.append("%s/%s" % (asset_base, src))
    return results
def delete_files(my, nodes):
    '''Delete from disk the files referenced by the given manifest nodes,
    recursing into <include> nodes whose python script yields a nested
    manifest.  <python> nodes are skipped (they are scripts, not managed
    files).  Returns None.
    '''
    # clean out all of the files
    for node in nodes:
        name = my.xml.get_node_name(node)
        if name == "include":
            path = my.xml.get_attribute(node, "path")
            if not path:
                # message corrected: this branch handles <include> nodes,
                # not search types
                print("WARNING: No path found for include in manifest")
                continue
            path = "%s/%s" % (my.plugin_dir, path)
            if path.endswith(".py"):
                from tactic.command import PythonCmd
                cmd = PythonCmd(file_path=path)
                manifest = cmd.execute()
                if manifest:
                    xml = Xml()
                    xml.read_string(manifest)
                    include_nodes = xml.get_nodes("manifest/*")
                    # recurse into the included manifest
                    my.delete_files(include_nodes)
        elif name == "python":
            # don't delete python node file
            pass
        else:
            path = my.get_path_from_node(node)
            if path and os.path.exists(path):
                # print converted to function form for py2/py3 consistency
                # with the rest of this method
                print("Deleting: %s" % path)
                os.unlink(path)
def verify_dir(my, base):
    '''Check whether a transaction directory (or archive) has fully arrived.

    base: path of the candidate transaction directory or .zip/.zip.enc file.
    Returns True when the transaction looks complete: every file listed in
    _transaction.xml exists and matches its recorded size.  Returns False
    for local (own-server) transactions, non-transaction paths, or when any
    file is missing or mismatched.
    '''
    # ignore transactions that are derived from this server
    server_code = Config.get_value("install", "server")
    if base.startswith("%sTRANSACTION" % server_code):
        return False
    if base.find("TRANSACTION") == -1:
        return False

    if not os.path.isdir(base):
        # encrypted or plain zip archives are accepted as-is
        return base.endswith((".zip.enc", ".zip"))

    transaction_path = "%s/_transaction.xml" % base
    if not os.path.exists(transaction_path):
        return False

    xml = Xml()
    xml.read_file(transaction_path)
    nodes = xml.get_nodes("transaction/file")

    # verify that all the files are present
    for node in nodes:
        rel_path = xml.get_attribute(node, "rel_path")
        src_path = "%s/%s" % (base, rel_path)
        if not os.path.exists(src_path):
            print("[%s] has not arrived" % src_path)
            return False

        st_size = xml.get_attribute(node, "size")
        if st_size:
            st_size = int(st_size)
        else:
            st_size = -1

        # check that the size is the same
        if st_size != -1 and st_size != os.path.getsize(src_path):
            print("[%s] size does not match" % src_path)
            return False

        # NOTE(review): the original also read the "code" attribute (and
        # looked up the sthpw/file entry), the "src" attribute and the "md5"
        # attribute without ever using them; the md5 checksum is never
        # actually verified — TODO confirm whether checksum verification
        # should be added here.

    # all the tests have passed
    return True
class Package(Command):
    '''Command that checks a set of files, described by a package XML
    document, into a new snapshot of the given sobject/context.'''

    def __init__(self, search_key, context, package):
        self.search_key = search_key    # sobject the snapshot attaches to
        self.context = context          # checkin context
        self.package = package          # raw package XML string
        self.package_xml = Xml()
        self.package_xml.read_string(package)
        super(Package, self).__init__()

    def execute(self):
        '''For every <file_type> in the package, evaluate its expression to
        find files and add each one to a freshly created snapshot.  Failures
        on individual files are reported and skipped.'''
        from tactic_client_lib import TacticServerStub
        server = TacticServerStub.get(protocol='local')

        # create a new snapshot
        snapshot = server.create_snapshot(self.search_key, self.context)

        # get all of the file_types
        file_nodes = self.package_xml.get_nodes("package/file_type")
        count = 0
        for file_node in file_nodes:
            name = self.package_xml.get_attribute(file_node, "name")
            values = self.package_xml.get_node_values_of_children(file_node)
            expression = values.get("expression")
            dir_naming = values.get("dir_naming")
            file_naming = values.get("file_naming")

            files = Search.eval(expression)
            # local renamed from "file" to avoid shadowing the builtin
            for file_path in files:
                # each added file gets a unique type suffix
                file_type = "%s%s" % (name, count)
                try:
                    # FIXME: the assumed action is to checkin
                    server.add_file(snapshot, file_path, file_type=file_type,
                                    mode='copy', dir_naming=dir_naming,
                                    file_naming=file_naming)
                    # What if we just wished to copy? Can we run the files
                    # through a naming convention filter?
                    count += 1
                except Exception as e:
                    # print converted to function form (py2/py3 compatible);
                    # keep going on individual file failures
                    print("WARNING: %s" % e)
def get_default_task_xml():
    '''Return the default task pipeline XML (TASK_PIPELINE) with each
    process node's "color" attribute filled from the task status defaults.'''
    # NOTE(review): the original declared "global TASK_PIPELINE", which is
    # unnecessary for read-only access and has been removed.
    from pyasm.web import Palette
    # NOTE(review): the Palette instance itself was never used; the call is
    # kept in case Palette.get() initializes theme state — TODO confirm.
    Palette.get()

    xml = Xml()
    xml.read_string(TASK_PIPELINE)
    nodes = Xml.get_nodes(xml, "pipeline/process")
    for node in nodes:
        process = Xml.get_attribute(node, "name")
        color = Task.get_default_color(process)
        Xml.set_attribute(node, "color", color)

    return xml.to_string()
def handle_include(my, node):
    '''Execute an <include> node's python script and process the manifest
    nodes it returns, in reverse order, through handle_nodes().

    Raises TacticException when the "path" attribute is missing.
    '''
    path = my.xml.get_attribute(node, "path")
    if not path:
        raise TacticException("No path found for search type in manifest")
    path = "%s/%s" % (my.plugin_dir, path)

    # Pre-initialize so non-.py paths fall through cleanly instead of
    # raising a NameError below.
    manifest = None
    if path.endswith(".py"):
        from tactic.command import PythonCmd
        cmd = PythonCmd(file_path=path)
        manifest = cmd.execute()

    # guard added: a script may legitimately return no manifest; the other
    # variant of this method in this file already performs this check, and
    # Xml.read_string on an empty value would fail otherwise
    if not manifest:
        return

    xml = Xml()
    xml.read_string(manifest)
    nodes = xml.get_nodes("manifest/*")
    nodes.reverse()
    my.handle_nodes(nodes)
def handle_include(my, node):
    '''Run the python include referenced by the node's "path" attribute and
    hand the resulting manifest nodes, reversed, to handle_nodes().

    Raises TacticException when the "path" attribute is missing.
    '''
    rel_path = my.xml.get_attribute(node, "path")
    if not rel_path:
        raise TacticException("No path found for search type in manifest")

    full_path = "%s/%s" % (my.plugin_dir, rel_path)

    # only python includes are handled
    if not full_path.endswith(".py"):
        return

    from tactic.command import PythonCmd
    manifest = PythonCmd(file_path=full_path).execute()
    if not manifest:
        return

    manifest_xml = Xml()
    manifest_xml.read_string(manifest)

    child_nodes = manifest_xml.get_nodes("manifest/*")
    child_nodes.reverse()
    my.handle_nodes(child_nodes)
def execute(my):
    '''Read the naming XML submitted through the web form, annotate each
    <part> node with the type the user selected, and write the updated XML
    back onto the sobject's column.'''
    sobject = my.sobject
    column = my.get_name()
    web = WebContainer.get_web()

    naming = web.get_form_value(column)
    if not naming:
        return

    xml = Xml(string=naming)
    sample = xml.get_value("naming/@sample")
    # the sample name is tokenized on path separators, dots and underscores
    tokens = re.split('[\\/._]', sample)

    # make some adjustments based on selections
    part_nodes = xml.get_nodes("naming/part")
    for idx, part_node in enumerate(part_nodes):
        selection = web.get_form_value("type_%s" % idx)
        token = tokens[idx]
        if not selection:
            continue
        if selection == "placeholder":
            Xml.set_attribute(part_node, "type", "placeholder")
            Xml.set_attribute(part_node, "value", token)
        else:
            # selection is encoded as "type/name"
            part_type, part_name = selection.split("/")
            Xml.set_attribute(part_node, "type", part_type)
            Xml.set_attribute(part_node, "name", part_name)

    sobject.set_value(column, xml.to_string())
def execute(self):
    '''Resolve the paths of every file created by the transaction XML
    supplied in kwargs.

    kwargs:
        transaction_xml: transaction XML string or Xml object (required)
        mode: 'relative' keeps source-relative paths; any other value
              prefixes the asset base dir unless the path already starts
              with it.  Defaults to 'lib'.
    Returns a list of path strings.
    '''
    mode = self.kwargs.get('mode') or 'lib'

    transaction_xml = self.kwargs.get("transaction_xml")
    assert(transaction_xml)

    from pyasm.common import Xml, Environment

    # accept either a raw string or a pre-parsed Xml object
    if isinstance(transaction_xml, basestring):
        doc = Xml()
        doc.read_string(transaction_xml)
    else:
        doc = transaction_xml

    asset_base = Environment.get_asset_dir()

    paths = []
    for file_node in doc.get_nodes("transaction/file"):
        # only files that were created by the transaction are of interest
        if doc.get_attribute(file_node, "type") != 'create':
            continue
        src = doc.get_attribute(file_node, "src")
        if mode == 'relative' or src.startswith(asset_base):
            paths.append(src)
        else:
            paths.append("%s/%s" % (asset_base, src))
    return paths
def execute(self):
    '''Apply the user's per-part type selections from the web form to the
    naming XML and store the result on the sobject's column.'''
    sobject = self.sobject
    column = self.get_name()
    web = WebContainer.get_web()

    naming = web.get_form_value(column)
    if not naming:
        return

    xml = Xml(string=naming)
    sample = xml.get_value("naming/@sample")
    # split the sample into tokens on path separators, dots and underscores
    tokens = re.split('[\\/._]', sample)

    # make some adjustments based on selections
    part_nodes = xml.get_nodes("naming/part")
    for idx, part_node in enumerate(part_nodes):
        selection = web.get_form_value("type_%s" % idx)
        token = tokens[idx]
        if not selection:
            continue
        if selection == "placeholder":
            Xml.set_attribute(part_node, "type", "placeholder")
            Xml.set_attribute(part_node, "value", token)
        else:
            # selection is encoded as "type/name"
            part_type, part_name = selection.split("/")
            Xml.set_attribute(part_node, "type", part_type)
            Xml.set_attribute(part_node, "name", part_name)

    sobject.set_value(column, xml.to_string())
class SyncFilter(object):
    '''Filters a transaction log against security rules (AccessManager),
    producing a reduced transaction XML that contains only the actions the
    rules allow.  Results are retrieved via get_filtered_xml()/get_message().'''

    def __init__(self, **kwargs):
        self.kwargs = kwargs
        # the transaction log sobject to be filtered
        self.log = self.kwargs.get("transaction")
        # XML access rules applied against the transaction
        self.rules = self.kwargs.get("rules")
        # human-readable reason when actions were filtered out
        self.message = ""

    def execute(self):
        log = self.log
        rules = self.rules

        # Give rules. Only notes will get through
        # we need heirarchical rules. This will ensure that only notes
        # for project/assets will pass
        # Here, the second one is much more difficult to do.
        rulesXXX = '''
        <rule group='heirarchy' key='project/asset.sthpw/note' access='allow'/>
        <rule group='heirarchy' key="project/asset.sthpw/note['assigned','beth']" access='allow'/>"
        '''

        access_manager = AccessManager()
        access_manager.add_xml_rules(rules)

        # filter out project: if the transaction's namespace (project) is not
        # allowed, replace the whole transaction with an empty one
        namespace = log.get_value("namespace")
        key1 = { 'code': namespace }
        key2 = { 'code': '*' }
        keys = [key1, key2]
        if not access_manager.check_access("project", keys, "allow", default="deny"):
            self.filtered_xml = Xml()
            self.filtered_xml.read_string("<transaction/>")
            self.message = "Transaction prevented due to project restriction"
            return

        # filter the transaction against the security model
        xml = log.get_xml_value("transaction")

        # build a fresh <transaction> document containing only allowed nodes
        self.filtered_xml = Xml()
        self.filtered_xml.create_doc("transaction")
        root2 = self.filtered_xml.get_root_node()

        nodes = xml.get_nodes("transaction/*")
        num_nodes = len(nodes)
        count = 0
        for node in nodes:
            if Xml.get_node_name(node) == "sobject":
                search_type = xml.get_attribute(node, "search_type")
                # strip any "?project=..." style suffix from the search type
                parts = search_type.split("?")
                search_type = parts[0]

                # filter search types
                key1 = { 'code': search_type }
                key2 = { 'code': "*" }
                keys = [ key1, key2 ]
                if not access_manager.check_access("search_type", keys, "allow", default="deny"):
                    continue

                # check hierachical rule
                parent_type = xml.get_attribute(node, "parent_type")
                key = "%s.%s" % (parent_type, search_type)
                # NOTE(review): "key" is built but never checked against the
                # access manager — the hierarchical rule appears to be
                # unimplemented; TODO confirm.

                self.filtered_xml.append_child(root2, node)
                count += 1
            else:
                # non-sobject nodes pass through unfiltered
                self.filtered_xml.append_child(root2, node)
                count += 1

        # everything was filtered out: record an explanatory message
        if len(nodes) != 0 and len(self.filtered_xml.get_nodes("transaction/*")) == 0:
            self.message = "All actions filtered due to security restrictions (%s actions)" % num_nodes

    def get_filtered_xml(self):
        return self.filtered_xml

    def get_message(self):
        return self.message
class Pipeline(SObject):
    '''Represents a pipeline of processes and their relationships (connects).
    The definition is stored as XML in the "pipeline" column; Process and
    ProcessConnect objects are built from that XML.'''

    SEARCH_TYPE = "sthpw/pipeline"

    def __init__(self, search_type="sthpw/pipeline", columns=None, result=None, fast_data=None):
        super(Pipeline, self).__init__(search_type, columns, result, fast_data=fast_data)

        self.processes = []
        self.recursive_processes = []
        self.connects = {}
        self.pipeline_dict = {}

        #if columns != None:
        # putting no exception here to ensure that this can be put into
        # a select widget which no uses distinct for the value column
        xml_str = self.get_value("pipeline", no_exception=True)

        # don't cache a potential empty xml when Pipeline.create() is call
        if xml_str:
            self.set_pipeline(xml_str, cache=False)

        self.process_sobjects = None

    def get_defaults(self):
        '''The default, if not specified is to set the current project'''
        defaults = {}
        project_code = Project.get_project_code()
        defaults['project_code'] = project_code
        self.update_dependencies()
        return defaults

    def on_updateX(self):
        '''Mirror this pipeline's data into the config/pipeline table on update.'''
        # initialize the triggers for the workflow
        """
        event = "change|sthpw/pipeline"
        trigger = SearchType.create("sthpw/trigger")
        trigger.set_value("event", event)
        trigger.set_value("class_name", ProjectPipelineTrigger)
        trigger.set_value("mode", "same process,same transaction")
        Trigger.append_static_trigger(trigger, startup=startup)
        """
        if self.SEARCH_TYPE == "config/pipeline":
            return

        code = self.get_value("code")
        search = Search("config/pipeline")
        search.add_filter("code", code)
        pipeline = search.get_sobject()
        if not pipeline:
            pipeline = SearchType.create("config/pipeline")

        items = self.data.items()
        for name, value in items:
            if name.startswith("__"):
                continue
            if name in ["id", "project_code"]:
                continue
            if not value:
                continue
            pipeline.set_value(name, value)
        pipeline.commit(triggers="none")

    def on_insertX(self):
        # Copy this to the config/pipeline table. Currently this table
        # is not being used, however, pipelines really should be defined
        # there. It is an unfortunate historical wart that pipelines
        # are stored in the sthpw database. In some future release of
        # TACTIC, the pipeline table in the sthpw database will be deprecated
        # This copy will ensure that over time, the impact of this move over
        # will be minimized
        if self.SEARCH_TYPE == "config/pipeline":
            return

        search = Search("config/pipeline")
        search.add_filter("code", self.get_code())
        pipeline = search.get_sobject()
        if not pipeline:
            pipeline = SearchType.create("config/pipeline")

        for name, value in self.get_data().items():
            if name.startswith("__"):
                continue
            if name in ["id", "project_code"]:
                continue
            if not value:
                continue
            pipeline.set_value(name, value)
        pipeline.commit(triggers="none")

    def on_deleteX(self):
        '''Remove the mirrored config/pipeline entry when this one is deleted.'''
        if self.SEARCH_TYPE == "config/pipeline":
            return
        search = Search("config/pipeline")
        search.add_filter("code", self.get_code())
        pipeline = search.get_sobject()
        if pipeline:
            pipeline.delete()

    def update_dependencies(self):
        '''Function that should be run on insert/update. It's already
        automatically called during insert. On update, the caller needs to
        call this explicitly. It checks the search type this pipeline is
        associated with and if there is no pipeline code column, then update
        it. It updates the process table also.'''
        search_type = self.get_value('search_type')
        self.update_process_table(search_type=search_type)

        # don't do anything for task sType
        if search_type == 'sthpw/task':
            return
        if not search_type:
            return

        if ProdSetting.get_value_by_key('autofill_pipeline_code') != 'false':
            try:
                columns = SearchType.get_columns(search_type)
                if not 'pipeline_code' in columns:
                    # add the pipeline code column
                    from pyasm.command import ColumnAddCmd
                    cmd = ColumnAddCmd(search_type, "pipeline_code", "varchar")
                    cmd.execute()
            except SqlException as e:
                # BUGFIX: the original format string was "... for %" which
                # raises ValueError during formatting; corrected to "%s"
                print("Error creating column [pipeline_code] for %s" % search_type)
                pass

            # go through all of the sobjects and set all the empty ones
            # to the new pipeline
            search = Search(search_type)
            search.add_op("begin")
            search.add_filter("pipeline_code", "NULL", op='is', quoted=False)
            search.add_filter("pipeline_code", "")
            search.add_op("or")
            sobject_ids = search.get_sobject_ids()

            if sobject_ids:
                # this is much faster and memory efficient
                db_resource = SearchType.get_db_resource_by_search_type(search_type)
                sql = DbContainer.get(db_resource)
                tbl = search.get_table()
                sobject_ids = [str(x) for x in sobject_ids]
                pipeline_code = self.get_value("code")
                sql.do_update('''UPDATE "%s" SET "pipeline_code" = '%s' WHERE id in (%s) ''' % (tbl, pipeline_code, ','.join(sobject_ids)))
            """
            for sobject in sobjects:
                if not sobject.get_value("pipeline_code"):
                    sobject.set_value("pipeline_code", self.get_value("code") )
                    sobject.commit(triggers=False)
            """

    def update_process_table(self, search_type=None):
        ''' make sure to update process table'''
        template = self.get_template_pipeline()
        if template:
            if template.get_code() == self.get_code():
                template_processes = []
            else:
                template_processes = template.get_process_names()
        else:
            template_processes = []

        process_names = self.get_process_names()
        pipeline_code = self.get_code()

        search = Search("config/process")
        search.add_filter("pipeline_code", pipeline_code)
        process_sobjs = search.get_sobjects()
        existing_names = SObject.get_values(process_sobjs, 'process')

        pipeline_has_updates = False
        count = 1
        for process_name in process_names:
            exists = False
            for process_sobj in process_sobjs:
                # if it already exist, then update
                if process_sobj.get_value("process") == process_name:
                    exists = True
                    break

            if not exists:
                process_sobj = SearchType.create("config/process")
                # default to (main) for non-task status pipeline
                if search_type and search_type != 'sthpw/task':
                    process_sobj.set_value('subcontext_options', '(main)')
                process_sobj.set_value("pipeline_code", pipeline_code)
                process_sobj.set_value("process", process_name)

                # copy information over from the template
                if process_name in template_processes:
                    template_attrs = template.get_process_attrs(process_name)
                    process = self.get_process(process_name)
                    for name, value in template_attrs.items():
                        if name in ['xpos', 'ypos', 'name']:
                            continue
                        process.set_attribute(name, value)
                        pipeline_has_updates = True

                    search = Search("config/process")
                    search.add_filter("process", process_name)
                    # NEED ANOTHER FILTER for templates here
                    search.add_filter("pipeline_code", "%/__TEMPLATE__", op="like")

                    # copy certain values from the template
                    template_process = search.get_sobject()
                    # BUGFIX: guard against no matching template process —
                    # calling get_data() on None raised AttributeError before
                    if template_process:
                        for name, value in template_process.get_data().items():
                            if not value:
                                continue
                            if name in ['checkin_mode']:
                                process_sobj.set_value(name, value)

                attrs = self.get_process_attrs(process_name)
                color = attrs.get('color')
                if color:
                    process_sobj.set_value("color", color)

                process_sobj.set_value("sort_order", count)
                process_sobj.commit()
            count += 1

        if pipeline_has_updates:
            self.set_value("pipeline", self.get_pipeline_xml().to_string())
            self.commit()

        # delete obsolete
        obsolete = set(existing_names) - set(process_names)
        if obsolete:
            for obsolete_name in obsolete:
                for process_sobj in process_sobjs:
                    if process_sobj.get_value("process") != obsolete_name:
                        continue
                    # FIXME: this node type is always None
                    process_obj = self.get_process(obsolete_name)
                    if process_obj:
                        node_type = process_obj.get_type()
                        try:
                            from pyasm.command import CustomProcessConfig
                            handler = CustomProcessConfig.get_delete_handler(node_type, {})
                        except Exception as e:
                            handler = None
                        if handler:
                            handler.execute()
                    # delete it
                    process_sobj.delete()

    def get_name(self, long=False):
        '''this is the old function, kept for backward-compatibility'''
        #TODO: remove this function here
        return self.get_code()

    def set_pipeline(self, pipeline_xml, cache=True):
        '''set the pipeline externally'''
        # cache according to pipeline code, which will share the same xml object
        if self.is_insert():
            cache = False

        search_key = self.get_search_key()
        xml_dict = Container.get("Pipeline:xml")
        if xml_dict == None:
            xml_dict = {}
            Container.put("Pipeline:xml", xml_dict)

        self.xml = xml_dict.get(search_key)
        if self.xml == None:
            self.xml = Xml()
            if cache:
                xml_dict[search_key] = self.xml

        if not pipeline_xml:
            pipeline_xml = "<pipeline/>"

        try:
            self.xml.read_string(pipeline_xml)
        except XmlException as e:
            # fall back to an empty pipeline on malformed xml
            self.xml.read_string("<pipeline/>")

        # clear these again when set externally
        self.processes = []
        self.recursive_processes = []

        # create the process and pipelines
        process_nodes = self.xml.get_nodes("pipeline/process | pipeline/pipeline")
        for node in process_nodes:
            node_name = self.xml.get_node_name(node)
            process = Process(node)
            process.set_parent_pipeline_code(self.get_code())
            self.processes.append(process)

            if node_name == "pipeline":
                name = Xml.get_attribute(node, "name")
                # prevent infinite loop
                if name == self.get_code():
                    continue
                child = Pipeline.get_by_code(name)
                if not child:
                    continue
                self.pipeline_dict[name] = child
                process.set_child_pipeline(child)

    def get_pipeline_xml(self):
        return self.xml

    def to_string(self):
        return self.xml.to_string()

    def get_process(self, name):
        '''returns a Process object'''
        # a Process object may be passed in place of a name
        if type(name) not in types.StringTypes:
            name = name.get_name()

        # first try the top level
        for process in self.processes:
            if process.get_name() == name:
                return process

        # Then iterate. This may be slow
        processes = self.get_processes(recurse=True)
        for process in processes:
            if process.get_full_name() == name:
                return process

        return None
        #raise PipelineException( "Pipeline '%s' does not have process '%s'" % \
        #    (self.get_name(),name) )

    def get_processes(self, recurse=False, type=None):
        '''returns all the Process objects in this pipeline'''
        if type and isinstance(type, basestring):
            types = [type]
        else:
            types = type

        if recurse:
            if self.recursive_processes:
                return self.recursive_processes
            else:
                # add child processes
                for process in self.processes:
                    if types and process.get_type() not in types:
                        continue
                    self.recursive_processes.append(process)
                    child_pipeline = process.get_child_pipeline()
                    if not child_pipeline:
                        continue
                    child_processes = child_pipeline.get_processes(recurse=recurse)
                    for x in child_processes:
                        x.set_sub_pipeline_process(True)
                    self.recursive_processes.extend(child_processes)
                return self.recursive_processes
        else:
            if types:
                ret_processes = []
                for process in self.processes:
                    if process.get_type() not in types:
                        continue
                    ret_processes.append(process)
                return ret_processes
            else:
                return self.processes

    def get_process_attrs(self, name):
        '''returns the XML attributes of the named process (empty dict if absent)'''
        process = self.get_process(name)
        if process:
            return process.get_attributes()
        else:
            return {}

    def get_process_names(self, recurse=False, type=None):
        '''returns all the Process names in this pipeline'''
        if type and isinstance(type, basestring):
            types = [type]
        else:
            types = type

        processes = self.get_processes(recurse, type=types)
        if recurse:
            process_names = []
            for process in processes:
                if types and process.get_type() not in types:
                    continue
                if process.is_from_sub_pipeline():
                    process_names.append(process.get_full_name())
                else:
                    process_names.append(process.get_name())
            return process_names
        else:
            return [process.get_name() for process in processes]

    def get_process_sobject(self, process):
        '''returns the config/process sobject for the given process name,
        caching all of this pipeline's process sobjects on first call'''
        # search all processes and cache all of the sobject locally
        if self.process_sobjects == None:
            search = Search("config/process")
            search.add_filter("pipeline_code", self.get_code())
            sobjects = search.get_sobjects()
            self.process_sobjects = {}
            for process_sobject in sobjects:
                # prevent changing variable process
                pcs = process_sobject.get("process")
                self.process_sobjects[pcs] = process_sobject

        process_sobject = self.process_sobjects.get(process)
        return process_sobject

    def get_process_sobjects(self):
        '''returns the process-name -> config/process sobject cache,
        forcing it to be populated first'''
        process_name = "dummy"
        self.get_process_sobject(process_name)
        return self.process_sobjects

    def get_index(self, name):
        '''returns the positional index of the named top-level process
        (None if not found)'''
        index = 0
        for process in self.processes:
            if process.get_name() == name:
                return index
            index += 1

    def _get_connects(self, process="", direction='from'):
        '''returns ProcessConnect objects attached to the given process in
        the given direction; all connects when process is empty'''
        if direction == "from":
            opposite = "to"
        else:
            opposite = "from"

        if not process:
            connect_nodes = self.xml.get_nodes("pipeline/connect")
        else:
            connect_nodes = self.xml.get_nodes(
                "pipeline/connect[@%s='%s' or @%s='*']" % (direction, process, direction))

        connects = []
        for node in connect_nodes:
            # skip self-referencing connects
            opposite_name = Xml.get_attribute(node, opposite)
            full_name = "%s/%s" % (self.get_name(), opposite_name)
            if process == opposite_name or process == full_name:
                continue
            connects.append(ProcessConnect(node))
        return connects

    def get_input_processes(self, process, type=None, to_attr=None):
        '''returns the Process objects that feed into the given process'''
        connects = self._get_connects(process, direction='to')
        processes = []
        for connect in connects:
            if to_attr:
                connect_to_attr = connect.get_attr("to_attr")
                if connect_to_attr != to_attr:
                    continue
            from_connect = connect.get_from()
            process = self.get_process(from_connect)
            if process:
                if type and process.get_type() != type:
                    continue
                processes.append(process)
        return processes

    def get_input_process_names(self, process, type=None, from_attr=None):
        input_processes = self.get_input_processes(process, type, from_attr)
        process_names = [x.get_name() for x in input_processes]
        return process_names

    def get_output_processes(self, process, type=None, from_attr=None):
        '''returns the Process objects that the given process feeds into,
        following cross-pipeline connects when present'''
        connects = self._get_connects(process, direction="from")
        if not connects:
            return []

        processes = []
        for connect in connects:
            # make sure there are no empty contexts
            to = connect.get_to()

            if from_attr:
                connect_from_attr = connect.get_attr("from_attr")
                if connect_from_attr != from_attr:
                    continue

            to_pipeline = connect.get_to_pipeline()
            if to_pipeline:
                pipeline = Pipeline.get_by_code(to_pipeline)
                process = pipeline.get_process(to)
                if type and process.get_type() != type:
                    continue
                if process:
                    processes.append(process)
            else:
                process = self.get_process(to)
                if process:
                    processes.append(process)
        return processes

    def get_output_process_names(self, process, type=None, from_attr=None):
        output_processes = self.get_output_processes(process, type, from_attr)
        process_names = [x.get_name() for x in output_processes]
        return process_names

    def get_output_contexts(self, process, show_process=False):
        '''returns the output contexts of a process; with show_process=True,
        (to_process, context) tuples are returned instead'''
        connects = self._get_connects(process, direction="from")
        if not connects:
            # no outgoing connects: the process name is its own context
            if show_process:
                data = (None, process)
            else:
                data = process
            return [data]

        contexts = []
        for connect in connects:
            # make sure there are no empty contexts
            context = connect.get_context()
            if not context:
                context = connect.get_to()
            if show_process:
                data = (connect.get_to(), context)
            else:
                data = context
            contexts.append(data)
        return contexts

    def get_input_contexts(self, process, show_process=False):
        '''returns the input contexts of a process; with show_process=True,
        (from_process, context) tuples are returned instead'''
        connects = self._get_connects(process, direction='to')
        contexts = []
        for connect in connects:
            # make sure there are no empty contexts
            context = connect.get_context()
            if not context:
                context = connect.get_from()
            if show_process:
                data = (connect.get_from(), context)
            else:
                data = context
            contexts.append(data)
        return contexts

    def get_group(self, process_name):
        process = self.get_process(process_name)
        return process.get_group()

    def get_input_connects(self, process):
        connects = self._get_connects(process, direction="to")
        if not connects:
            return []
        else:
            return connects

    def get_output_connects(self, process):
        connects = self._get_connects(process, direction="from")
        if not connects:
            return []
        else:
            return connects

    # DEPRECATED
    def get_forward_connects(self, process):
        connects = self._get_connects(process)
        process_names = []
        for connect in connects:
            process_names.append(connect.get_to())
        return process_names

    # DEPRECATED
    def get_backward_connects(self, process):
        connects = self._get_connects(process, direction='to')
        process_names = []
        for connect in connects:
            process_names.append(connect.get_from())
        return process_names

    def get_all_contexts(self):
        '''returns the unique contexts of all connects in this pipeline'''
        connects = self._get_connects()
        contexts = []
        for connect in connects:
            context = connect.get_context()
            if context not in contexts:
                contexts.append(context)
        return contexts

    #
    # support for new pipeline methods
    #
    def get_input_snapshots(self, sobject, process_name, input_name, version='latest'):
        '''gets the snapshots of the input'''
        assert version in ['latest', 'current']

        process_node = self.xml.get_node(
            "pipeline/process[@name='%s']/input[@name='%s']" % (process_name, input_name))
        search_type = Xml.get_attribute(process_node, "search_type")
        context = Xml.get_attribute(process_node, "context")
        filter = Xml.get_attribute(process_node, "filter")

        # get the sobjects
        sobjects = sobject.get_all_children(search_type)

        # get the snapshots
        search = Search("sthpw/snapshot")
        search.add_filter('context', context)
        #if version == 'latest':
        #    search.add_filter("is_latest", 1)
        #elif version == 'current':
        #    search.add_filter("is_current", 1)

        # build filters for search_type, search_id combinations
        filters = []
        for sobject in sobjects:
            filter = "(\"search_type\" = '%s' and \"search_id\" = %s)" % (
                sobject.get_search_type(), sobject.get_id())
            filters.append(filter)
        search.add_where("( %s )" % " or ".join(filters))

        snapshots = search.get_sobjects()
        return snapshots

    #
    # Static methods
    #
    def create(name, desc, search_type, xml=None, code=None, color=None):
        '''will only create if it does not exist, otherwise it just updates'''
        if code:
            sobject = Pipeline.get_by_code(code)
        else:
            sobject = None

        if sobject == None:
            #sobject = Pipeline( Pipeline.SEARCH_TYPE )
            sobject = SearchType.create(Pipeline.SEARCH_TYPE)
        else:
            # the pipeline already exists: return it untouched
            return sobject

        if not xml:
            xml = Xml()
            xml.create_doc('pipeline')
        if isinstance(xml, basestring):
            xml_string = xml
            xml = Xml()
            xml.read_string(xml_string)

        sobject.set_value("pipeline", xml.get_xml())
        sobject.set_pipeline(xml.to_string())
        sobject.set_value('timestamp', Sql.get_default_timestamp_now(), quoted=False)
        if code:
            sobject.set_value('code', code.strip())
        sobject.set_value('name', name.strip())
        sobject.set_value('search_type', search_type)
        sobject.set_value('description', desc)
        if color:
            sobject.set_value("color", color)
        sobject.commit()

        # seed the config/process table for every process in the new pipeline
        process_names = sobject.get_process_names()
        for i, process_name in enumerate(process_names):
            process = SearchType.create("config/process")
            process.set_value("pipeline_code", sobject.get_code())
            process.set_value("process", process_name)
            process.set_value("sort_order", i)
            process.set_value("subcontext_options", "(main)")
            process.commit()

        return sobject
    create = staticmethod(create)

    def get_by_code(cls, code, allow_default=False):
        '''it is fatal not to have a pipeline, so put a default'''
        if not code:
            return None

        # first look at project specific pipeline
        pipeline = Search.get_by_code("config/pipeline", code)
        if not pipeline:
            pipeline = super(Pipeline, cls).get_by_code(code)

        if not pipeline:
            # synthesize well-known built-in pipelines on demand
            if code == 'task':
                # Remap this to a default from projects settings
                task_code = ProjectSetting.get_by_key("task_pipeline")
                if not task_code:
                    task_code = "task"
                # Create a default task pipeline
                pipeline = SearchType.create("sthpw/pipeline")
                pipeline.set_value("code", task_code)
                from pyasm.biz import Task
                xml = Task.get_default_task_xml()
                pipeline.set_value("pipeline", xml)
                pipeline.set_pipeline(xml)
                pipeline.set_value("search_type", "sthpw/task")
            elif code == 'approval':
                # Create a default task approval pipeline
                pipeline = SearchType.create("sthpw/pipeline")
                pipeline.set_value("code", "approval")
                from pyasm.biz import Task
                xml = Task.get_default_approval_xml()
                pipeline.set_value("pipeline", xml)
                pipeline.set_pipeline(xml)
                pipeline.set_value("search_type", "sthpw/task")
            elif code == 'dependency':
                pipeline = SearchType.create("sthpw/pipeline")
                pipeline.set_value("code", "dependency")
                from pyasm.biz import Task
                xml = Task.get_default_dependency_xml()
                pipeline.set_value("pipeline", xml)
                pipeline.set_pipeline(xml)
                pipeline.set_value("search_type", "sthpw/task")
            elif code == 'progress':
                pipeline = SearchType.create("sthpw/pipeline")
                pipeline.set_value("code", "progress")
                from pyasm.biz import Task
                # NOTE(review): "progress" reuses the dependency xml here —
                # TODO confirm this is intentional
                xml = Task.get_default_dependency_xml()
                pipeline.set_value("pipeline", xml)
                pipeline.set_pipeline(xml)
                pipeline.set_value("search_type", "sthpw/task")
            elif code == 'milestone':
                pipeline = SearchType.create("sthpw/pipeline")
                pipeline.set_value("code", "milestone")
                from pyasm.biz import Task
                xml = Task.get_default_milestone_xml()
                pipeline.set_value("pipeline", xml)
                pipeline.set_pipeline(xml)
                pipeline.set_value("search_type", "sthpw/milestone")
            elif code == 'snapshot':
                pipeline = SearchType.create("sthpw/pipeline")
                pipeline.set_value("code", "snapshot")
                from pyasm.biz import Task
                xml = Task.get_default_snapshot_xml()
                pipeline.set_value("pipeline", xml)
                pipeline.set_pipeline(xml)
                pipeline.set_value("search_type", "sthpw/snapshot")

        if not pipeline and allow_default:
            search = Search(cls)
            search.add_filter('code', 'default')
            pipeline = search.get_sobject()
            if not pipeline:
                pipeline = cls.create('default', 'default pipeline', '')
                xml = pipeline.get_xml_value("pipeline")
                # create a default process for the table
                root = xml.get_root_node()
                element = xml.create_element("process")
                Xml.set_attribute(element, "name", "default_process")
                Xml.append_child(root, element)
                pipeline.set_value('pipeline', xml.get_xml())
                pipeline.commit()
                # set the pipeline
                pipeline.set_pipeline(pipeline.get_value('pipeline'))
                Environment.add_warning("pipeline autogenerated",
                    "[default] pipeline has just been created.")

        # Sometimes, a pipeline is instantiated without calling set_pipeline()
        # to be looked into
        if pipeline and not pipeline.get_processes():
            pipeline.set_pipeline(pipeline.get_value('pipeline'))

        return pipeline
    get_by_code = classmethod(get_by_code)

    # DEPRECATED
    def get_by_name(name):
        ''' for backward-compatibility, name has been renamed as code '''
        return Pipeline.get_by_code(name)
    get_by_name = staticmethod(get_by_name)

    def get_by_search_type(cls, search_type, project_code=''):
        '''returns all pipelines registered for a given search type'''
        # make sure this is a be search type
        assert search_type
        search_type_obj = SearchType.get(search_type)
        if not search_type_obj:
            return []
        search_type = search_type_obj.get_base_key()

        cache_key = "%s|%s" % (search_type, project_code)

        # commenting out until we have a full implementation of
        # project pipelines
        """
        search = Search("config/pipeline")
        if search_type:
            search.add_filter("search_type", search_type)
        search.add_project_filter(project_code)
        pipelines = cls.get_by_search(search, cache_key, is_multi=True)
        """

        search = Search("sthpw/pipeline")
        if search_type:
            search.add_filter("search_type", search_type)
        search.add_project_filter(project_code)
        pipelines = cls.get_by_search(search, cache_key, is_multi=True)
        if not pipelines:
            return []

        for pipe in pipelines:
            code = pipe.get_code()
            cls.cache_sobject('sthpw/pipeline|%s' % code, pipe)
        return pipelines
    get_by_search_type = classmethod(get_by_search_type)

    def get_process_name_dict(search_type, project_code='', is_group_restricted=False, sobject=None):
        '''get process names for pipelines with a particular search type'''
        pipes = []
        # a specific sobject's pipeline takes precedence over the search type
        if sobject:
            pipe_code = sobject.get_value('pipeline_code', no_exception=True)
            if pipe_code:
                pipe = Pipeline.get_by_code(pipe_code)
                if pipe:
                    pipes = [pipe]
        if not pipes:
            pipes = Pipeline.get_by_search_type(search_type, project_code)

        process_name_dict = {}
        my_group_names = LoginGroup.get_group_names()
        if pipes:
            for pipe in pipes:
                visible_process_names = []
                process_names = pipe.get_process_names(recurse=True)
                if is_group_restricted:
                    # only show processes whose group the user belongs to
                    for process_name in process_names:
                        group_name = pipe.get_group(process_name)
                        if group_name and group_name not in my_group_names:
                            continue
                        else:
                            visible_process_names.append(process_name)
                else:
                    visible_process_names.extend(process_names)
                process_name_dict[pipe.get_code()] = visible_process_names
        return process_name_dict
    get_process_name_dict = staticmethod(get_process_name_dict)

    def get_default():
        return Pipeline.get_by_code("default")
    get_default = staticmethod(get_default)

    def get_process_select_data(search_type, extra_process=[], project_code='',
                                is_filter=False, is_group_restricted=False, sobject=None):
        '''get a tuple of data used for the SelectWdg'''
        context_dict = Pipeline.get_process_name_dict(
            search_type, project_code, is_group_restricted, sobject=sobject)
        labels = []
        values = []
        # BUGFIX-adjacent: sorted() replaces list.sort() on dict.keys(),
        # which is identical under py2 and also survives py3
        keys = sorted(context_dict.keys())
        process_values = Common.sort_dict(context_dict)
        for idx, value in enumerate(process_values):
            key = keys[idx]
            labels.append('<< %s >>' % key)
            if is_filter:
                # add all the process names in this pipeline into the value
                values.append(','.join(value))
            else:
                values.append('')

            # extra process may not be needed
            if extra_process:
                value.extend(extra_process)
            if len(context_dict) > 1 and idx < len(context_dict) - 1:
                value.append('')
            values.extend(value)
            labels.extend(value)
        return labels, values
    get_process_select_data = staticmethod(get_process_select_data)

    def get_by_sobject(sobject, allow_default=False):
        ''' get the pipeline of the sobject'''
        pipeline_name = ''
        if not sobject:
            return None
        if sobject.has_value("pipeline_code"):
            pipeline_name = sobject.get_value("pipeline_code")
        elif sobject.has_value("pipeline"):
            pipeline_name = sobject.get_value("pipeline")
        pipeline = Pipeline.get_by_code(pipeline_name, allow_default=allow_default)
        return pipeline
    get_by_sobject = staticmethod(get_by_sobject)

    def get_template_pipeline(cls, search_type=None):
        '''returns the template pipeline used to seed new process entries'''
        search = Search("sthpw/pipeline")
        search.add_filter("name", "VFX Processes")
        pipeline = search.get_sobject()
        return pipeline
    get_template_pipeline = classmethod(get_template_pipeline)

    def create_pipeline_xml(cls, statuses, process_types=[], process_xpos=[], process_ypos=[]):
        '''create regular pipeline with process_types, xpos, ypos or plain
        task status pipeline'''
        if not statuses:
            statuses = []

        xml = []
        xml.append('''<pipeline>''')

        if process_types:
            for i, status in enumerate(statuses):
                if status == '':
                    continue
                process_type = process_types[i]
                if len(process_xpos) > i:
                    xpos = process_xpos[i]
                else:
                    xpos = None
                if len(process_ypos) > i:
                    ypos = process_ypos[i]
                else:
                    ypos = None
                if xpos and ypos:
                    xml.append('''  <process name="%s" type="%s" xpos="%s" ypos="%s"/>''' % (status, process_type, xpos, ypos))
                else:
                    xml.append('''  <process name="%s" type="%s"/>''' % (status, process_type))
        else:
            for status in statuses:
                if status == '':
                    continue
                xml.append('''  <process name="%s"/>''' % status)

        # chain consecutive non-empty statuses together with connects
        last_status = None
        for i, status in enumerate(statuses):
            if status == '':
                continue
            if i == 0 or last_status == None:
                last_status = status
                continue
            xml.append('''  <connect from="%s" to="%s"/>''' % (last_status, status))
            last_status = status

        xml.append('''</pipeline>''')
        return "\n".join(xml)
    create_pipeline_xml = classmethod(create_pipeline_xml)
class CustomXmlWdg(BaseTableElementWdg):
    '''Creates a widget from xml

    <widget name="Filter">
      <display class="FilterSelectWdg">
        <values>Sc01|Sc02|Sc03</values>
      </display>
    </widget>

    The widget/display/@class attribute names the class to instantiate;
    every child element of <display> becomes an option passed to it.
    '''
    def __init__(self, xml_string):
        # keep both the raw definition and its parsed form
        self.xml_string = xml_string
        self.xml = Xml(string=self.xml_string)
        super(CustomXmlWdg, self).__init__()

    def init(self):
        self.widget_class = self.xml.get_value("widget/display/@class")
        # @draw == "false" suppresses drawing in get_title()/get_display()
        self.draw = self.xml.get_value("widget/display/@draw")
        self.title = self.xml.get_value("widget/@name")
        self.name = self.title

        # convert the widget data: each <display> child becomes an option;
        # a name that occurs more than once accumulates into a list
        options = {}
        nodes = self.xml.get_nodes("widget/display/*")
        for node in nodes:
            name = node.nodeName
            value = Xml.get_node_value(node)
            if name in options:
                existing = options[name]
                # BUGFIX: the old code re-wrapped the stored value on every
                # repeat, nesting lists when a name appeared 3+ times
                # ([[a, b], c]); append to the flat list instead.
                if isinstance(existing, list):
                    existing.append(value)
                else:
                    options[name] = [existing, value]
            else:
                options[name] = value
        self.options = options

        self.widget = Common.create_from_class_path(self.widget_class, [self.title])

    def get_child_widget_class(self):
        '''Return the dotted class path declared in widget/display/@class.'''
        return self.xml.get_value("widget/display/@class")

    def get_child_widget(self):
        return self.widget

    def get_title(self):
        # a fresh child widget is created per title request so that the
        # current sobjects/index can be pushed into it
        self.widget = Common.create_from_class_path(self.widget_class, [self.title])
        self.widget.options = self.options
        self.widget.set_title(self.title)
        self.widget.set_name(self.title)
        Container.put_dict("widgets", self.title, self.widget)

        index = self.get_current_index()
        self.widget.set_sobjects(self.sobjects)
        self.widget.set_current_index(index)

        if self.draw == "false":
            return ""
        else:
            return self.widget.get_title()

    def get_display_widget(self):
        return self.widget

    def get_display(self):
        self.widget.options = self.options
        self.widget.set_title(self.title)
        self.widget.set_name(self.title)
        self.widget.parent_wdg = self.parent_wdg
        Container.put_dict("widgets", self.title, self.widget)

        index = self.get_current_index()
        self.widget.set_sobjects(self.sobjects)
        self.widget.set_search(self.search)
        self.widget.set_current_index(index)

        if self.draw == "false":
            return None
        else:
            return self.widget
def get_display(my):
    '''Demo/test panel: renders a hand-written <custom> xml document whose
    <html> section becomes the panel body and whose <behavior> sections are
    eval'd into behavior dicts and attached to the matching css classes.'''
    my.kwargs['search_key'] = 'prod/asset?project=sample3d&code=chr001'

    # NOTE: the misspelled <elemeent> tag below is part of the original
    # test markup and is reproduced as-is.
    custom = """<?xml version='1.0' encoding='UTF-8'?>
<custom>
<html>
<div>
This is html
<textarea class='spt_test spt_input' name='description'>
</textarea>
<input class='spt_input' type='text' name='title'/>
<br/>
<input class='spt_button1' type='button' value='Press Me'/>
<input class='spt_button2' type='button' value='Press Me2'/>
<input class='spt_button3' type='button' value='Calendar'/>
<input class='spt_refresh' type='button' value='Refresh'/>
<element>
  <display class='tactic.ui.widget.CalendarWdg'/>
</element>
Much simpler!!!
<elemeent class='CalendarWdg' blah='adasf'/>
</div>
</html>
<behavior class='spt_button1'>{
"type": "click_up",
"cbjs_action": '''
app.mel('sphere');
//var top = bvr.src_el.getParent(".spt_panel");
//var values = spt.api.Utility.get_input_values(top);
//console.log(values);
'''
}</behavior>
<behavior class='spt_button2'>{
"type": "click_up",
"cbjs_action": "alert(bvr.kwargs.search_key);"
}</behavior>
<behavior class='spt_button3'>{
"type": "click_up",
"cbjs_action": '''
spt.panel.load('main_body', bvr.class_name, bvr.kwargs);
//spt.panel.load('main_body', 'tactic.ui.widget.CalendarWdg', bvr.kwargs);
'''
}</behavior>
<behavior class='spt_refresh'>{
"type": "click_up",
"cbjs_action": '''
var top = bvr.src_el.getParent(".spt_panel");
spt.panel.refresh(top);
'''
}</behavior>
</custom>
"""
    xml = Xml()
    xml.read_string(custom)

    top = DivWdg()
    my.set_as_panel(top)
    top.add_class("spt_panel")

    inner = DivWdg()
    top.add(inner)

    # the <html> subtree is injected verbatim as the panel markup
    html_node = xml.get_node("custom/html")
    html = xml.to_string(html_node)
    inner.add(html)

    behaviors = xml.get_nodes("custom/behavior")
    for behavior in behaviors:
        css_class = Xml.get_attribute(behavior, 'class')
        value = Xml.get_node_value(behavior)
        # SECURITY NOTE: eval of the behavior text -- acceptable only
        # because this xml is a literal embedded in this source file;
        # never feed untrusted content through this path.
        value = eval(value)

        # add the kwargs to this so behaviors have access
        value['kwargs'] = my.kwargs
        value['class_name'] = Common.get_full_class_name(my)

        inner.add_behavior({
            'type': 'load',
            'value': value,
            'css_class': css_class,
            # BUGFIX: was "bvr.css_clsss" (undefined in JS), so the warning
            # alert printed "undefined" instead of the missing class name
            'cbjs_action': '''
            var el = bvr.src_el.getElement("."+bvr.css_class);
            if (!el) {
                alert("WARNING: element ["+bvr.css_class+"] does not exist");
            }
            spt.behavior.add( el, bvr.value);
            '''
        })

    if my.kwargs.get("is_refresh"):
        return inner
    else:
        return top