def execute(my):
    key = "enable_workflow_engine"
    from prod_setting import ProdSetting
    setting = ProdSetting.get_value_by_key(key)
    if setting not in [True, 'true']:
        return

    # find the node in the pipeline
    task = my.get_caller()
    sobject = task.get_parent()
    if not sobject:
        return

    pipeline = None

    process_code = task.get_value("process_code", no_exception=True)
    if process_code:
        process_sobj = Search.get_by_code("config/process", process_code)
        if process_sobj:
            pipeline_code = process_sobj.get_value("pipeline_code")
            pipeline = Pipeline.get_by_code("sthpw/pipeline", pipeline_code)

    if not pipeline:
        pipeline = Pipeline.get_by_sobject(sobject)

    if not pipeline:
        return

    process_name = task.get_value("process")
    status = task.get_value("status")

    process = pipeline.get_process(process_name)
    if not process:
        # we don't have enough info here
        return

    node_type = process.get_type()
    process_name = process.get_name()

    event = "process|%s" % status.lower()
    output = {
        'sobject': sobject,
        'pipeline': pipeline,
        'process': process_name,
    }
    Trigger.call(task, event, output=output)
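# Usage sketch (illustrative helper, not part of the original module): the trigger
# above turns every task status change into a generic "process|<status>" event that
# the workflow engine listens for.  A minimal, self-contained illustration of the
# event name and payload it builds:
def build_process_event(status, sobject, pipeline, process_name):
    # "Approved" -> "process|approved", "Complete" -> "process|complete"
    event = "process|%s" % status.lower()
    output = {
        'sobject': sobject,
        'pipeline': pipeline,
        'process': process_name,
    }
    return event, output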
def update_dependencies(self):
    '''Function that should be run on insert/update.  It is already automatically
    called during insert; on update, the caller needs to call this explicitly.
    It checks the search type this pipeline is associated with and, if there is
    no pipeline_code column, adds it.  It also updates the process table.'''
    search_type = self.get_value('search_type')

    self.update_process_table(search_type=search_type)

    # don't do anything for the task sType
    if search_type == 'sthpw/task':
        return

    if not search_type:
        return

    if ProdSetting.get_value_by_key('autofill_pipeline_code') != 'false':
        try:
            columns = SearchType.get_columns(search_type)
            if 'pipeline_code' not in columns:
                # add the pipeline code column
                from pyasm.command import ColumnAddCmd
                cmd = ColumnAddCmd(search_type, "pipeline_code", "varchar")
                cmd.execute()
        except SqlException as e:
            print("Error creating column [pipeline_code] for %s" % search_type)
            pass

        # go through all of the sobjects and set all the empty ones
        # to the new pipeline
        search = Search(search_type)
        search.add_op("begin")
        search.add_filter("pipeline_code", "NULL", op='is', quoted=False)
        search.add_filter("pipeline_code", "")
        search.add_op("or")
        sobject_ids = search.get_sobject_ids()

        if sobject_ids:
            # this is much faster and memory efficient
            db_resource = SearchType.get_db_resource_by_search_type(search_type)
            sql = DbContainer.get(db_resource)

            tbl = search.get_table()
            sobject_ids = [str(x) for x in sobject_ids]

            pipeline_code = self.get_value("code")

            sql.do_update('''UPDATE "%s" SET "pipeline_code" = '%s' WHERE id in (%s) ''' % (tbl, pipeline_code, ','.join(sobject_ids)))
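# Illustration (hypothetical values, not from the original module): the autofill step
# above stamps the new pipeline code onto every sobject whose pipeline_code is NULL or
# empty with a single UPDATE statement.  With table "asset", code "asset_pipeline" and
# matching ids [3, 7, 12], the generated SQL would look like this:
tbl = "asset"                      # hypothetical table name
pipeline_code = "asset_pipeline"   # hypothetical pipeline code
sobject_ids = ["3", "7", "12"]     # hypothetical matching ids
statement = '''UPDATE "%s" SET "pipeline_code" = '%s' WHERE id in (%s) ''' % (
    tbl, pipeline_code, ','.join(sobject_ids))
print(statement)
# UPDATE "asset" SET "pipeline_code" = 'asset_pipeline' WHERE id in (3,7,12)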
def on_insert(my):
    '''Function that should be run on insert/update.  It is already automatically
    called during insert; on update, the caller needs to call this explicitly.
    It checks the search type this pipeline is associated with and, if there is
    no pipeline_code column, adds it.  It also updates the process table.'''
    search_type = my.get_value('search_type')

    my.update_process_table(search_type=search_type)

    # don't do anything for the task sType
    if search_type == 'sthpw/task':
        return

    if not search_type:
        return

    if ProdSetting.get_value_by_key('autofill_pipeline_code') != 'false':
        try:
            columns = SearchType.get_columns(search_type)
            if 'pipeline_code' not in columns:
                # add the pipeline code column
                from pyasm.command import ColumnAddCmd
                cmd = ColumnAddCmd(search_type, "pipeline_code", "varchar")
                cmd.execute()
        except SqlException as e:
            print("Error creating column [pipeline_code] for %s" % search_type)
            pass

        # go through all of the sobjects and set all the empty ones
        # to the new pipeline
        search = Search(search_type)
        search.add_op("begin")
        search.add_filter("pipeline_code", "NULL", op='is', quoted=False)
        search.add_filter("pipeline_code", "")
        search.add_op("or")
        sobject_ids = search.get_sobject_ids()

        if sobject_ids:
            # this is much faster and memory efficient
            db_resource = SearchType.get_db_resource_by_search_type(search_type)
            sql = DbContainer.get(db_resource)

            tbl = search.get_table()
            sobject_ids = [str(x) for x in sobject_ids]

            pipeline_code = my.get_value("code")

            sql.do_update('''UPDATE "%s" SET "pipeline_code" = '%s' WHERE id in (%s) ''' % (tbl, pipeline_code, ','.join(sobject_ids)))
def add_ending(my, parts, auto_version=True):
    context = my.snapshot.get_value("context")
    filename = my.file_object.get_full_file_name()

    # make sure that the version in the file name does not yet exist
    version = my.get_version_from_file_name(filename)
    if not auto_version and version:
        # if the file version is not the same as the snapshot version
        # then check to see if the snapshot already exists
        if not context.startswith("cache") and version != my.snapshot.get_value("version"):
            existing_snap = Snapshot.get_by_version(
                my.snapshot.get_value("search_type"),
                my.snapshot.get_value("search_id"), context, version)
            if existing_snap:
                raise TacticException('A snapshot with context "%s" and version "%s" already exists.' % (context, version))

        my.snapshot.set_value("version", version)
        my.snapshot.commit()
    else:
        version = my.snapshot.get_value("version")

    if version == 0:
        version = "CURRENT"
    elif version == -1:
        version = "LATEST"
    else:
        version = "v%0.3d" % int(version)

    revision = my.snapshot.get_value("revision", no_exception=True)
    if revision:
        revision = "r%0.2d" % int(revision)

    ext = my.get_ext()

    parts.append(context.replace("/", "_"))

    if my.is_tactic_repo():
        parts.append(version)
        if revision:
            parts.append(revision)

    # should all files be named with file_type ending?
    file_type = my.get_file_type()
    # backwards compatibility
    if file_type and file_type not in ['maya', 'main', 'geo', 'xml']:
        parts.append(file_type)

    #if file_type in ['web','icon']:
    #    parts.append(file_type)

    value = ProdSetting.get_value_by_key("naming/add_initials")
    if value == "true":
        project = Project.get()
        initials = Project.get().get_initials()
        if initials:
            parts.append(initials)

    filename = "_".join(parts)

    if ext:
        filename = "%s%s" % (filename, ext)

    return filename
def add_initial_tasks(sobject, pipeline_code=None, processes=[], contexts=[], skip_duplicate=True, mode='standard', start_offset=0):
    '''add initial tasks based on the pipeline of the sobject'''
    from pipeline import Pipeline

    def _get_context(existing_task_dict, process_name, context=None):
        existed = False
        if not existing_task_dict:
            if context:
                context = context
            else:
                context = process_name
        else:
            compare_key = "%s:%s" % (process_name, context)
            max_num = 0
            for item in existing_task_dict.keys():
                item_stripped = re.sub(r'/\d+$', '', item)
                #if item.startswith(compare_key):
                if item_stripped == compare_key:
                    existing_context = item.replace('%s:' % process_name, '')
                    suffix = existing_context.split('/')[-1]
                    try:
                        num = int(suffix)
                    except:
                        num = 0
                    if num > max_num:
                        max_num = num
                    existed = True

            if existed:
                context = "%s/%0.3d" % (context, max_num + 1)

        return context

    # get pipeline
    if not pipeline_code:
        pipeline_code = sobject.get_value("pipeline_code")

    if pipeline_code in ['', '__default__']:
        pipeline = SearchType.create("sthpw/pipeline")
        pipeline.set_value("code", "__default__")
        pipeline.set_value("pipeline", '''
        <pipeline>
            <process name='publish'/>
        </pipeline>
        ''')
        # FIXME: HACK to initialize virtual pipeline
        pipeline.set_pipeline(pipeline.get_value("pipeline"))
    else:
        pipeline = Pipeline.get_by_code(pipeline_code)
        if not pipeline:
            print("WARNING: pipeline '%s' does not exist" % pipeline_code)
            return []

    #TODO: add recursive property here
    if processes:
        process_names = processes
    else:
        process_names = pipeline.get_process_names(recurse=True)

    # remember which ones already exist
    existing_tasks = Task.get_by_sobject(sobject, order=False)

    existing_task_dict = {}
    for x in existing_tasks:
        key1 = '%s:%s' % (x.get_value('process'), x.get_value("context"))
        existing_task_dict[key1] = True
        # for backward compatibility, if the process has been created, we will skip later below
        # we may remove this in the future
        #key2 = '%s' %(x.get_value('process'))
        #existing_task_dict[key2] = True

    # create all of the tasks
    description = ""
    tasks = []
    start_date = Date()
    start_date.add_days(start_offset)

    bid_duration_unit = ProdSetting.get_value_by_key("bid_duration_unit")
    if not bid_duration_unit:
        bid_duration_unit = 'hour'

    # that's the date range in 5 days (not hours)
    default_duration = 5
    default_bid_duration = 8
    if bid_duration_unit == 'minute':
        default_bid_duration = 60

    last_task = None

    # this is the explicit mode for creating a task for a specific process:context combo
    if mode == 'context':
        for context_combo in contexts:
            process_name, context = context_combo.split(':')

            # depend_id is None since these are arbitrary tasks
            depend_id = None

            # first check if it already exists when skip_duplicate is True
            key1 = '%s:%s' % (process_name, context)
            task_existed = False
            for item in existing_task_dict:
                if item.startswith(key1):
                    task_existed = True
                    break
            if skip_duplicate and task_existed:
                continue

            process_obj = pipeline.get_process(process_name)
            if not process_obj:
                continue

            context = _get_context(existing_task_dict, process_name, context)
            pipe_code = process_obj.get_task_pipeline()

            attrs = process_obj.get_attributes()
            duration = attrs.get("duration")
            if duration:
                duration = int(duration)
            else:
                duration = default_duration

            bid_duration = attrs.get("bid_duration")
            if not bid_duration:
                bid_duration = default_bid_duration
            else:
                bid_duration = int(bid_duration)

            end_date = start_date.copy()
            # for a task to be x days long, we need duration x-1.
            end_date.add_days(duration - 1)

            start_date_str = start_date.get_db_date()
            end_date_str = end_date.get_db_date()

            # Create the task
            last_task = Task.create(sobject, process_name, description, depend_id=depend_id,
                pipeline_code=pipe_code, start_date=start_date_str, end_date=end_date_str,
                context=context, bid_duration=bid_duration)

            # this avoids duplicated tasks for a process connecting to multiple processes
            new_key = '%s:%s' % (last_task.get_value('process'), last_task.get_value("context"))
            existing_task_dict[new_key] = True
            # for backward compatibility, if the process has been created, we will skip later below
            tasks.append(last_task)

            start_date = end_date.copy()
            # start the day after
            start_date.add_days(1)

        return tasks

    for process_name in process_names:
        if last_task:
            depend_id = last_task.get_id()
        else:
            depend_id = None

        process_obj = pipeline.get_process(process_name)
        if not process_obj:
            continue

        attrs = process_obj.get_attributes()
        duration = attrs.get("duration")
        if duration:
            duration = int(duration)
        else:
            duration = default_duration

        bid_duration = attrs.get("bid_duration")
        if not bid_duration:
            bid_duration = default_bid_duration
        else:
            bid_duration = int(bid_duration)

        end_date = start_date.copy()
        if duration >= 1:
            # for a task to be x days long, we need duration x-1.
            end_date.add_days(duration - 1)

        # output contexts could be duplicated from 2 different output processes
        if mode == 'simple process':
            output_contexts = [process_name]
        else:
            output_contexts = pipeline.get_output_contexts(process_obj.get_name(), show_process=False)

        pipe_code = process_obj.get_task_pipeline()

        start_date_str = start_date.get_db_date()
        end_date_str = end_date.get_db_date()

        for context in output_contexts:
            # first check if it already exists when skip_duplicate is True
            key1 = '%s:%s' % (process_name, context)
            task_existed = False
            for item in existing_task_dict:
                if item.startswith(key1):
                    task_existed = True
                    break
            if skip_duplicate and task_existed:
                continue

            if contexts and context not in contexts:
                continue

            context = _get_context(existing_task_dict, process_name, context)

            last_task = Task.create(sobject, process_name, description, depend_id=depend_id,
                pipeline_code=pipe_code, start_date=start_date_str, end_date=end_date_str,
                context=context, bid_duration=bid_duration)

            # this avoids duplicated tasks for a process connecting to multiple processes
            new_key = '%s:%s' % (last_task.get_value('process'), last_task.get_value("context"))
            existing_task_dict[new_key] = True
            # for backward compatibility, if the process has been created, we will skip later below
            tasks.append(last_task)

        start_date = end_date.copy()
        # start the day after
        start_date.add_days(1)

    return tasks
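# Scheduling sketch (illustrative only; uses the standard library instead of the Date
# helper above): add_initial_tasks lays tasks out back to back -- each task spans
# `duration` calendar days (end = start + duration - 1) and the next task starts the
# day after the previous one ends.
from datetime import date, timedelta

def layout_tasks(start, durations):
    ranges = []
    for duration in durations:
        end = start + timedelta(days=duration - 1)
        ranges.append((start, end))
        start = end + timedelta(days=1)     # start the day after
    return ranges

# e.g. three 5-day tasks starting 2024-01-01 occupy Jan 1-5, Jan 6-10 and Jan 11-15
print(layout_tasks(date(2024, 1, 1), [5, 5, 5]))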
def update_dependent_tasks(my, top=True):
    '''for purposes of dependent tasks'''
    if top:
        Task.tasks_updated = []
        Task.tasks_updated.append(my.get_id())

    # get the dependent tasks
    tasks = my.get_dependent_tasks()

    bid_start_date = my.get_value("bid_start_date")
    bid_end_date = my.get_value("bid_end_date")

    bid_duration_unit = ProdSetting.get_value_by_key("bid_duration_unit")
    if not bid_duration_unit:
        bid_duration_unit = 'hour'

    # if there is no end date specified, return
    if not bid_end_date:
        bid_duration = my.get_value("bid_duration")
        if bid_duration and bid_start_date:
            date = Date(db=bid_start_date)
            bid_duration = float(bid_duration)
            if bid_duration_unit == 'minute':
                date.add_minutes(bid_duration)
            else:
                date.add_hours(bid_duration)
            bid_end_date = date.get_db_time()
        else:
            return

    for task in tasks:
        # prevent circular dependency if for some reason they occur.
        if task.get_id() in Task.tasks_updated:
            Environment.add_warning("Circular dependency", "Circular dependency with task '%s'" % task.get_id())
            continue

        Task.tasks_updated.append(my.get_id())

        # if the dependency is fixed, update the d
        #mode = task.get_value("mode")
        mode = "depend"

        # put the start date as the end date
        if mode == "depend":
            # add one day to the end date to get the start date
            date = Date(db=bid_end_date)
            date.add_days(1)
            bid_start_date = date.get_db_time()
            task.set_value("bid_start_date", bid_start_date)

            # check if there is a duration in hours to this date
            bid_duration = task.get_value("bid_duration")
            if bid_duration:
                bid_duration = int(bid_duration)
                date = Date(db=bid_start_date)
                if bid_duration_unit == 'minute':
                    date.add_minutes(bid_duration)
                else:
                    date.add_hours(bid_duration)
                bid_end_date = date.get_db_time()
                task.set_value("bid_end_date", bid_end_date)

        task.commit()
        task.update_dependent_tasks(False)
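# Dependency chaining sketch (illustrative only; uses the standard library instead of
# the Date helper above): update_dependent_tasks pushes a dependent task to start one
# day after the driving task ends, then derives its end from its own bid_duration in
# hours (or minutes, per the "bid_duration_unit" ProdSetting).
from datetime import datetime, timedelta

def chain_dependent(parent_end, bid_duration, unit='hour'):
    start = parent_end + timedelta(days=1)          # start the day after the parent ends
    if unit == 'minute':
        end = start + timedelta(minutes=bid_duration)
    else:
        end = start + timedelta(hours=bid_duration)
    return start, end

# e.g. a parent task ending 2024-01-05 09:00 with an 8-hour dependent task
print(chain_dependent(datetime(2024, 1, 5, 9), 8))  # 2024-01-06 09:00 -> 17:00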
def add_ending(self, parts, auto_version=True):
    context = self.snapshot.get_value("context")
    filename = self.file_object.get_full_file_name()

    # make sure that the version in the file name does not yet exist
    version = self.get_version_from_file_name(filename)
    if not auto_version and version:
        # if the file version is not the same as the snapshot version
        # then check to see if the snapshot already exists
        if not context.startswith("cache") and version != self.snapshot.get_value("version"):
            existing_snap = Snapshot.get_by_version(
                self.snapshot.get_value("search_type"),
                self.snapshot.get_value("search_id"), context, version)
            if existing_snap:
                raise TacticException('A snapshot with context "%s" and version "%s" already exists.' % (context, version))

        self.snapshot.set_value("version", version)
        self.snapshot.commit()
    else:
        version = self.snapshot.get_value("version")

    if version == 0:
        version = "CURRENT"
    elif version == -1:
        version = "LATEST"
    else:
        version = "v%0.3d" % int(version)

    revision = self.snapshot.get_value("revision", no_exception=True)
    if revision:
        revision = "r%0.2d" % int(revision)

    ext = self.get_ext()

    parts.append(context.replace("/", "_"))

    if self.is_tactic_repo():
        parts.append(version)
        if revision:
            parts.append(revision)

    # should all files be named with file_type ending?
    file_type = self.get_file_type()
    # backwards compatibility
    if file_type and file_type not in ['maya', 'main', 'geo', 'xml']:
        parts.append(file_type)

    #if file_type in ['web','icon']:
    #    parts.append(file_type)

    value = ProdSetting.get_value_by_key("naming/add_initials")
    if value == "true":
        project = Project.get()
        initials = Project.get().get_initials()
        if initials:
            parts.append(initials)

    filename = "_".join(parts)

    if ext:
        filename = "%s%s" % (filename, ext)

    return filename
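# Naming sketch (illustrative helper, not the original API): add_ending assembles the
# checked-in file name from its parts -- context with "/" replaced by "_", version
# zero-padded as "v###", optional revision as "r##", then the extension.
def example_file_name(parts, context, version, revision, ext):
    parts = list(parts)
    parts.append(context.replace("/", "_"))
    parts.append("v%0.3d" % version)
    if revision:
        parts.append("r%0.2d" % revision)
    return "_".join(parts) + ext

# e.g. "chr001_model_v003_r02.ma"
print(example_file_name(["chr001"], "model", 3, 2, ".ma"))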
def add_initial_tasks(sobject, pipeline_code=None, processes=[], contexts=[], skip_duplicate=True, mode='standard', start_offset=0):
    '''add initial tasks based on the pipeline of the sobject'''
    from pipeline import Pipeline

    def _get_context(existing_task_dict, process_name, context=None):
        existed = False
        if not existing_task_dict:
            if context:
                context = context
            else:
                context = process_name
        else:
            compare_key = "%s:%s" % (process_name, context)
            max_num = 0
            for item in existing_task_dict.keys():
                item_stripped = re.sub(r'/\d+$', '', item)
                #if item.startswith(compare_key):
                if item_stripped == compare_key:
                    existing_context = item.replace('%s:' % process_name, '')
                    suffix = existing_context.split('/')[-1]
                    try:
                        num = int(suffix)
                    except:
                        num = 0
                    if num > max_num:
                        max_num = num
                    existed = True

            if existed:
                context = "%s/%0.3d" % (context, max_num + 1)

        return context

    # get pipeline
    if not pipeline_code:
        pipeline_code = sobject.get_value("pipeline_code")

    if pipeline_code in ['', '__default__']:
        pipeline = SearchType.create("sthpw/pipeline")
        pipeline.set_value("code", "__default__")
        pipeline.set_value("pipeline", '''
        <pipeline>
            <process name='publish'/>
        </pipeline>
        ''')
        # FIXME: HACK to initialize virtual pipeline
        pipeline.set_pipeline(pipeline.get_value("pipeline"))
    else:
        pipeline = Pipeline.get_by_code(pipeline_code)
        if not pipeline:
            print("WARNING: pipeline '%s' does not exist" % pipeline_code)
            return []

    #TODO: add recursive property here
    if processes:
        process_names = processes
    else:
        process_names = pipeline.get_process_names(recurse=True, type=["node", "approval"])

    # remember which ones already exist
    existing_tasks = Task.get_by_sobject(sobject, order=False)

    existing_task_dict = {}
    for x in existing_tasks:
        key1 = '%s:%s' % (x.get_value('process'), x.get_value("context"))
        existing_task_dict[key1] = True
        # for backward compatibility, if the process has been created, we will skip later below
        # we may remove this in the future
        #key2 = '%s' %(x.get_value('process'))
        #existing_task_dict[key2] = True

    # create all of the tasks
    description = ""
    tasks = []
    start_date = Date()
    start_date.add_days(start_offset)

    bid_duration_unit = ProdSetting.get_value_by_key("bid_duration_unit")
    if not bid_duration_unit:
        bid_duration_unit = 'hour'

    # that's the date range in 5 days (not hours)
    default_duration = 5
    default_bid_duration = 8
    if bid_duration_unit == 'minute':
        default_bid_duration = 60

    last_task = None

    # this is the explicit mode for creating a task for a specific process:context combo
    if mode == 'context':
        for context_combo in contexts:
            process_name, context = context_combo.split(':')

            # depend_id is None since these are arbitrary tasks
            depend_id = None

            # first check if it already exists when skip_duplicate is True
            key1 = '%s:%s' % (process_name, context)
            task_existed = False
            for item in existing_task_dict:
                if item.startswith(key1):
                    task_existed = True
                    break
            if skip_duplicate and task_existed:
                continue

            process_obj = pipeline.get_process(process_name)
            if not process_obj:
                continue

            context = _get_context(existing_task_dict, process_name, context)
            pipe_code = process_obj.get_task_pipeline()

            attrs = process_obj.get_attributes()
            duration = attrs.get("duration")
            if duration:
                duration = int(duration)
            else:
                duration = default_duration

            bid_duration = attrs.get("bid_duration")
            if not bid_duration:
                bid_duration = default_bid_duration
            else:
                bid_duration = int(bid_duration)

            end_date = start_date.copy()
            # for a task to be x days long, we need duration x-1.
            end_date.add_days(duration - 1)

            start_date_str = start_date.get_db_date()
            end_date_str = end_date.get_db_date()

            # Create the task
            last_task = Task.create(sobject, process_name, description, depend_id=depend_id,
                pipeline_code=pipe_code, start_date=start_date_str, end_date=end_date_str,
                context=context, bid_duration=bid_duration)

            # this avoids duplicated tasks for a process connecting to multiple processes
            new_key = '%s:%s' % (last_task.get_value('process'), last_task.get_value("context"))
            existing_task_dict[new_key] = True
            # for backward compatibility, if the process has been created, we will skip later below
            tasks.append(last_task)

            start_date = end_date.copy()
            # start the day after
            start_date.add_days(1)

        return tasks

    for process_name in process_names:
        if last_task:
            depend_id = last_task.get_id()
        else:
            depend_id = None

        process_obj = pipeline.get_process(process_name)
        if not process_obj:
            continue

        attrs = process_obj.get_attributes()
        duration = attrs.get("duration")
        if duration:
            duration = int(duration)
        else:
            duration = default_duration

        bid_duration = attrs.get("bid_duration")
        if not bid_duration:
            bid_duration = default_bid_duration
        else:
            bid_duration = int(bid_duration)

        end_date = start_date.copy()
        if duration >= 1:
            # for a task to be x days long, we need duration x-1.
            end_date.add_days(duration - 1)

        # output contexts could be duplicated from 2 different output processes
        if mode == 'simple process':
            output_contexts = [process_name]
        else:
            output_contexts = pipeline.get_output_contexts(process_obj.get_name(), show_process=False)

        pipe_code = process_obj.get_task_pipeline()

        start_date_str = start_date.get_db_date()
        end_date_str = end_date.get_db_date()

        for context in output_contexts:
            # first check if it already exists when skip_duplicate is True
            key1 = '%s:%s' % (process_name, context)
            task_existed = False
            for item in existing_task_dict:
                if item.startswith(key1):
                    task_existed = True
                    break
            if skip_duplicate and task_existed:
                continue

            if contexts and context not in contexts:
                continue

            context = _get_context(existing_task_dict, process_name, context)

            last_task = Task.create(sobject, process_name, description, depend_id=depend_id,
                pipeline_code=pipe_code, start_date=start_date_str, end_date=end_date_str,
                context=context, bid_duration=bid_duration)

            # this avoids duplicated tasks for a process connecting to multiple processes
            new_key = '%s:%s' % (last_task.get_value('process'), last_task.get_value("context"))
            existing_task_dict[new_key] = True
            # for backward compatibility, if the process has been created, we will skip later below
            tasks.append(last_task)

        start_date = end_date.copy()
        # start the day after
        start_date.add_days(1)

    return tasks
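# Usage sketch (assumptions: this function is exposed as Task.add_initial_tasks and
# "shot" is an existing sobject whose pipeline defines model and rig processes):
#
#   tasks = Task.add_initial_tasks(shot)                  # one task per pipeline process
#   tasks = Task.add_initial_tasks(shot, start_offset=1)  # schedule starting tomorrow
#   tasks = Task.add_initial_tasks(shot, mode='context',
#                                  contexts=['model:model', 'rig:rig'])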