def start(self, ENV="dev", info=None, run=None): # TODO : much of this is common to many starters and could probably be streamlined # Specify run environment settings settings = settingsLib.get_settings(ENV) # Log identity = "starter_%s" % int(random.random() * 1000) log_file = "starter.log" # logFile = None logger = log.logger(log_file, settings.setLevel, identity) filename = info.file_name if filename is None: logger.error("Did not get a filename") return # Simple connect conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key) # Start a workflow execution workflow_id = "PublishPerfectArticle_%s" % filename.replace( '/', '_') + str(int(random.random() * 1000)) workflow_name = "PublishPerfectArticle" workflow_version = "1" child_policy = None execution_start_to_close_timeout = str(60 * 30) workflow_input = S3NotificationInfo.to_dict(info) workflow_input['run'] = run workflow_input = json.dumps(workflow_input, default=lambda ob: ob.__dict__) try: response = conn.start_workflow_execution( settings.domain, workflow_id, workflow_name, workflow_version, settings.default_task_list, child_policy, execution_start_to_close_timeout, workflow_input) logger.info('got response: \n%s' % json.dumps(response, sort_keys=True, indent=4)) except boto.swf.exceptions.SWFWorkflowExecutionAlreadyStartedError: # There is already a running workflow with that ID, cannot start another message = 'SWFWorkflowExecutionAlreadyStartedError: There is already a running workflow with ID %s' % workflow_id logger.info(message)
def start(self, settings, info=None, run=None):
    """Start the SWF workflow named by ``self.const_name``.

    Forces the run and uses the "article_highest_version" lookup function.

    :param settings: environment settings object (AWS credentials, domain,
        task list, log level)
    :param info: S3 notification info object; must expose ``file_name``
    :param run: run identifier stored in the workflow input
    :raises NullRequiredDataException: if ``info`` has no usable file name
    """
    # Log
    logger = helper.get_starter_logger(
        settings.setLevel, helper.get_starter_identity(self.const_name))

    # Idiomatic guard (was `hasattr(...) == False`)
    if not hasattr(info, 'file_name') or info.file_name is None:
        raise NullRequiredDataException(
            "filename is Null / Did not get a filename.")

    # Renamed from `input`, which shadowed the builtin
    workflow_data = S3NotificationInfo.to_dict(info)
    workflow_data['run'] = run
    workflow_data['version_lookup_function'] = "article_highest_version"
    workflow_data['force'] = True

    (workflow_id,
     workflow_name,
     workflow_version,
     child_policy,
     execution_start_to_close_timeout,
     workflow_input) = helper.set_workflow_information(
        self.const_name, "1", None, workflow_data,
        info.file_name.replace('/', '_'))

    # Simple connect
    conn = boto.swf.layer1.Layer1(settings.aws_access_key_id,
                                  settings.aws_secret_access_key)
    try:
        response = conn.start_workflow_execution(
            settings.domain, workflow_id, workflow_name, workflow_version,
            settings.default_task_list, child_policy,
            execution_start_to_close_timeout, workflow_input)
        logger.info('got response: \n%s' %
                    json.dumps(response, sort_keys=True, indent=4))
    except NullRequiredDataException as e:
        # NOTE(review): effectively dead -- the raise above happens before
        # this try; kept for safety. `e.message` replaced with str(e),
        # which would raise AttributeError on Python 3.
        logger.exception(str(e))
        raise
    except boto.swf.exceptions.SWFWorkflowExecutionAlreadyStartedError:
        # There is already a running workflow with that ID, cannot start another
        message = 'SWFWorkflowExecutionAlreadyStartedError: There is already a running workflow with ID %s' % workflow_id
        logger.info(message)
def start(self, ENV="dev", info=None, run=None): # TODO : much of this is common to many starters and could probably be streamlined # Specify run environment settings settings = settingsLib.get_settings(ENV) # Log identity = "starter_%s" % int(random.random() * 1000) log_file = "starter.log" # logFile = None logger = log.logger(log_file, settings.setLevel, identity) filename = info.file_name if filename is None: logger.error("Did not get a filename") return # Simple connect conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key) # Start a workflow execution workflow_id = "PublishPerfectArticle_%s" % filename.replace('/', '_') + str(int(random.random() * 1000)) workflow_name = "PublishPerfectArticle" workflow_version = "1" child_policy = None execution_start_to_close_timeout = str(60 * 30) workflow_input = S3NotificationInfo.to_dict(info) workflow_input['run'] = run workflow_input = json.dumps(workflow_input, default=lambda ob: ob.__dict__) try: response = conn.start_workflow_execution(settings.domain, workflow_id, workflow_name, workflow_version, settings.default_task_list, child_policy, execution_start_to_close_timeout, workflow_input) logger.info('got response: \n%s' % json.dumps(response, sort_keys=True, indent=4)) except boto.swf.exceptions.SWFWorkflowExecutionAlreadyStartedError: # There is already a running workflow with that ID, cannot start another message = 'SWFWorkflowExecutionAlreadyStartedError: There is already a running workflow with ID %s' % workflow_id logger.info(message)
def start(self, settings, run, info):
    """Start the SWF workflow named by ``self.const_name``.

    Uses the "article_next_version" lookup function and a 5-hour
    execution start-to-close timeout.

    :param settings: environment settings object (AWS credentials, domain,
        task list, log level)
    :param run: run identifier stored in the workflow input
    :param info: S3 notification info object; must expose ``file_name``
    :raises NullRequiredDataException: if ``info`` has no usable file name
    """
    # Log
    logger = helper.get_starter_logger(
        settings.setLevel, helper.get_starter_identity(self.const_name))

    # Idiomatic guard (was `hasattr(...) == False`)
    if not hasattr(info, 'file_name') or info.file_name is None:
        raise NullRequiredDataException("filename is Null. Did not get a filename.")

    # Renamed from `input`, which shadowed the builtin
    workflow_data = S3NotificationInfo.to_dict(info)
    workflow_data['run'] = run
    workflow_data['version_lookup_function'] = "article_next_version"

    (workflow_id,
     workflow_name,
     workflow_version,
     child_policy,
     execution_start_to_close_timeout,
     workflow_input) = helper.set_workflow_information(
        self.const_name, "1", None, workflow_data,
        info.file_name.replace('/', '_'),
        start_to_close_timeout=str(60 * 60 * 5))

    # Simple connect
    conn = boto.swf.layer1.Layer1(settings.aws_access_key_id,
                                  settings.aws_secret_access_key)
    try:
        response = conn.start_workflow_execution(
            settings.domain, workflow_id, workflow_name, workflow_version,
            settings.default_task_list, child_policy,
            execution_start_to_close_timeout, workflow_input)
        logger.info('got response: \n%s' %
                    json.dumps(response, sort_keys=True, indent=4))
    except NullRequiredDataException as e:
        # NOTE(review): effectively dead -- the raise above happens before
        # this try; kept for safety. `e.message` replaced with str(e),
        # which would raise AttributeError on Python 3.
        logger.exception(str(e))
        raise
    except boto.swf.exceptions.SWFWorkflowExecutionAlreadyStartedError:
        # There is already a running workflow with that ID, cannot start another
        message = 'SWFWorkflowExecutionAlreadyStartedError: ' \
                  'There is already a running workflow with ID %s' % workflow_id
        logger.info(message)