def _update_status(self, status, message):
    """Transition this task to ``status``, enforcing the task state machine.

    Raises ValidationError('invalid-transition') for any move the current
    status does not permit.  Returns the new status, or None when the
    status is already ``status`` (no-op).
    """
    # Already in the requested state: nothing to do.
    if self.status == status:
        return
    if self.status == 'prepared':
        # A prepared task may only move to 'pending'.
        if status != 'pending':
            raise ValidationError('invalid-transition')
    elif self.status in ('claimed', 'pending'):
        if status == 'claimed':
            self._validate_message(message, self.assignee)
            self.claimed = scheme.current_timestamp()
        elif status in ('completed', 'declined'):
            # Declining requires an explanatory message.
            if status == 'declined' and not message:
                raise ValidationError(structure={
                    'message': ValidationError('message-required-for-status')
                })
            self._validate_message(message, self.assignee)
            self.completed = scheme.current_timestamp()
        elif status == 'canceled':
            # Cancellation needs no message but still stamps completion time.
            self.completed = scheme.current_timestamp()
        else:
            raise ValidationError('invalid-transition')
    else:
        # Any other current status is terminal; no transitions allowed.
        raise ValidationError('invalid-transition')
    self.status = status
    return status
def purge(cls, session, lifetime):
    """Delete expired tasks.

    Removes tasks that still have activations remaining but were last
    activated more than ``lifetime`` days ago, then removes tasks whose
    per-task ``timeout`` (seconds since creation) has elapsed.
    """
    cutoff = current_timestamp() - timedelta(days=lifetime)
    overdue = session.query(cls).filter(
        cls.activation_limit != None,
        cls.activation_limit > cls.activations,
        cls.activated < cutoff)
    for stale in overdue:
        session.delete(stale)

    moment = current_timestamp()
    for candidate in session.query(cls).filter(cls.timeout != None):
        if (candidate.created + timedelta(seconds=candidate.timeout)) < moment:
            session.delete(candidate)
def purge(cls, session, lifetime):
    """Purge tasks past their activation lifetime or per-task timeout."""
    activation_cutoff = current_timestamp() - timedelta(days=lifetime)
    expired_query = session.query(cls).filter(
        cls.activation_limit != None,
        cls.activation_limit > cls.activations,
        cls.activated < activation_cutoff)
    for expired in expired_query:
        session.delete(expired)

    now = current_timestamp()
    timed_query = session.query(cls).filter(cls.timeout != None)
    for entry in timed_query:
        deadline = entry.created + timedelta(seconds=entry.timeout)
        if deadline < now:
            session.delete(entry)
def purge(cls, session, lifetime):
    """Purge expired tasks.

    Bulk-deletes Task rows whose ids match tasks past their activation
    lifetime, then individually deletes tasks whose timeout has elapsed.
    """
    threshold = current_timestamp() - timedelta(days=lifetime)
    exhausted_ids = session.query(cls.task_id).filter(
        cls.activation_limit != None,
        cls.activation_limit > cls.activations,
        cls.activated < threshold,
    )
    session.query(Task).filter(Task.id.in_(exhausted_ids)).delete(
        synchronize_session=False)

    now = current_timestamp()
    for timed in session.query(cls).filter(cls.timeout != None):
        if (timed.created + timedelta(seconds=timed.timeout)) < now:
            session.delete(timed)
def verify(self, session, bypass_checks=False):
    """Verify an executing process against its endpoint.

    Unless ``bypass_checks`` is set, re-reads the row under a row lock and
    bails out when the process is no longer executing.  Any request or
    response failure ends the process as 'failed'.
    """
    if not bypass_checks:
        session.refresh(self, lockmode='update')
        if self.status != 'executing':
            return
    payload = self._construct_payload(status='executing', for_executor=True)
    try:
        status, response = self.endpoint.request(payload)
        if status != COMPLETED:
            log('error', 'verification of %s failed during initial request\n%s',
                repr(self), response)
            return self.end(session, 'failed', bypass_checks=True)
    except Exception:
        log('exception', 'verification of %s failed during initial request',
            repr(self))
        return self.end(session, 'failed', bypass_checks=True)
    # The endpoint answered; record when we last heard from it.
    self.communicated = current_timestamp()
    try:
        response = InitiationResponse.process(response)
    except Exception:
        log('exception', 'verification of %s failed due to invalid response',
            repr(self))
        return self.end(session, 'failed', bypass_checks=True)
    status = response['status']
    if status in ('completed', 'failed'):
        # Terminal answer from the endpoint; end with its reported output.
        return self.end(session, status, response.get('output'), True)
    # Still running: persist any intermediate state the endpoint reported.
    state = response.get('state')
    if state:
        self.state = state
def abort(self, session):
    """Mark this run aborted and best-effort update the process resource."""
    self.ended = current_timestamp()
    self.status = 'aborted'
    try:
        Process.execute('update', {'status': 'aborted'}, subject=self.id)
    except GoneError:
        # The resource may already be gone; that is not fatal.
        log('warning', 'no corresponding process resource for %r', self)
def create(cls, session, tag, action, status='pending', occurrence=None,
           failed_action=None, completed_action=None, description=None,
           retry_backoff=None, retry_limit=2, retry_timeout=300, delta=None):
    """Create a ScheduledTask and add it to the session.

    When no ``occurrence`` is given, schedules for now, shifted forward by
    ``delta`` seconds when provided.  ``action``/``failed_action``/
    ``completed_action`` may be dicts (polymorphically instantiated) or
    TaskAction instances.  Returns the new task.
    """
    if not occurrence:
        occurrence = current_timestamp()
        if delta:
            occurrence += timedelta(seconds=delta)

    task = ScheduledTask(tag=tag, status=status, description=description,
        occurrence=occurrence, retry_backoff=retry_backoff,
        retry_limit=retry_limit, retry_timeout=retry_timeout)

    if isinstance(action, dict):
        task.action = TaskAction.polymorphic_create(action)
    else:
        task.action = action

    if failed_action:
        if isinstance(failed_action, dict):
            task.failed_action = TaskAction.polymorphic_create(failed_action)
        else:
            task.failed_action = failed_action

    if completed_action:
        if isinstance(completed_action, dict):
            task.completed_action = TaskAction.polymorphic_create(completed_action)
        else:
            # BUG FIX: original read `taks.completed_action = ...`, a
            # NameError whenever a non-dict completed_action was passed.
            task.completed_action = completed_action

    session.add(task)
    return task
def _end_run(self, session, status):
    """Finalize this run with ``status`` and fire a best-effort event."""
    self.ended = current_timestamp()
    self.status = status
    try:
        Event.create(topic='run:changed', aspects={'id': self.id})
    except Exception:
        # The run is already finalized; event failure is only logged.
        log('exception', 'failed to fire run:changed event')
def purge(cls, session, lifetime):
    """Bulk-delete Task rows for completed tasks older than ``lifetime`` days."""
    horizon = current_timestamp() - timedelta(days=lifetime)
    stale_ids = session.query(cls.task_id).filter(
        cls.status == 'completed',
        cls.occurrence < horizon)
    session.query(Task).filter(Task.id.in_(stale_ids)).delete(
        synchronize_session=False)
def _end_run(self, session, status):
    """Record the run's final status and end time, then emit run:changed."""
    self.status = status
    self.ended = current_timestamp()
    try:
        Event.create(topic="run:changed", aspects={"id": self.id})
    except Exception:
        # Best-effort notification; never let event failure mask the end.
        log("exception", "failed to fire run:changed event")
def create(cls, session, request_id, **attrs):
    """Create a message attached to an existing request.

    Raises OperationError(token='unknown-request') when no request with
    ``request_id`` exists.
    """
    message = cls(occurrence=current_timestamp(), **attrs)
    try:
        request = Request.load(session, id=request_id)
    except NoResultFound:
        raise OperationError(token='unknown-request')
    message.request_id = request.id
    session.add(message)
    return message
def process_processes(cls, taskqueue, session):
    """Enqueue an 'abandon' task for every executing process whose
    timeout (minutes since start) has elapsed."""
    now = current_timestamp()
    executing = session.query(cls).filter(
        cls.timeout != None,
        cls.started != None,
        cls.status == 'executing')
    for candidate in executing:
        deadline = candidate.started + timedelta(minutes=candidate.timeout)
        if deadline < now:
            log('info', 'abandoning %r due to timing out', candidate)
            taskqueue.enqueue(candidate, 'abandon')
def create(cls, session, **attrs):
    """Create a workflow record, attaching mule extensions when the
    workflow type is 'mule'; validates the specification before adding."""
    is_mule = (attrs['type'] == 'mule')
    if is_mule:
        # Extensions are not a column on cls, so pull them out of attrs first.
        mule_extensions = attrs.pop('mule_extensions')
    subject = cls(modified=current_timestamp(), **attrs)
    if is_mule:
        subject.mule_extensions = WorkflowMule(**mule_extensions)
    subject.validate_specification()
    session.add(subject)
    return subject
def process(self, session, status, output):
    """Dispatch this execution's result to its workflow step; failures in
    the step handler fail the whole run."""
    definition = self.workflow.workflow
    current_step = definition.steps[self.step]
    self.ended = current_timestamp()
    try:
        current_step.process(session, self, definition, status, output)
    except Exception:
        log('exception', 'processing of %r failed due to exception', self)
        self.run.fail(session)
def end(self, session, status='completed', output=None, bypass_checks=False):
    """Finalize this record and schedule the end report.

    Unless ``bypass_checks`` is set, re-reads the row under lock and only
    proceeds when the status is aborting, executing, or pending.
    """
    if not bypass_checks:
        session.refresh(self, lockmode='update')
        if self.status not in ('aborting', 'executing', 'pending'):
            return
    self.status = status
    self.output = output
    self.ended = current_timestamp()
    self._schedule_task(session, 'report-end', limit=10)
def activate(self, session, description):
    """Spawn a scheduled task for this activation.

    Returns None (without spawning) when the activation limit is set and
    already exceeded; otherwise returns the new task.
    """
    ceiling = self.activation_limit
    if ceiling is not None and self.activations > ceiling:
        return
    spawned = ScheduledTask.spawn(self, parameters={"event": description})
    session.add(spawned)
    self.activated = current_timestamp()
    self.activations += 1
    return spawned
def activate(self, session, description):
    """Create and register a spawned task unless the activation limit
    has been exceeded; returns the task or None."""
    maximum = self.activation_limit
    if maximum is not None:
        if self.activations > maximum:
            return
    new_task = ScheduledTask.spawn(self, parameters={'event': description})
    session.add(new_task)
    self.activations = self.activations + 1
    self.activated = current_timestamp()
    return new_task
def initiate(self, session):
    """Start this run inside a savepoint; on failure, roll the savepoint
    back and invalidate the run instead of propagating."""
    self.started = current_timestamp()
    session.begin_nested()
    try:
        self.workflow.workflow.initiate(session, self)
    except Exception:
        log('exception', 'initiation of %r failed due to exception', self)
        session.rollback()
        self.invalidate(session)
    else:
        # Only commit the savepoint when initiation succeeded.
        session.commit()
def update(self, session, **attrs):
    """Apply ``attrs`` to this record; returns True when the name or
    specification actually changed."""
    changed = False
    if 'name' in attrs and attrs['name'] != self.name:
        changed = True
    # NOTE(review): because of the elif, a request changing BOTH name and
    # specification skips validate_specification() — confirm intentional.
    elif 'specification' in attrs and attrs['specification'] != self.specification:
        # NOTE(review): this validates self *before* update_with_mapping
        # applies the new specification below — verify the ordering.
        self.validate_specification()
        changed = True
    self.update_with_mapping(attrs, ignore='id')
    self.modified = current_timestamp()
    return changed
def create(cls, session, **attrs):
    """Create a record, filling in id and created/modified timestamps
    when absent, and reject duplicate names before adding."""
    subject = cls(**attrs)
    if not subject.id:
        subject.id = uniqid()
    if not subject.created:
        # Brand-new record: stamp both timestamps identically.
        subject.created = subject.modified = current_timestamp()
    else:
        subject.modified = subject.created
    cls._check_duplicate_name(session, subject)
    session.add(subject)
    return subject
def _setup_active_run(self, workflow_id, steps, parameters=None):
    """Test helper: create a committed Run and start one chained
    execution per step, pausing between starts."""
    session = self.config.schema.session
    run = Run.create(session, workflow_id, started=current_timestamp())
    session.commit()
    self._runs.append(run.id)

    previous = None
    for step in steps:
        previous = run.create_execution(
            session, step, parameters=parameters, ancestor=previous)
        previous.start()
        sleep(1)  # keep start timestamps strictly ordered

    session.commit()
    return run
def update(self, session, **attrs):
    """Apply ``attrs`` to this record; returns True when name or
    specification changed."""
    changed = False
    if 'name' in attrs and attrs['name'] != self.name:
        changed = True
    # NOTE(review): the elif means a simultaneous name+specification
    # change never runs validate_specification() — confirm intentional.
    elif 'specification' in attrs and attrs[
            'specification'] != self.specification:
        # NOTE(review): validation happens before update_with_mapping
        # applies the new specification — verify the ordering.
        self.validate_specification()
        changed = True
    self.update_with_mapping(attrs, ignore='id')
    self.modified = current_timestamp()
    return changed
def end(self, session, status='completed', output=None, bypass_checks=False):
    """Finalize this record and schedule the end report.

    Unless ``bypass_checks`` is set, re-reads the row under lock and only
    proceeds when it is still executing.
    """
    if not bypass_checks:
        session.refresh(self, lockmode='update')
        if self.status != 'executing':
            return
    self.status = status
    self.output = output
    self.ended = current_timestamp()
    self._schedule_task(session, 'report-end', limit=10)
def next(self, session, *args, **params):
    """Return the next occurrence for this schedule.

    Reuses the cached occurrence when it is still in the future;
    otherwise computes the next occurrence from the cached value (or the
    anchor) and, when ``cache_results`` is true (the default), caches it.
    """
    cache_results = params.get('cache_results', True)
    cached_next = self.cached_next
    if cached_next:
        if cached_next > current_timestamp():
            # Cached occurrence still upcoming: return it directly.
            return cached_next
        occurrence = cached_next
    else:
        occurrence = self.anchor
    # Renamed from `next` — the original local shadowed the builtin.
    upcoming = self._next_occurrence(occurrence)
    if cache_results:
        self.cached_next = upcoming
    return upcoming
def create(cls, session, tag, action, status='pending', occurrence=None,
           failed_action=None, completed_action=None, description=None,
           retry_backoff=None, retry_limit=2, retry_timeout=300, delta=None):
    """Create a ScheduledTask and add it to the session.

    Defaults ``occurrence`` to now (plus ``delta`` seconds when given).
    Action arguments may be dicts, instantiated polymorphically, or
    ready-made TaskAction instances.  Returns the new task.
    """
    if not occurrence:
        occurrence = current_timestamp()
        if delta:
            occurrence += timedelta(seconds=delta)

    task = ScheduledTask(tag=tag, status=status, description=description,
        occurrence=occurrence, retry_backoff=retry_backoff,
        retry_limit=retry_limit, retry_timeout=retry_timeout)

    if isinstance(action, dict):
        task.action = TaskAction.polymorphic_create(action)
    else:
        task.action = action

    if failed_action:
        if isinstance(failed_action, dict):
            task.failed_action = TaskAction.polymorphic_create(
                failed_action)
        else:
            task.failed_action = failed_action

    if completed_action:
        if isinstance(completed_action, dict):
            task.completed_action = TaskAction.polymorphic_create(
                completed_action)
        else:
            # BUG FIX: original assigned to `taks.completed_action`, a
            # NameError for any non-dict completed_action.
            task.completed_action = completed_action

    session.add(task)
    return task
def reschedule(self, session, occurrence=None):
    """Spawn the next pending task for this active schedule, unless one
    is already pending; returns the spawned task or None."""
    from platoon.models.scheduledtask import ScheduledTask
    if self.status != 'active':
        return
    if occurrence is None:
        occurrence = current_timestamp()

    pending = session.query(ScheduledTask).filter_by(
        status='pending', parent_id=self.id)
    if pending.count() > 0:
        # A pending child already exists; do not double-schedule.
        return

    next_occurrence = self.schedule.next(occurrence)
    spawned = ScheduledTask.spawn(self, next_occurrence, parent_id=self.id)
    session.add(spawned)
    return spawned
def process_tasks(cls, taskqueue, session):
    """Claim due pending/retrying tasks under row locks, mark them
    executing, then enqueue each for execution."""
    due = current_timestamp()
    claimed = list(session.query(cls).with_lockmode('update').filter(
        cls.status.in_(('pending', 'retrying')), cls.occurrence <= due))
    if not claimed:
        return
    # Flip every claimed row to executing before releasing the locks.
    for task in claimed:
        task.status = 'executing'
    session.commit()
    for task in claimed:
        log('info', 'processing %s', repr(task))
        taskqueue.enqueue(task, 'execute')
def initiate_process(self, session):
    """Send the initiation request for a pending process.

    Re-reads the row under lock and bails out unless still pending; marks
    the process failed when the endpoint call raises or reports a
    non-COMPLETED status.
    """
    session.refresh(self, lockmode='update')
    if self.status != 'pending':
        return

    self.started = current_timestamp()
    payload = self._construct_payload(status='initiating', input=self.input)
    try:
        status, response = self.endpoint.request(payload)
        if status != COMPLETED:
            log('error', 'initiation of %s failed during initial request\n%s',
                repr(self), response)
            return self.end(session, 'failed', bypass_checks=True)
    # FIX: was `except Exception, exception:` — Python-2-only syntax with
    # an unused binding; normalized to match verify()'s handler style.
    except Exception:
        log('exception', 'initiation of %s failed during initial request',
            repr(self))
        return self.end(session, 'failed', bypass_checks=True)
def process_tasks(cls, taskqueue, session):
    """Lock and claim every due pending/retrying task, commit the status
    change, then hand each task to the queue."""
    occurrence = current_timestamp()
    due_query = session.query(cls).with_lockmode('update').filter(
        cls.status.in_(('pending', 'retrying')),
        cls.occurrence <= occurrence)
    due_tasks = list(due_query)
    if not due_tasks:
        return
    for due_task in due_tasks:
        due_task.status = 'executing'
    session.commit()  # release the row locks with statuses persisted
    for due_task in due_tasks:
        log('info', 'processing %s', repr(due_task))
        taskqueue.enqueue(due_task, 'execute')
def process(self, session, status, output):
    """Process this step's result inside a savepoint; roll back the
    savepoint and fail the run when the step handler raises."""
    if not self.is_active:
        return
    definition = self.workflow.workflow
    step_impl = definition.steps[self.step]
    self.ended = current_timestamp()
    session.begin_nested()
    try:
        step_impl.process(session, self, definition, status, output)
    except Exception:
        session.rollback()
        log('exception', 'processing of %r failed due to exception', self)
        self.run.fail(session)
    else:
        session.commit()
def bootstrap_documents(session):
    """Merge the builtin document types and intents into the database,
    then commit (merge makes this idempotent)."""
    now = current_timestamp()
    stamps = {'created': now, 'modified': now}

    records = [
        models.DocumentType(id='siq:matter', name='Matter',
            resource='siq.matter', **stamps),
        models.DocumentType(id='siq:fileplan', name='File Plan',
            resource='siq.fileplan', **stamps),
        models.DocumentType(id='siq:project', name='Project',
            resource='siq.project', **stamps),
        models.Intent(id='available-to', name='Available to',
            exclusive=False, **stamps),
        models.Intent(id='contained-by', name='Contained by',
            exclusive=False, **stamps),
    ]
    for record in records:
        session.merge(record)
    session.commit()
def bootstrap_documents(session):
    """Seed the builtin document types and intents (idempotent via merge)."""
    timestamp = current_timestamp()

    def _document_type(ident, label, resource):
        # Helper: builds a DocumentType stamped with the bootstrap time.
        return models.DocumentType(id=ident, name=label, created=timestamp,
            modified=timestamp, resource=resource)

    def _intent(ident, label):
        # Helper: builds a non-exclusive Intent stamped with the bootstrap time.
        return models.Intent(id=ident, name=label, created=timestamp,
            modified=timestamp, exclusive=False)

    session.merge(_document_type('siq:matter', 'Matter', 'siq.matter'))
    session.merge(_document_type('siq:fileplan', 'File Plan', 'siq.fileplan'))
    session.merge(_document_type('siq:project', 'Project', 'siq.project'))
    session.merge(_intent('available-to', 'Available to'))
    session.merge(_intent('contained-by', 'Contained by'))
    session.commit()
def update(self, data):
    """Merge updated package entities into the current package.

    Entities are keyed by id: existing ones are updated in place, new
    ones appended.  Marks the record modified and flips a deployed
    package back to 'deploying'.  Raises BadRequestError when an updated
    entity has no id.
    """
    current_entities = self._unserialize_entities(self.package)
    # Idiom fix: dict comprehension instead of dict([...]) over a list.
    entities_by_id = {entity.get('id'): entity for entity in current_entities}

    updated_entities = self._unserialize_entities(data.get('package', {}))
    for updated in updated_entities:
        updated_id = updated.get('id')
        if not updated_id:
            raise BadRequestError
        existing = entities_by_id.get(updated_id)
        if existing:
            existing.update(updated)
        else:
            entities_by_id[updated_id] = updated

    self.update_with_mapping(data)
    self.package = self._serialize_entities(entities_by_id.values())
    self.modified = current_timestamp()
    if self.status == 'deployed':
        # Force a redeploy so the updated package takes effect.
        self.status = 'deploying'
def task(self, request, response, subject, data):
    """Dispatch maintenance tasks; 'purge-notifications' bulk-deletes
    notifications older than 30 days."""
    session = self.schema.session
    cutoff = current_timestamp() - timedelta(days=30)
    if data["task"] == "purge-notifications":
        stale = session.query(Notification).filter(
            Notification.created < cutoff)
        stale.delete(synchronize_session=False)
        session.commit()
def _end_run(self, session, status):
    """Finalize this run and queue change/ended events for post-commit."""
    self.status = status
    self.ended = current_timestamp()
    # Fire 'run:changed' then 'run:ended' only once the transaction commits.
    for topic in ('run:changed', 'run:ended'):
        session.call_after_commit(self._run_changed_event, topic)
def create(cls, session, **attrs):
    """Instantiate a record stamped with the current time and register it
    with the session; returns the new instance."""
    instance = cls(modified=current_timestamp(), **attrs)
    session.add(instance)
    return instance
def update(self, session, **attrs):
    """Apply ``attrs`` (never overwriting id), refresh the modified
    timestamp, and reject duplicate names."""
    self.update_with_mapping(attrs, ignore='id')
    self.modified = current_timestamp()
    # Name uniqueness is enforced after the mapping so the new name is checked.
    self._check_duplicate_name(session, self)
def initiate(self, session):
    """Record the start time and hand initiation to the workflow definition."""
    self.started = current_timestamp()
    definition = self.workflow.workflow
    definition.initiate(session, self)
def start(self, parameters=None):
    """Mark this execution started, optionally recording its parameters."""
    self.started = current_timestamp()
    if parameters:
        # Only overwrite parameters when a truthy value was supplied.
        self.parameters = parameters
def update(self, session, **attrs):
    """Apply ``attrs`` to this record (id is never overwritten) and bump
    the modified timestamp."""
    self.update_with_mapping(attrs, ignore='id')
    self.modified = current_timestamp()
def purge(cls, session, lifetime):
    """Delete completed tasks older than ``lifetime`` days, one row at a
    time so ORM-level delete handling runs for each."""
    cutoff = current_timestamp() - timedelta(days=lifetime)
    expired = session.query(cls).filter(
        cls.status == 'completed',
        cls.occurrence < cutoff)
    for expired_task in expired:
        session.delete(expired_task)