def upgrade(self):
    """Migrate the database schema to the most recent version.

    Database failures are translated into
    :py:class:`~.exc.StorageFailure` (with the original error chained
    as the cause).
    """
    try:
        with self._upgrade_lock:
            with contextlib.closing(self._engine.connect()) as conn:
                # NOTE(imelnikov): Alembic does not support SQLite,
                # and we don't recommend to use SQLite in production
                # deployments, so migrations are rarely needed
                # for SQLite. So we don't bother about working around
                # SQLite limitations, and create the database directly
                # from the tables when it is in use...
                driver = self._engine.url.drivername
                if 'sqlite' in driver:
                    self._metadata.create_all(bind=conn)
                else:
                    migration.db_sync(conn)
    except sa_exc.SQLAlchemyError:
        exc.raise_with_cause(exc.StorageFailure,
                             "Failed upgrading database version")
def get_logbook(self, book_uuid, lazy=False):
    """Fetch the logbook with the given uuid.

    :param book_uuid: uuid of the logbook to fetch.
    :param lazy: when false (the default) the book's flow details are
                 also loaded and attached to the returned book.
    :returns: the converted logbook object.
    :raises: :py:class:`~.exc.NotFound` if no logbook with that uuid
             exists and :py:class:`~.exc.StorageFailure` if the fetch
             fails due to a database error.
    """
    try:
        logbooks = self._tables.logbooks
        with contextlib.closing(self._engine.connect()) as conn:
            q = (sql.select([logbooks
                             ]).where(logbooks.c.uuid == book_uuid))
            row = conn.execute(q).first()
            if not row:
                raise exc.NotFound("No logbook found with"
                                   " uuid '%s'" % book_uuid)
            book = self._converter.convert_book(row)
            if not lazy:
                self._converter.populate_book(conn, book)
            return book
    # NOTE(review): previously only ``sa_exc.DBAPIError`` (a subclass
    # of ``SQLAlchemyError``) was caught here, which let other
    # SQLAlchemy errors (e.g. statement/compilation errors) escape
    # untranslated; catch the broad base class for consistency with
    # the other accessor methods (get_flow_details, update_*).
    except sa_exc.SQLAlchemyError:
        exc.raise_with_cause(exc.StorageFailure,
                             "Failed getting logbook '%s'" % book_uuid)
def get_flow_details(self, fd_uuid, lazy=False):
    """Fetch the flow detail with the given uuid.

    :param fd_uuid: uuid of the flow detail to fetch.
    :param lazy: when false (the default) the contained atom details
                 are also loaded and attached.
    :raises: :py:class:`~.exc.NotFound` when no matching row exists,
             :py:class:`~.exc.StorageFailure` on database errors.
    """
    try:
        table = self._tables.flowdetails
        with self._engine.begin() as conn:
            query = sql.select([table]).where(table.c.uuid == fd_uuid)
            match = conn.execute(query).first()
            if not match:
                raise exc.NotFound("No flow details found with uuid"
                                   " '%s'" % fd_uuid)
            fd = self._converter.convert_flow_detail(match)
            if not lazy:
                self._converter.populate_flow_detail(conn, fd)
            return fd
    except sa_exc.SQLAlchemyError:
        exc.raise_with_cause(
            exc.StorageFailure,
            "Failed getting flow details with"
            " uuid '%s'" % fd_uuid)
def update_atom_details(self, atom_detail):
    """Merge *atom_detail* into its stored counterpart and persist it.

    :param atom_detail: atom detail whose contents are merged into the
                        existing row (matched by uuid).
    :returns: the existing (now merged) atom detail.
    :raises: :py:class:`~.exc.NotFound` when no matching row exists,
             :py:class:`~.exc.StorageFailure` on database errors.
    """
    try:
        table = self._tables.atomdetails
        with self._engine.begin() as conn:
            query = sql.select([table]).where(
                table.c.uuid == atom_detail.uuid)
            match = conn.execute(query).first()
            if not match:
                raise exc.NotFound("No atom details found with uuid"
                                   " '%s'" % atom_detail.uuid)
            existing = self._converter.convert_atom_detail(match)
            self._update_atom_details(conn, atom_detail, existing)
            return existing
    except sa_exc.SQLAlchemyError:
        exc.raise_with_cause(
            exc.StorageFailure,
            "Failed updating atom details"
            " with uuid '%s'" % atom_detail.uuid)
def abandon(self, job, who):
    """Give up ownership of a job previously claimed by *who*.

    Deletes the job's lock node (checked against its version) so that
    other consumers may claim the job again.
    """
    with self._wrap(job.uuid, job.path,
                    fail_msg_tpl="Abandonment failure: %s"):
        try:
            (lock_data, lock_stat,
             _data, _data_stat) = self._get_owner_and_data(job)
        except k_exceptions.NoNodeError:
            excp.raise_with_cause(
                excp.NotFound,
                "Can not abandon a job %s"
                " which we can not determine"
                " the owner of" % (job.uuid))
        if lock_data.get("owner") != who:
            raise excp.JobFailure("Can not abandon a job %s"
                                  " which is not owned by %s"
                                  % (job.uuid, who))
        # Delete the lock in a (version checked) transaction so a
        # concurrent change is detected instead of silently clobbered.
        txn = self._client.transaction()
        txn.delete(job.lock_path, version=lock_stat.version)
        kazoo_utils.checked_commit(txn)
def update_flow_details(self, flow_detail):
    """Merge *flow_detail* into its stored counterpart and persist it.

    The existing row is fetched, fully populated, merged with the
    provided flow detail and written back.

    :returns: the existing (now merged) flow detail.
    :raises: :py:class:`~.exc.NotFound` when no matching row exists,
             :py:class:`~.exc.StorageFailure` on database errors.
    """
    try:
        table = self._tables.flowdetails
        with self._engine.begin() as conn:
            query = sql.select([table]).where(
                table.c.uuid == flow_detail.uuid)
            match = conn.execute(query).first()
            if not match:
                raise exc.NotFound("No flow details found with"
                                   " uuid '%s'" % flow_detail.uuid)
            existing = self._converter.convert_flow_detail(match)
            self._converter.populate_flow_detail(conn, existing)
            self._update_flow_details(conn, flow_detail, existing)
            return existing
    except sa_exc.SQLAlchemyError:
        exc.raise_with_cause(
            exc.StorageFailure,
            "Failed updating flow details with"
            " uuid '%s'" % flow_detail.uuid)
def trash(self, job, who):
    """Move a job (owned by *who*) into the trash area.

    The job's data is re-created under the trash path while the
    original job node and its lock node are removed, all inside a
    single (version checked) transaction.
    """
    with self._wrap(job.uuid, job.path,
                    fail_msg_tpl="Trash failure: %s"):
        try:
            (lock_data, lock_stat,
             data, data_stat) = self._get_owner_and_data(job)
        except k_exceptions.NoNodeError:
            excp.raise_with_cause(
                excp.NotFound,
                "Can not trash a job %s"
                " which we can not determine"
                " the owner of" % (job.uuid))
        if lock_data.get("owner") != who:
            raise excp.JobFailure("Can not trash a job %s"
                                  " which is not owned by %s"
                                  % (job.uuid, who))
        trash_path = job.path.replace(self.path, self.trash_path)
        value = misc.binary_encode(zag_json.dumps(data))
        txn = self._client.transaction()
        txn.create(trash_path, value=value)
        txn.delete(job.lock_path, version=lock_stat.version)
        txn.delete(job.path, version=data_stat.version)
        kazoo_utils.checked_commit(txn)
def validate(cls, data, response):
    """Validate *data* against this message's schema.

    :param data: message payload (dictionary) to validate.
    :param response: when true the response schema is used, otherwise
                     the sender schema.
    :raises: :py:class:`~.excp.InvalidFormat` (with the validation
             error as the cause) when the data does not match.
    """
    schema = cls.RESPONSE_SCHEMA if response else cls.SENDER_SCHEMA
    try:
        su.schema_validate(data, schema)
    except su.ValidationError as e:
        cls_name = reflection.get_class_name(cls, fully_qualified=False)
        if response:
            excp.raise_with_cause(excp.InvalidFormat,
                                  "%s message response data not of the"
                                  " expected format: %s" % (cls_name,
                                                            e.message),
                                  cause=e)
        else:
            excp.raise_with_cause(excp.InvalidFormat,
                                  "%s message sender data not of the"
                                  " expected format: %s" % (cls_name,
                                                            e.message),
                                  cause=e)
def register_entity(self, entity):
    """Register an entity on the jobboard (currently conductors only).

    Creates an ephemeral znode (removed automatically when the session
    ends) under ``<entity_path>/<entity_type>/<entity.name>`` holding
    the json-encoded entity dictionary.

    :param entity: entity to register; only the conductor kind
                   (``c_base.Conductor.ENTITY_KIND``) is supported.
    :raises: :py:class:`~.excp.JobFailure` when registration fails due
             to a timeout, session expiry or other kazoo error;
             ``excp.NotImplementedError`` for unsupported entity kinds.
    """
    entity_type = entity.kind
    if entity_type == c_base.Conductor.ENTITY_KIND:
        entity_path = k_paths.join(self.entity_path, entity_type)
        try:
            self._client.ensure_path(entity_path)
            self._client.create(k_paths.join(entity_path, entity.name),
                                value=misc.binary_encode(
                                    zag_json.dumps(entity.to_dict())),
                                # Ephemeral so the registration vanishes
                                # when this client's session goes away.
                                ephemeral=True)
        except k_exceptions.NodeExistsError:
            # Already registered; treat re-registration as a no-op.
            pass
        except self._client.handler.timeout_exception:
            excp.raise_with_cause(
                excp.JobFailure,
                "Can not register entity %s under %s, operation"
                " timed out" % (entity.name, entity_path))
        except k_exceptions.SessionExpiredError:
            excp.raise_with_cause(
                excp.JobFailure,
                "Can not register entity %s under %s, session"
                " expired" % (entity.name, entity_path))
        except k_exceptions.KazooException:
            # Catch-all for remaining kazoo errors; must stay last since
            # it is a base class of the more specific kazoo exceptions.
            excp.raise_with_cause(
                excp.JobFailure,
                "Can not register entity %s under %s, internal"
                " error" % (entity.name, entity_path))
    else:
        raise excp.NotImplementedError(
            "Not implemented for other entity type '%s'" % entity_type)
def state(self):
    """Fetch the current state of this job.

    :returns: ``states.COMPLETE`` when the job node's data is gone,
              ``states.UNCLAIMED`` when data exists but no owner does,
              ``states.CLAIMED`` when both data and an owner exist.
    :raises: :py:class:`~.excp.JobFailure` when the job data can not be
             fetched due to session expiry, a timeout or an internal
             kazoo error.
    """
    # NOTE: the owner is looked up *before* the job data on purpose;
    # see the completion comment below for why the order matters.
    owner = self.board.find_owner(self)
    job_data = {}
    try:
        raw_data, _data_stat = self._client.get(self.path)
        job_data = misc.decode_json(raw_data)
    except k_exceptions.NoNodeError:
        # Node gone: job_data stays empty, which maps to COMPLETE below.
        pass
    except k_exceptions.SessionExpiredError:
        excp.raise_with_cause(
            excp.JobFailure,
            "Can not fetch the state of %s,"
            " session expired" % (self.uuid))
    except self._client.handler.timeout_exception:
        excp.raise_with_cause(
            excp.JobFailure,
            "Can not fetch the state of %s,"
            " operation timed out" % (self.uuid))
    except k_exceptions.KazooException:
        # Catch-all; must stay after the more specific kazoo exceptions.
        excp.raise_with_cause(
            excp.JobFailure,
            "Can not fetch the state of %s,"
            " internal error" % (self.uuid))
    if not job_data:
        # No data this job has been completed (the owner that we might have
        # fetched will not be able to be fetched again, since the job node
        # is a parent node of the owner/lock node).
        return states.COMPLETE
    if not owner:
        # No owner, but data, still work to be done.
        return states.UNCLAIMED
    return states.CLAIMED
def validate(cls, data):
    """Validate *data* against this message's schema.

    After the schema check succeeds, every failure dictionary that the
    payload may carry (under ``failures`` and/or inside a
    ``('failure', ...)`` result) is also validated.

    :raises: :py:class:`~.excp.InvalidFormat` (with the validation
             error as the cause) when the data does not match.
    """
    try:
        su.schema_validate(data, cls.SCHEMA)
    except su.ValidationError as e:
        cls_name = reflection.get_class_name(cls, fully_qualified=False)
        excp.raise_with_cause(excp.InvalidFormat,
                              "%s message response data not of the"
                              " expected format: %s" % (cls_name,
                                                        e.message),
                              cause=e)
    else:
        # Gather all failure dictionaries that *may* be present...
        failures = list(six.itervalues(data.get('failures', {})))
        result = data.get('result')
        if result is not None:
            result_data_type, result_data = result
            if result_data_type == 'failure':
                failures.append(result_data)
        for fail_data in failures:
            ft.Failure.validate(fail_data)
def test_pformat_str(self):
    """Every message in the cause chain shows up in pformat() and str()."""
    caught = None
    try:
        try:
            try:
                raise IOError("Didn't work")
            except IOError:
                exc.raise_with_cause(exc.ZagException,
                                     "It didn't go so well")
        except exc.ZagException:
            exc.raise_with_cause(exc.ZagException, "I Failed")
    except exc.ZagException as e:
        caught = e
    self.assertIsNotNone(caught)
    # The chain should be: ZagException -> ZagException -> IOError.
    self.assertIsInstance(caught, exc.ZagException)
    self.assertIsInstance(caught.cause, exc.ZagException)
    self.assertIsInstance(caught.cause.cause, IOError)
    formatted = caught.pformat()
    stringified = str(caught)
    expected_msgs = ("I Failed", "It didn't go so well", "Didn't work")
    for msg in expected_msgs:
        self.assertIn(msg, formatted)
        self.assertIn(msg, stringified)
def _translate_failures():
    """Translates common redis exceptions into zag exceptions.

    Generator intended for context-manager use (presumably decorated
    with ``contextlib.contextmanager`` at the definition site -- confirm
    there); the code that talks to redis runs at the ``yield`` point and
    any redis error it raises is re-raised as a
    :py:class:`~.exc.JobFailure` with the original error as the cause.
    """
    try:
        yield
    except redis_exceptions.ConnectionError:
        exc.raise_with_cause(exc.JobFailure,
                             "Failed to connect to redis")
    except redis_exceptions.TimeoutError:
        exc.raise_with_cause(exc.JobFailure,
                             "Failed to communicate with redis, connection"
                             " timed out")
    except redis_exceptions.RedisError:
        # Catch-all: handled after the more specific redis errors above.
        exc.raise_with_cause(exc.JobFailure,
                             "Failed to communicate with redis,"
                             " internal error")
def _force_refresh(self):
    """Synchronously (not delayed) refresh the known job postings.

    :raises: :py:class:`~.excp.JobFailure` when the refresh fails due
             to a timeout, session expiry or an internal kazoo error.
    """
    try:
        child_nodes = self._client.get_children(self.path)
        self._on_job_posting(child_nodes, delayed=False)
    except k_exceptions.NoNodeError:
        # Nothing posted yet; nothing to refresh.
        pass
    except self._client.handler.timeout_exception:
        excp.raise_with_cause(excp.JobFailure,
                              "Refreshing failure, operation timed out")
    except k_exceptions.SessionExpiredError:
        excp.raise_with_cause(excp.JobFailure,
                              "Refreshing failure, session expired")
    except k_exceptions.KazooException:
        # Catch-all; must come after the more specific kazoo exceptions.
        excp.raise_with_cause(excp.JobFailure,
                              "Refreshing failure, internal error")
def last_results(self):
    """The most recent result that was produced.

    :raises: :py:class:`~.exc.NotFound` when no results exist yet.
    """
    history = self.results
    try:
        latest_entry = history[-1]
    except IndexError:
        # EAFP on purpose: raise_with_cause chains the IndexError.
        exc.raise_with_cause(exc.NotFound, "Last results not found")
    else:
        return latest_entry[0]