Code example #1
 def save_logbook(self, book):
     try:
         logbooks = self._tables.logbooks
         with self._engine.begin() as conn:
             q = (sql.select([logbooks]).
                  where(logbooks.c.uuid == book.uuid))
             row = conn.execute(q).first()
             if row:
                 e_lb = self._converter.convert_book(row)
                 self._converter.populate_book(conn, e_lb)
                 e_lb.merge(book)
                 conn.execute(
                     sql.update(logbooks).where(
                         logbooks.c.uuid == e_lb.uuid).values(
                             e_lb.to_dict()))
                 for fd in book:
                     e_fd = e_lb.find(fd.uuid)
                     if e_fd is None:
                         e_lb.add(fd)
                         self._insert_flow_details(conn, fd, e_lb.uuid)
                     else:
                         self._update_flow_details(conn, fd, e_fd)
                 return e_lb
             else:
                 conn.execute(sql.insert(logbooks, book.to_dict()))
                 for fd in book:
                     self._insert_flow_details(conn, fd, book.uuid)
                 return book
     except sa_exc.DBAPIError as e:
         raise exc.StorageFailure(
             "Failed saving logbook"
             " '%s'" % book.uuid, e)
Code example #2
File: impl_sqlalchemy.py Project: hzrandd/taskflow
 def _destroy_logbook(self, session, lb_id):
     try:
         lb = _logbook_get_model(lb_id, session=session)
         session.delete(lb)
     except sa_exc.DBAPIError as e:
         LOG.exception('Failed destroying logbook')
         raise exc.StorageFailure("Failed destroying logbook %s" % lb_id, e)
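The two examples above already show the pattern that repeats through this whole listing: driver-level errors (SQLAlchemy's DBAPIError, kazoo exceptions, EnvironmentError) are caught at the backend boundary and re-raised as taskflow's StorageFailure. A minimal caller-side sketch of what that buys you, assuming an already-configured backend object and a models.LogBook instance named book (both stand-ins here):

    # Sketch only: callers handle taskflow's exception types instead of
    # backend-specific ones, whichever storage backend is configured.
    import contextlib
    import logging

    from taskflow import exceptions as exc

    LOG = logging.getLogger(__name__)

    try:
        with contextlib.closing(backend.get_connection()) as conn:
            conn.save_logbook(book)
    except exc.StorageFailure:
        LOG.exception("Could not persist logbook %s", book.uuid)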
Code example #3
 def clear_all(self):
     try:
         logbooks = self._tables.logbooks
         with self._engine.begin() as conn:
             conn.execute(logbooks.delete())
     except sa_exc.DBAPIError as e:
         raise exc.StorageFailure("Failed clearing all entries", e)
Code example #4
 def get_logbooks(self):
     try:
         books = list(self._get_logbooks())
     except EnvironmentError as e:
         raise exc.StorageFailure("Unable to fetch logbooks", e)
     else:
         for b in books:
             yield b
Code example #5
File: impl_zookeeper.py Project: hzrandd/taskflow
 def validate(self):
     with self._exc_wrapper():
         try:
             k_utils.check_compatible(self._client, MIN_ZK_VERSION)
         except exc.IncompatibleVersion as e:
             raise exc.StorageFailure(
                 "Backend storage is not a"
                 " compatible version", e)
Code example #6
File: impl_zookeeper.py Project: hzrandd/taskflow
 def close(self):
     self._validated = False
     if not self._owned:
         return
     try:
         k_utils.finalize_client(self._client)
     except (k_exc.KazooException, k_exc.ZookeeperError) as e:
         raise exc.StorageFailure("Unable to finalize client", e)
Code example #7
 def _step_create():
     for path in (self._book_path, self._flow_path, self._atom_path):
         try:
             misc.ensure_tree(path)
         except EnvironmentError as e:
             raise exc.StorageFailure(
                 "Unable to create logbooks"
                 " required child path %s" % path, e)
Code example #8
File: impl_dir.py Project: suneelb/taskflow
@contextlib.contextmanager
def _storagefailure_wrapper():
    try:
        yield
    except exc.TaskFlowException:
        raise
    except Exception as e:
        if isinstance(e, (IOError, OSError)) and e.errno == errno.ENOENT:
            raise exc.NotFound('Item not found: %s' % e.filename, e)
        raise exc.StorageFailure("Storage backend internal error", e)
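Because _storagefailure_wrapper is a generator function, the contextlib.contextmanager decorator above is what makes it usable in a with statement. A short usage sketch, with item_path standing in for a real file path:

    # Sketch: wrap filesystem access so a missing file surfaces as
    # exc.NotFound and any other failure as exc.StorageFailure.
    with _storagefailure_wrapper():
        with open(item_path) as fh:  # `item_path` is a placeholder
            contents = fh.read()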
Code example #9
 def _serialize(obj):
     if isinstance(obj, models.LogBook):
         return obj.to_dict(marshal_time=True)
     elif isinstance(obj, models.FlowDetail):
         return obj.to_dict()
     elif isinstance(obj, models.AtomDetail):
         return base._format_atom(obj)
     else:
         raise exc.StorageFailure("Invalid storage class %s" % type(obj))
Code example #10
File: impl_sqlalchemy.py Project: hzrandd/taskflow
 def _clear_all(self, session):
     # NOTE(harlowja): because of how our relationships are set up and
     # because cascading deletes are enabled, this will cause all
     # associated task details and flow details to be purged
     # automatically.
     try:
         return session.query(models.LogBook).delete()
     except sa_exc.DBAPIError as e:
         LOG.exception('Failed clearing all entries')
         raise exc.StorageFailure("Failed clearing all entries", e)
Code example #11
File: impl_sqlalchemy.py Project: hzrandd/taskflow
 def get_logbook(self, book_uuid):
     session = self._make_session()
     try:
         lb = _logbook_get_model(book_uuid, session=session)
         return _convert_lb_to_external(lb)
     except sa_exc.DBAPIError as e:
         LOG.exception('Failed getting logbook')
         raise exc.StorageFailure("Failed getting logbook %s" % book_uuid,
                                  e)
Code example #12
File: impl_zookeeper.py Project: suneelb/taskflow
    @contextlib.contextmanager
    def _exc_wrapper(self):
        """Exception context-manager which wraps kazoo exceptions.

        This is used to capture and wrap any kazoo specific exceptions and
        then group them into corresponding taskflow exceptions (not doing
        that would expose the underlying kazoo exception model).
        """
        try:
            yield
        except self._client.handler.timeout_exception as e:
            raise exc.StorageFailure("Storage backend timeout", e)
        except k_exc.SessionExpiredError as e:
            raise exc.StorageFailure("Storage backend session has expired", e)
        except k_exc.NoNodeError as e:
            raise exc.NotFound("Storage backend node not found: %s" % e)
        except k_exc.NodeExistsError as e:
            raise exc.Duplicate("Storage backend duplicate node: %s" % e)
        except (k_exc.KazooException, k_exc.ZookeeperError) as e:
            raise exc.StorageFailure("Storage backend internal error", e)
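Example #5 above shows this wrapper in action: validate enters self._exc_wrapper() so that any kazoo timeout or session error raised by check_compatible is translated before it can escape the backend. The ordering of the except clauses also matters here: the specific kazoo errors are mapped to NotFound and Duplicate first, and the broad (KazooException, ZookeeperError) clause acts as the catch-all at the end.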
Code example #13
 def _deserialize(cls, data):
     if issubclass(cls, models.LogBook):
         return cls.from_dict(data, unmarshal_time=True)
     elif issubclass(cls, models.FlowDetail):
         return cls.from_dict(data)
     elif issubclass(cls, models.AtomDetail):
         atom_class = models.atom_detail_class(data['type'])
         return atom_class.from_dict(data['atom'])
     else:
         raise exc.StorageFailure("Invalid storage class %s" % cls)
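_serialize (example #9) and _deserialize are intended as inverses of each other. A round-trip sketch, assuming the models module used by these snippets is importable:

    # Sketch: a LogBook survives a serialize/deserialize round trip.
    from taskflow.persistence import models

    book = models.LogBook("example-book")
    data = _serialize(book)                     # dict, times marshaled
    clone = _deserialize(models.LogBook, data)  # rebuilt LogBook
    assert clone.uuid == book.uuid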
Code example #14
 def _get_obj_path(self, obj):
     if isinstance(obj, models.LogBook):
         path = self.book_path
     elif isinstance(obj, models.FlowDetail):
         path = self.flow_path
     elif isinstance(obj, models.AtomDetail):
         path = self.atom_path
     else:
         raise exc.StorageFailure("Invalid storage class %s" % type(obj))
     return self._join_path(path, obj.uuid)
Code example #15
File: impl_sqlalchemy.py Project: hzrandd/taskflow
 def get_logbooks(self):
     session = self._make_session()
     try:
         raw_books = session.query(models.LogBook).all()
         books = [_convert_lb_to_external(lb) for lb in raw_books]
     except sa_exc.DBAPIError as e:
         LOG.exception('Failed getting logbooks')
         raise exc.StorageFailure("Failed getting logbooks", e)
     for lb in books:
         yield lb
Code example #16
 def _destroy_atoms(atom_details):
     for atom_detail in atom_details:
         atom_path = os.path.join(self._atom_path, atom_detail.uuid)
         try:
             shutil.rmtree(atom_path)
         except EnvironmentError as e:
             if e.errno != errno.ENOENT:
                 raise exc.StorageFailure(
                     "Unable to remove atom"
                     " directory %s" % atom_path, e)
Code example #17
 def get_atoms_for_flow(self, fd_uuid):
     gathered = []
     try:
         with contextlib.closing(self._engine.connect()) as conn:
             for ad in self._converter.atom_query_iter(conn, fd_uuid):
                 gathered.append(ad)
     except sa_exc.DBAPIError as e:
         raise exc.StorageFailure("Failed getting atom details", e)
     for atom_details in gathered:
         yield atom_details
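Example #16 above (together with examples #18 and #20 below) shows another recurring choice in the directory backend: ENOENT from shutil.rmtree is deliberately swallowed, so destroying something that is already gone counts as success and the delete operations stay idempotent, while every other EnvironmentError is escalated to StorageFailure.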
Code example #18
 def _destroy_book():
     book = self._get_logbook(book_uuid)
     book_path = os.path.join(self._book_path, book.uuid)
     self._run_with_process_lock("flow", _destroy_flows, list(book))
     try:
         shutil.rmtree(book_path)
     except EnvironmentError as e:
         if e.errno != errno.ENOENT:
             raise exc.StorageFailure(
                 "Unable to remove book"
                 " directory %s" % book_path, e)
Code example #19
 def _run_with_process_lock(self, lock_name, functor, *args, **kwargs):
     lock_path = os.path.join(self.backend.lock_path, lock_name)
     with lock_utils.InterProcessLock(lock_path):
         try:
             return functor(*args, **kwargs)
         except exc.TaskFlowException:
             raise
         except Exception as e:
             LOG.exception("Failed running locking file based session")
             # NOTE(harlowja): trap all other errors as storage errors.
             raise exc.StorageFailure("Storage backend internal error", e)
Code example #20
 def _destroy_flows(flow_details):
     for flow_detail in flow_details:
         flow_path = os.path.join(self._flow_path, flow_detail.uuid)
         self._run_with_process_lock("atom", _destroy_atoms,
                                     list(flow_detail))
         try:
             shutil.rmtree(flow_path)
         except EnvironmentError as e:
             if e.errno != errno.ENOENT:
                 raise exc.StorageFailure(
                     "Unable to remove flow"
                     " directory %s" % flow_path, e)
Code example #21
 def get_flows_for_book(self, book_uuid, lazy=False):
     gathered = []
     try:
         with contextlib.closing(self._engine.connect()) as conn:
             for fd in self._converter.flow_query_iter(conn, book_uuid):
                 if not lazy:
                     self._converter.populate_flow_detail(conn, fd)
                 gathered.append(fd)
     except sa_exc.DBAPIError as e:
         raise exc.StorageFailure("Failed getting flow details", e)
     for flow_details in gathered:
         yield flow_details
Code example #22
 @contextlib.contextmanager
 def _memory_lock(self, write=False):
     if write:
         lock = self.backend.lock.write_lock
     else:
         lock = self.backend.lock.read_lock
     with lock():
         try:
             yield
         except exc.TaskFlowException:
             raise
         except Exception as e:
             raise exc.StorageFailure("Storage backend internal error", e)
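The bare re-raise of exc.TaskFlowException here (and in examples #8 and #19) is a deliberate design choice: errors that have already been translated into taskflow's exception hierarchy pass through untouched and are never double-wrapped; only foreign exceptions get converted into StorageFailure.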
Code example #23
File: impl_sqlalchemy.py Project: hzrandd/taskflow
 def _save_logbook(self, session, lb):
     try:
         lb_m = _logbook_get_model(lb.uuid, session=session)
         lb_m = _logbook_merge(lb_m, lb)
     except exc.NotFound:
         lb_m = _convert_lb_to_internal(lb)
     try:
         lb_m = session.merge(lb_m)
         return _convert_lb_to_external(lb_m)
     except sa_exc.DBAPIError as e:
         LOG.exception('Failed saving logbook')
         raise exc.StorageFailure("Failed saving logbook %s" % lb.uuid, e)
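One detail worth calling out: every wrapper in this listing passes the original exception as the second argument to StorageFailure. In the taskflow versions these snippets come from, that argument is kept on the raised exception as its cause, so the driver-level error stays reachable for debugging. A small sketch, reusing the hypothetical conn and book from the earlier caller example:

    # Sketch: the wrapped low-level error remains attached to the
    # StorageFailure that callers actually catch.
    try:
        conn.get_logbook(book.uuid)
    except exc.StorageFailure as failure:
        LOG.debug("underlying driver error: %s", failure.cause)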
Code example #24
 def destroy_logbook(self, book_uuid):
     try:
         logbooks = self._tables.logbooks
         with self._engine.begin() as conn:
             q = logbooks.delete().where(logbooks.c.uuid == book_uuid)
             r = conn.execute(q)
             if r.rowcount == 0:
                 raise exc.NotFound("No logbook found with"
                                    " uuid '%s'" % book_uuid)
     except sa_exc.DBAPIError as e:
         raise exc.StorageFailure(
             "Failed destroying"
             " logbook '%s'" % book_uuid, e)
Code example #25
File: impl_sqlalchemy.py Project: hzrandd/taskflow
def _atomdetails_merge(ad_m, ad):
    atom_type = logbook.atom_detail_type(ad)
    if atom_type != ad_m.atom_type:
        raise exc.StorageFailure("Can not merge differing atom types "
                                 "(%s != %s)" % (atom_type, ad_m.atom_type))
    ad_d = ad.to_dict()
    ad_m.state = ad_d['state']
    ad_m.intention = ad_d['intention']
    ad_m.results = ad_d['results']
    ad_m.version = ad_d['version']
    ad_m.failure = ad_d['failure']
    ad_m.meta = ad_d['meta']
    ad_m.name = ad_d['name']
    return ad_m
Code example #26
 def get_logbooks(self, lazy=False):
     gathered = []
     try:
         with contextlib.closing(self._engine.connect()) as conn:
             q = sql.select([self._tables.logbooks])
             for row in conn.execute(q):
                 book = self._converter.convert_book(row)
                 if not lazy:
                     self._converter.populate_book(conn, book)
                 gathered.append(book)
     except sa_exc.DBAPIError as e:
         raise exc.StorageFailure("Failed getting logbooks", e)
     for book in gathered:
         yield book
Code example #27
 def test_record_ending_exception(self, mocked_warn):
     with contextlib.closing(impl_memory.MemoryBackend()) as be:
         flow = lf.Flow("test")
         flow.add(test_utils.TaskNoRequiresNoReturns("test-1"))
         (lb, fd) = persistence_utils.temporary_flow_detail(be)
         e = self._make_engine(flow, fd, be)
         timing_listener = timing.TimingListener(e)
         with mock.patch.object(timing_listener._engine.storage,
                                'update_atom_metadata') as mocked_uam:
             mocked_uam.side_effect = exc.StorageFailure('Woot!')
             with timing_listener:
                 e.run()
     mocked_warn.assert_called_once_with(mock.ANY, mock.ANY, 'test-1',
                                         exc_info=True)
Code example #28
 def upgrade(self):
     try:
         with contextlib.closing(self._engine.connect()) as conn:
             # NOTE(imelnikov): Alembic does not support SQLite, and
             # we don't recommend using SQLite in production
             # deployments, so migrations are rarely needed for it.
             # Rather than working around SQLite limitations, we
             # create the database directly from the table metadata
             # when SQLite is in use...
             if 'sqlite' in self._engine.url.drivername:
                 self._metadata.create_all(bind=conn)
             else:
                 migration.db_sync(conn)
     except sa_exc.SQLAlchemyError as e:
         raise exc.StorageFailure("Failed upgrading database version", e)
Code example #29
File: storage.py Project: suneelb/taskflow
 def save_retry_failure(self, retry_name, failed_atom_name, failure):
     """Save subflow failure to retry controller history."""
     ad = self._atomdetail_by_name(retry_name,
                                   expected_type=logbook.RetryDetail)
     try:
         failures = ad.last_failures
     except exceptions.NotFound as e:
         raise exceptions.StorageFailure(
             "Unable to fetch most recent"
             " retry failures so new retry"
             " failure can be inserted", e)
     else:
         if failed_atom_name not in failures:
             failures[failed_atom_name] = failure
             self._with_connection(self._save_atom_detail, ad)
Code example #30
 def get_atom_details(self, ad_uuid):
     try:
         atomdetails = self._tables.atomdetails
         with self._engine.begin() as conn:
             q = (sql.select([atomdetails]).
                  where(atomdetails.c.uuid == ad_uuid))
             row = conn.execute(q).first()
             if not row:
                 raise exc.NotFound("No atom details found with uuid"
                                    " '%s'" % ad_uuid)
             return self._converter.convert_atom_detail(row)
     except sa_exc.SQLAlchemyError as e:
         raise exc.StorageFailure(
             "Failed getting atom details with"
             " uuid '%s'" % ad_uuid, e)