Example 1
 def save_logbook(self, book):
     try:
         logbooks = self._tables.logbooks
         with self._engine.begin() as conn:
             q = (sql.select([logbooks]).
                  where(logbooks.c.uuid == book.uuid))
             row = conn.execute(q).first()
             if row:
                 e_lb = self._converter.convert_book(row)
                 self._converter.populate_book(conn, e_lb)
                 e_lb.merge(book)
                 conn.execute(
                     sql.update(logbooks).where(
                         logbooks.c.uuid == e_lb.uuid).values(
                             e_lb.to_dict()))
                 for fd in book:
                     e_fd = e_lb.find(fd.uuid)
                     if e_fd is None:
                         e_lb.add(fd)
                         self._insert_flow_details(conn, fd, e_lb.uuid)
                     else:
                         self._update_flow_details(conn, fd, e_fd)
                 return e_lb
             else:
                 conn.execute(sql.insert(logbooks, book.to_dict()))
                 for fd in book:
                     self._insert_flow_details(conn, fd, book.uuid)
                 return book
     except sa_exc.DBAPIError:
         exc.raise_with_cause(exc.StorageFailure,
                              "Failed saving logbook '%s'" % book.uuid)
Example 2
 def clear_all(self):
     try:
         logbooks = self._tables.logbooks
         with self._engine.begin() as conn:
             conn.execute(logbooks.delete())
     except sa_exc.DBAPIError:
         exc.raise_with_cause(exc.StorageFailure, "Failed clearing all entries")
Example 3
 def fetch(self, name, many_handler=None):
     """Fetch a named result."""
     def _many_handler(values):
         # By default we just return the first of many (unless provided
         # a different callback that can translate many results into
         # something more meaningful).
         return values[0]
     if many_handler is None:
         many_handler = _many_handler
     try:
         providers = self._reverse_mapping[name]
     except KeyError:
         exceptions.raise_with_cause(exceptions.NotFound,
                                     "Name %r is not mapped as a produced"
                                     " output by any providers" % name)
     values = []
     for provider in providers:
         if provider.name is _TRANSIENT_PROVIDER:
             values.append(_item_from_single(provider,
                                             self._transients, name))
         else:
             try:
                 container = self._get(provider.name, only_last=True)
             except exceptions.NotFound:
                 pass
             else:
                 values.append(_item_from_single(provider,
                                                 container, name))
     if not values:
         raise exceptions.NotFound("Unable to find result %r,"
                                   " searched %s" % (name, providers))
     else:
         return many_handler(values)
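
The many_handler hook decides how multiple provided values collapse into one result. A hedged usage sketch (storage stands in for whatever object exposes this fetch()):

first_value = storage.fetch('output_name')                    # default: values[0]
all_values = storage.fetch('output_name', many_handler=list)  # keep them all
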
Example 4
 def save_logbook(self, book):
     try:
         logbooks = self._tables.logbooks
         with self._engine.begin() as conn:
             q = (sql.select([logbooks]).
                  where(logbooks.c.uuid == book.uuid))
             row = conn.execute(q).first()
             if row:
                 e_lb = self._converter.convert_book(row)
                 self._converter.populate_book(conn, e_lb)
                 e_lb.merge(book)
                 conn.execute(sql.update(logbooks)
                              .where(logbooks.c.uuid == e_lb.uuid)
                              .values(e_lb.to_dict()))
                 for fd in book:
                     e_fd = e_lb.find(fd.uuid)
                     if e_fd is None:
                         e_lb.add(fd)
                         self._insert_flow_details(conn, fd, e_lb.uuid)
                     else:
                         self._update_flow_details(conn, fd, e_fd)
                 return e_lb
             else:
                 conn.execute(sql.insert(logbooks, book.to_dict()))
                 for fd in book:
                     self._insert_flow_details(conn, fd, book.uuid)
                 return book
     except sa_exc.DBAPIError:
         exc.raise_with_cause(
             exc.StorageFailure,
             "Failed saving logbook '%s'" % book.uuid)
Example 5
 def _get_script(self, name):
     try:
         return self._scripts[name]
     except KeyError:
         exc.raise_with_cause(exc.NotFound,
                              "Can not access %s script (has this"
                              " board been connected?)" % name)
Example 6
 def clear_all(self):
     try:
         logbooks = self._tables.logbooks
         with self._engine.begin() as conn:
             conn.execute(logbooks.delete())
     except sa_exc.DBAPIError:
         exc.raise_with_cause(exc.StorageFailure,
                              "Failed clearing all entries")
Example 7
 def validate(self):
     with self._exc_wrapper():
         try:
             if self._conf.get('check_compatible', True):
                 k_utils.check_compatible(self._client, MIN_ZK_VERSION)
         except exc.IncompatibleVersion:
             exc.raise_with_cause(exc.StorageFailure, "Backend storage is"
                                  " not a compatible version")
Example 8
 def close(self):
     self._validated = False
     if not self._owned:
         return
     try:
         k_utils.finalize_client(self._client)
     except (k_exc.KazooException, k_exc.ZookeeperError):
         exc.raise_with_cause(exc.StorageFailure,
                              "Unable to finalize client")
Example 10
 def validate(self):
     with self._exc_wrapper():
         try:
             if self._conf.get('check_compatible', True):
                 k_utils.check_compatible(self._client, MIN_ZK_VERSION)
         except exc.IncompatibleVersion:
             exc.raise_with_cause(
                 exc.StorageFailure, "Backend storage is"
                 " not a compatible version")
Example 11
def _storagefailure_wrapper():
    try:
        yield
    except exc.TaskFlowException:
        raise
    except Exception as e:
        if isinstance(e, (IOError, OSError)) and e.errno == errno.ENOENT:
            exc.raise_with_cause(exc.NotFound, "Item not found: %s" % e.filename, cause=e)
        else:
            exc.raise_with_cause(exc.StorageFailure, "Storage backend internal error", cause=e)
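
As written this is a generator, so in its home module it is presumably decorated with contextlib.contextmanager; a caller would then guard storage operations like this (hypothetical usage, item_path illustrative):

import os

with _storagefailure_wrapper():
    os.unlink(item_path)  # ENOENT surfaces as exc.NotFound; any other
                          # unexpected error becomes exc.StorageFailure
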
Example 12
 def destroy_logbook(self, book_uuid):
     try:
         logbooks = self._tables.logbooks
         with self._engine.begin() as conn:
             q = logbooks.delete().where(logbooks.c.uuid == book_uuid)
             r = conn.execute(q)
             if r.rowcount == 0:
                 raise exc.NotFound("No logbook found with" " uuid '%s'" % book_uuid)
     except sa_exc.DBAPIError:
         exc.raise_with_cause(exc.StorageFailure, "Failed destroying logbook '%s'" % book_uuid)
Example 13
 def _dumps(obj):
     try:
         return msgpackutils.dumps(obj)
     except (msgpack.PackException, ValueError):
         # TODO(harlowja): remove direct msgpack exception access when
         # oslo.utils provides easy access to the underlying msgpack
         # pack/unpack exceptions..
         exc.raise_with_cause(exc.JobFailure,
                              "Failed to serialize object to"
                              " msgpack blob")
Example 14
 def _unclaimable_try_find_owner(cause):
     try:
         owner = self.find_owner(job)
     except Exception:
         owner = None
     if owner:
         message = "Job %s already claimed by '%s'" % (job.uuid, owner)
     else:
         message = "Job %s already claimed" % (job.uuid)
     excp.raise_with_cause(excp.UnclaimableJob, message, cause=cause)
Example 15
 def get_atoms_for_flow(self, fd_uuid):
     gathered = []
     try:
         with contextlib.closing(self._engine.connect()) as conn:
             for ad in self._converter.atom_query_iter(conn, fd_uuid):
                 gathered.append(ad)
     except sa_exc.DBAPIError:
         exc.raise_with_cause(
             exc.StorageFailure,
             "Failed getting atom details in flow detail '%s'" % fd_uuid)
     for atom_details in gathered:
         yield atom_details
Example 16
def _item_from_single(provider, container, looking_for):
    """Returns item from a *single* provider."""
    try:
        return _item_from(container, provider.index)
    except _EXTRACTION_EXCEPTIONS:
        exceptions.raise_with_cause(
            exceptions.NotFound,
            "Unable to find result %r, expected to be able to find it"
            " created by %s but was unable to perform successful"
            " extraction" % (looking_for, provider))
Example 17
 def _loads(blob, root_types=(dict,)):
     try:
         return misc.decode_msgpack(blob, root_types=root_types)
     except (msgpack.UnpackException, ValueError):
         # TODO(harlowja): remove direct msgpack exception access when
         # oslo.utils provides easy access to the underlying msgpack
         # pack/unpack exceptions..
         exc.raise_with_cause(exc.JobFailure,
                              "Failed to deserialize object from"
                              " msgpack blob (of length %s)" % len(blob))
Example 19
 def _get_results(looking_for, provider):
     """Gets the results saved for a given provider."""
     try:
         return self._get(provider.name, only_last=True)
     except exceptions.NotFound:
         exceptions.raise_with_cause(exceptions.NotFound,
                                     "Expected to be able to find"
                                     " output %r produced by %s but was"
                                     " unable to get at that providers"
                                     " results" % (looking_for,
                                                   provider))
Example 20
 def get_atom_details(self, ad_uuid):
     try:
         atomdetails = self._tables.atomdetails
         with self._engine.begin() as conn:
             q = sql.select([atomdetails]).where(atomdetails.c.uuid == ad_uuid)
             row = conn.execute(q).first()
             if not row:
                 raise exc.NotFound("No atom details found with uuid" " '%s'" % ad_uuid)
             return self._converter.convert_atom_detail(row)
     except sa_exc.SQLAlchemyError:
         exc.raise_with_cause(
             exc.StorageFailure,
             "Failed getting atom details with uuid '%s'" % ad_uuid)
Example 21
 def _get_results(looking_for, provider):
     """Gets the results saved for a given provider."""
     try:
         return self._get(provider.name, 'last_results', 'failure',
                          _EXECUTE_STATES_WITH_RESULTS, states.EXECUTE)
     except exceptions.NotFound:
         exceptions.raise_with_cause(
             exceptions.NotFound, "Expected to be able to find"
             " output %r produced by %s but was"
             " unable to get at that providers"
             " results" % (looking_for, provider))
Example 22
 def _unclaimable_try_find_owner(cause):
     try:
         owner = self.find_owner(job)
     except Exception:
         owner = None
     if owner:
         message = "Job %s already claimed by '%s'" % (job.uuid, owner)
     else:
         message = "Job %s already claimed" % (job.uuid)
     excp.raise_with_cause(excp.UnclaimableJob,
                           message, cause=cause)
Example 23
 def destroy_logbook(self, book_uuid):
     try:
         logbooks = self._tables.logbooks
         with self._engine.begin() as conn:
             q = logbooks.delete().where(logbooks.c.uuid == book_uuid)
             r = conn.execute(q)
             if r.rowcount == 0:
                 raise exc.NotFound("No logbook found with"
                                    " uuid '%s'" % book_uuid)
     except sa_exc.DBAPIError:
         exc.raise_with_cause(exc.StorageFailure,
                              "Failed destroying logbook '%s'" % book_uuid)
Example 24
 def get_atoms_for_flow(self, fd_uuid):
     gathered = []
     try:
         with contextlib.closing(self._engine.connect()) as conn:
             for ad in self._converter.atom_query_iter(conn, fd_uuid):
                 gathered.append(ad)
     except sa_exc.DBAPIError:
         exc.raise_with_cause(
             exc.StorageFailure, "Failed getting atom details in flow"
             " detail '%s'" % fd_uuid)
     for atom_details in gathered:
         yield atom_details
Example 25
 def get_flows_for_book(self, book_uuid, lazy=False):
     gathered = []
     try:
         with contextlib.closing(self._engine.connect()) as conn:
             for fd in self._converter.flow_query_iter(conn, book_uuid):
                 if not lazy:
                     self._converter.populate_flow_detail(conn, fd)
                 gathered.append(fd)
     except sa_exc.DBAPIError:
         exc.raise_with_cause(
             exc.StorageFailure,
             "Failed getting flow details in logbook '%s'" % book_uuid)
     for flow_details in gathered:
         yield flow_details
Example 26
 def _memory_lock(self, write=False):
     if write:
         lock = self.backend.lock.write_lock
     else:
         lock = self.backend.lock.read_lock
     with lock():
         try:
             yield
         except exc.TaskFlowException:
             raise
         except Exception:
             exc.raise_with_cause(exc.StorageFailure,
                                  "Storage backend internal error")
Example 27
 def test_raise_with(self):
     capture = None
     try:
         raise IOError('broken')
     except Exception:
         try:
             exc.raise_with_cause(exc.TaskFlowException, 'broken')
         except Exception as e:
             capture = e
     self.assertIsNotNone(capture)
     self.assertIsInstance(capture, exc.TaskFlowException)
     self.assertIsNotNone(capture.cause)
     self.assertIsInstance(capture.cause, IOError)
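
The test pins down the chaining contract: the wrapping TaskFlowException exposes the original IOError through its cause attribute. Plain Python 3 gives the equivalent guarantee via __cause__, as this standalone sketch shows:

try:
    raise IOError('broken')
except IOError as lower:
    try:
        raise RuntimeError('wrapper') from lower
    except RuntimeError as upper:
        assert upper.__cause__ is lower  # the cause link survives the re-raise
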
Example 29
 def _get_results(looking_for, provider):
     """Gets the results saved for a given provider."""
     try:
         return self._get(provider.name, 'last_results', 'failure',
                          _EXECUTE_STATES_WITH_RESULTS,
                          states.EXECUTE)
     except exceptions.NotFound:
         exceptions.raise_with_cause(exceptions.NotFound,
                                     "Expected to be able to find"
                                     " output %r produced by %s but was"
                                     " unable to get at that providers"
                                     " results" % (looking_for,
                                                   provider))
Example 31
    def claim(self, job, who):
        def _unclaimable_try_find_owner(cause):
            try:
                owner = self.find_owner(job)
            except Exception:
                owner = None
            if owner:
                message = "Job %s already claimed by '%s'" % (job.uuid, owner)
            else:
                message = "Job %s already claimed" % (job.uuid)
            excp.raise_with_cause(excp.UnclaimableJob, message, cause=cause)

        with self._wrap(job.uuid,
                        job.path,
                        fail_msg_tpl="Claiming failure: %s"):
            # NOTE(harlowja): post as json which will allow for future changes
            # more easily than a raw string/text.
            value = jsonutils.dumps({
                'owner': who,
            })
            # Ensure the target job is still existent (at the right version).
            job_data, job_stat = self._client.get(job.path)
            txn = self._client.transaction()
            # This will abort (and not create the lock) if the job has been
            # removed (somehow...) or updated by someone else to a different
            # version...
            txn.check(job.path, version=job_stat.version)
            txn.create(job.lock_path,
                       value=misc.binary_encode(value),
                       ephemeral=True)
            try:
                kazoo_utils.checked_commit(txn)
            except k_exceptions.NodeExistsError as e:
                _unclaimable_try_find_owner(e)
            except kazoo_utils.KazooTransactionException as e:
                if len(e.failures) < 2:
                    raise
                else:
                    if isinstance(e.failures[0], k_exceptions.NoNodeError):
                        excp.raise_with_cause(
                            excp.NotFound,
                            "Job %s not found to be claimed" % job.uuid,
                            cause=e.failures[0])
                    if isinstance(e.failures[1], k_exceptions.NodeExistsError):
                        _unclaimable_try_find_owner(e.failures[1])
                    else:
                        excp.raise_with_cause(
                            excp.UnclaimableJob,
                            "Job %s claim failed due to transaction"
                            " not succeeding" % (job.uuid),
                            cause=e)
Example 32
 def get_flows_for_book(self, book_uuid, lazy=False):
     gathered = []
     try:
         with contextlib.closing(self._engine.connect()) as conn:
             for fd in self._converter.flow_query_iter(conn, book_uuid):
                 if not lazy:
                     self._converter.populate_flow_detail(conn, fd)
                 gathered.append(fd)
     except sa_exc.DBAPIError:
         exc.raise_with_cause(
             exc.StorageFailure, "Failed getting flow details in"
             " logbook '%s'" % book_uuid)
     for flow_details in gathered:
         yield flow_details
Example 33
 def get_logbooks(self, lazy=False):
     gathered = []
     try:
         with contextlib.closing(self._engine.connect()) as conn:
             q = sql.select([self._tables.logbooks])
             for row in conn.execute(q):
                 book = self._converter.convert_book(row)
                 if not lazy:
                     self._converter.populate_book(conn, book)
                 gathered.append(book)
     except sa_exc.DBAPIError:
         exc.raise_with_cause(exc.StorageFailure, "Failed getting logbooks")
     for book in gathered:
         yield book
Example 34
    def last_failures(self):
        """The last failure dictionary that was produced.

        NOTE(harlowja): This is **not** the same as the
        local ``failure`` attribute as the obtained failure dictionary in
        the ``results`` attribute (which is what this returns) is from
        associated atom failures (which is different from the directly
        related failure of the retry unit associated with this
        atom detail).
        """
        try:
            return self.results[-1][1]
        except IndexError:
            exc.raise_with_cause(exc.NotFound, "Last failures not found")
Example 35
 def get_flow_details(self, fd_uuid, lazy=False):
     try:
         flowdetails = self._tables.flowdetails
         with self._engine.begin() as conn:
             q = sql.select([flowdetails]).where(flowdetails.c.uuid == fd_uuid)
             row = conn.execute(q).first()
             if not row:
                 raise exc.NotFound("No flow details found with uuid" " '%s'" % fd_uuid)
             fd = self._converter.convert_flow_detail(row)
             if not lazy:
                 self._converter.populate_flow_detail(conn, fd)
             return fd
     except sa_exc.SQLAlchemyError:
         exc.raise_with_cause(
             exc.StorageFailure,
             "Failed getting flow details with uuid '%s'" % fd_uuid)
Example 36
def _storagefailure_wrapper():
    try:
        yield
    except exc.TaskFlowException:
        raise
    except Exception as e:
        if isinstance(e, (IOError, OSError)) and e.errno == errno.ENOENT:
            exc.raise_with_cause(exc.NotFound,
                                 'Item not found: %s' % e.filename,
                                 cause=e)
        else:
            exc.raise_with_cause(exc.StorageFailure,
                                 "Storage backend internal error",
                                 cause=e)
Example 37
 def _get_node_attr(self, path, attr_name, trans_func=None):
     try:
         _data, node_stat = self._client.get(path)
         attr = getattr(node_stat, attr_name)
         if trans_func is not None:
             return trans_func(attr)
         else:
             return attr
     except k_exceptions.NoNodeError:
         excp.raise_with_cause(
             excp.NotFound, "Can not fetch the %r attribute of job %s (%s),"
             " path %s not found" % (attr_name, self.uuid, self.path, path))
     except self._client.handler.timeout_exception:
         excp.raise_with_cause(
             excp.JobFailure,
             "Can not fetch the %r attribute of job %s (%s),"
             " operation timed out" % (attr_name, self.uuid, self.path))
     except k_exceptions.SessionExpiredError:
         excp.raise_with_cause(
             excp.JobFailure,
             "Can not fetch the %r attribute of job %s (%s),"
             " session expired" % (attr_name, self.uuid, self.path))
     except (AttributeError, k_exceptions.KazooException):
         excp.raise_with_cause(
             excp.JobFailure,
             "Can not fetch the %r attribute of job %s (%s),"
             " internal error" % (attr_name, self.uuid, self.path))
Example 39
 def _wrap(self,
           job_uuid,
           job_path,
           fail_msg_tpl="Failure: %s",
           ensure_known=True):
     if job_path:
         fail_msg_tpl += " (%s)" % (job_path)
     if ensure_known:
         if not job_path:
             raise ValueError("Unable to check if %r is a known path" %
                              (job_path))
         if job_path not in self._known_jobs:
             fail_msg_tpl += ", unknown job"
             raise excp.NotFound(fail_msg_tpl % (job_uuid))
     try:
         yield
     except self._client.handler.timeout_exception:
         fail_msg_tpl += ", operation timed out"
         excp.raise_with_cause(excp.JobFailure, fail_msg_tpl % (job_uuid))
     except k_exceptions.SessionExpiredError:
         fail_msg_tpl += ", session expired"
         excp.raise_with_cause(excp.JobFailure, fail_msg_tpl % (job_uuid))
     except k_exceptions.NoNodeError:
         fail_msg_tpl += ", unknown job"
         excp.raise_with_cause(excp.NotFound, fail_msg_tpl % (job_uuid))
     except k_exceptions.KazooException:
         fail_msg_tpl += ", internal error"
         excp.raise_with_cause(excp.JobFailure, fail_msg_tpl % (job_uuid))
Example 40
 def _wrap(self, job_uuid, job_path,
           fail_msg_tpl="Failure: %s", ensure_known=True):
     if job_path:
         fail_msg_tpl += " (%s)" % (job_path)
     if ensure_known:
         if not job_path:
             raise ValueError("Unable to check if %r is a known path"
                              % (job_path))
         if job_path not in self._known_jobs:
             fail_msg_tpl += ", unknown job"
             raise excp.NotFound(fail_msg_tpl % (job_uuid))
     try:
         yield
     except self._client.handler.timeout_exception:
         fail_msg_tpl += ", operation timed out"
         excp.raise_with_cause(excp.JobFailure, fail_msg_tpl % (job_uuid))
     except k_exceptions.SessionExpiredError:
         fail_msg_tpl += ", session expired"
         excp.raise_with_cause(excp.JobFailure, fail_msg_tpl % (job_uuid))
     except k_exceptions.NoNodeError:
         fail_msg_tpl += ", unknown job"
         excp.raise_with_cause(excp.NotFound, fail_msg_tpl % (job_uuid))
     except k_exceptions.KazooException:
         fail_msg_tpl += ", internal error"
         excp.raise_with_cause(excp.JobFailure, fail_msg_tpl % (job_uuid))
Example 42
 def _get_node_attr(self, path, attr_name, trans_func=None):
     try:
         _data, node_stat = self._client.get(path)
         attr = getattr(node_stat, attr_name)
         if trans_func is not None:
             return trans_func(attr)
         else:
             return attr
     except k_exceptions.NoNodeError:
         excp.raise_with_cause(
             excp.NotFound,
             "Can not fetch the %r attribute of job %s (%s),"
             " path %s not found" % (attr_name, self.uuid,
                                     self.path, path))
     except self._client.handler.timeout_exception:
         excp.raise_with_cause(
             excp.JobFailure,
             "Can not fetch the %r attribute of job %s (%s),"
             " operation timed out" % (attr_name, self.uuid, self.path))
     except k_exceptions.SessionExpiredError:
         excp.raise_with_cause(
             excp.JobFailure,
             "Can not fetch the %r attribute of job %s (%s),"
             " session expired" % (attr_name, self.uuid, self.path))
     except (AttributeError, k_exceptions.KazooException):
         excp.raise_with_cause(
             excp.JobFailure,
             "Can not fetch the %r attribute of job %s (%s),"
             " internal error" % (attr_name, self.uuid, self.path))
Example 43
 def validate(cls, data):
     try:
         su.schema_validate(data, cls.SCHEMA)
     except su.ValidationError as e:
         cls_name = reflection.get_class_name(cls, fully_qualified=False)
         excp.raise_with_cause(excp.InvalidFormat,
                               "%s message response data not of the"
                               " expected format: %s" %
                               (cls_name, e.message),
                               cause=e)
     else:
         state = data['state']
         if state == FAILURE and 'result' in data:
             ft.Failure.validate(data['result'])
Example 44
 def validate(cls, data):
     try:
         su.schema_validate(data, cls.SCHEMA)
     except su.ValidationError as e:
         cls_name = reflection.get_class_name(cls, fully_qualified=False)
         excp.raise_with_cause(excp.InvalidFormat,
                               "%s message response data not of the"
                               " expected format: %s" % (cls_name,
                                                         e.message),
                               cause=e)
     else:
         state = data['state']
         if state == FAILURE and 'result' in data:
             ft.Failure.validate(data['result'])
Example 45
 def get_logbook(self, book_uuid, lazy=False):
     try:
         logbooks = self._tables.logbooks
         with contextlib.closing(self._engine.connect()) as conn:
             q = sql.select([logbooks]).where(logbooks.c.uuid == book_uuid)
             row = conn.execute(q).first()
             if not row:
                 raise exc.NotFound("No logbook found with" " uuid '%s'" % book_uuid)
             book = self._converter.convert_book(row)
             if not lazy:
                 self._converter.populate_book(conn, book)
             return book
     except sa_exc.DBAPIError:
         exc.raise_with_cause(exc.StorageFailure, "Failed getting logbook '%s'" % book_uuid)
Example 46
    def claim(self, job, who):
        def _unclaimable_try_find_owner(cause):
            try:
                owner = self.find_owner(job)
            except Exception:
                owner = None
            if owner:
                message = "Job %s already claimed by '%s'" % (job.uuid, owner)
            else:
                message = "Job %s already claimed" % (job.uuid)
            excp.raise_with_cause(excp.UnclaimableJob,
                                  message, cause=cause)

        with self._wrap(job.uuid, job.path,
                        fail_msg_tpl="Claiming failure: %s"):
            # NOTE(harlowja): post as json which will allow for future changes
            # more easily than a raw string/text.
            value = jsonutils.dumps({
                'owner': who,
            })
            # Ensure the target job is still existent (at the right version).
            job_data, job_stat = self._client.get(job.path)
            txn = self._client.transaction()
            # This will abort (and not create the lock) if the job has been
            # removed (somehow...) or updated by someone else to a different
            # version...
            txn.check(job.path, version=job_stat.version)
            txn.create(job.lock_path, value=misc.binary_encode(value),
                       ephemeral=True)
            try:
                kazoo_utils.checked_commit(txn)
            except k_exceptions.NodeExistsError as e:
                _unclaimable_try_find_owner(e)
            except kazoo_utils.KazooTransactionException as e:
                if len(e.failures) < 2:
                    raise
                else:
                    if isinstance(e.failures[0], k_exceptions.NoNodeError):
                        excp.raise_with_cause(
                            excp.NotFound,
                            "Job %s not found to be claimed" % job.uuid,
                            cause=e.failures[0])
                    if isinstance(e.failures[1], k_exceptions.NodeExistsError):
                        _unclaimable_try_find_owner(e.failures[1])
                    else:
                        excp.raise_with_cause(
                            excp.UnclaimableJob,
                            "Job %s claim failed due to transaction"
                            " not succeeding" % (job.uuid), cause=e)
Example 47
 def update_atom_details(self, atom_detail):
     try:
         atomdetails = self._tables.atomdetails
         with self._engine.begin() as conn:
             q = sql.select([atomdetails]).where(atomdetails.c.uuid == atom_detail.uuid)
             row = conn.execute(q).first()
             if not row:
                 raise exc.NotFound("No atom details found with uuid" " '%s'" % atom_detail.uuid)
             e_ad = self._converter.convert_atom_detail(row)
             self._update_atom_details(conn, atom_detail, e_ad)
         return e_ad
     except sa_exc.SQLAlchemyError:
         exc.raise_with_cause(
             exc.StorageFailure,
             "Failed updating atom details with uuid '%s'" % atom_detail.uuid)
Example 48
 def get_atom_details(self, ad_uuid):
     try:
         atomdetails = self._tables.atomdetails
         with self._engine.begin() as conn:
             q = (sql.select([atomdetails]).
                  where(atomdetails.c.uuid == ad_uuid))
             row = conn.execute(q).first()
             if not row:
                 raise exc.NotFound("No atom details found with uuid"
                                    " '%s'" % ad_uuid)
             return self._converter.convert_atom_detail(row)
     except sa_exc.SQLAlchemyError:
         exc.raise_with_cause(
             exc.StorageFailure, "Failed getting atom details with"
             " uuid '%s'" % ad_uuid)
Example 49
 def save_retry_failure(self, retry_name, failed_atom_name, failure):
     """Save subflow failure to retry controller history."""
     source, clone = self._atomdetail_by_name(
         retry_name, expected_type=models.RetryDetail, clone=True)
     try:
         failures = clone.last_failures
     except exceptions.NotFound:
         exceptions.raise_with_cause(
             exceptions.StorageFailure, "Unable to fetch most recent retry"
             " failures so new retry failure can"
             " be inserted")
     else:
         if failed_atom_name not in failures:
             failures[failed_atom_name] = failure
             self._with_connection(self._save_atom_detail, source, clone)
Example 50
 def upgrade(self):
     try:
         with contextlib.closing(self._engine.connect()) as conn:
             # NOTE(imelnikov): Alembic does not support SQLite,
             # and we don't recommend to use SQLite in production
             # deployments, so migrations are rarely needed
             # for SQLite. So we don't bother about working around
             # SQLite limitations, and create the database directly from
             # the tables when it is in use...
             if "sqlite" in self._engine.url.drivername:
                 self._metadata.create_all(bind=conn)
             else:
                 migration.db_sync(conn)
     except sa_exc.SQLAlchemyError:
         exc.raise_with_cause(exc.StorageFailure, "Failed upgrading database version")
Example 51
 def save_retry_failure(self, retry_name, failed_atom_name, failure):
     """Save subflow failure to retry controller history."""
     source, clone = self._atomdetail_by_name(
         retry_name, expected_type=models.RetryDetail, clone=True)
     try:
         failures = clone.last_failures
     except exceptions.NotFound:
         exceptions.raise_with_cause(exceptions.StorageFailure,
                                     "Unable to fetch most recent retry"
                                     " failures so new retry failure can"
                                     " be inserted")
     else:
         if failed_atom_name not in failures:
             failures[failed_atom_name] = failure
             self._with_connection(self._save_atom_detail, source, clone)
Example 52
    def connect(self, timeout=10.0):

        def try_clean():
            # Attempt to do the needed cleanup if post-connection setup does
            # not succeed (maybe the connection is lost right after it is
            # obtained).
            try:
                self.close()
            except k_exceptions.KazooException:
                LOG.exception("Failed cleaning-up after post-connection"
                              " initialization failed")

        try:
            if timeout is not None:
                timeout = float(timeout)
            self._client.start(timeout=timeout)
            self._closing = False
        except (self._client.handler.timeout_exception,
                k_exceptions.KazooException):
            excp.raise_with_cause(excp.JobFailure,
                                  "Failed to connect to zookeeper")
        try:
            if self._conf.get('check_compatible', True):
                kazoo_utils.check_compatible(self._client, self.MIN_ZK_VERSION)
            if self._worker is None and self._emit_notifications:
                self._worker = futurist.ThreadPoolExecutor(max_workers=1)
            self._client.ensure_path(self.path)
            self._client.ensure_path(self.trash_path)
            if self._job_watcher is None:
                self._job_watcher = watchers.ChildrenWatch(
                    self._client,
                    self.path,
                    func=self._on_job_posting,
                    allow_session_lost=True)
            self._connected = True
        except excp.IncompatibleVersion:
            with excutils.save_and_reraise_exception():
                try_clean()
        except (self._client.handler.timeout_exception,
                k_exceptions.KazooException):
            exc_type, exc, exc_tb = sys.exc_info()
            try:
                try_clean()
                excp.raise_with_cause(excp.JobFailure,
                                      "Failed to do post-connection"
                                      " initialization", cause=exc)
            finally:
                del exc_type, exc, exc_tb
Example 53
 def upgrade(self):
     try:
         with contextlib.closing(self._engine.connect()) as conn:
             # NOTE(imelnikov): Alembic does not support SQLite,
             # and we don't recommend to use SQLite in production
             # deployments, so migrations are rarely needed
             # for SQLite. So we don't bother about working around
             # SQLite limitations, and create the database directly from
             # the tables when it is in use...
             if 'sqlite' in self._engine.url.drivername:
                 self._metadata.create_all(bind=conn)
             else:
                 migration.db_sync(conn)
     except sa_exc.SQLAlchemyError:
         exc.raise_with_cause(exc.StorageFailure,
                              "Failed upgrading database version")
Example 54
 def get_revert_result(self, atom_name):
     """Gets the ``revert`` results for an atom from storage."""
     try:
         results = self._get(atom_name, 'revert_results', 'revert_failure',
                             _REVERT_STATES_WITH_RESULTS, states.REVERT)
     except exceptions.DisallowedAccess as e:
         if e.state == states.IGNORE:
             exceptions.raise_with_cause(
                 exceptions.NotFound, "Result for atom '%s' revert is"
                 " not known (as it was"
                 " ignored)" % atom_name)
         else:
             exceptions.raise_with_cause(
                 exceptions.NotFound, "Result for atom '%s' revert is"
                 " not known" % atom_name)
     else:
         return results
Example 55
 def get_logbook(self, book_uuid, lazy=False):
     try:
         logbooks = self._tables.logbooks
         with contextlib.closing(self._engine.connect()) as conn:
             q = (sql.select([logbooks]).
                  where(logbooks.c.uuid == book_uuid))
             row = conn.execute(q).first()
             if not row:
                 raise exc.NotFound("No logbook found with"
                                    " uuid '%s'" % book_uuid)
             book = self._converter.convert_book(row)
             if not lazy:
                 self._converter.populate_book(conn, book)
             return book
     except sa_exc.DBAPIError:
         exc.raise_with_cause(exc.StorageFailure,
                              "Failed getting logbook '%s'" % book_uuid)
Example 56
 def _atomdetail_by_name(self, atom_name, expected_type=None, clone=False):
     try:
         ad = self._flowdetail.find(self._atom_name_to_uuid[atom_name])
     except KeyError:
         exceptions.raise_with_cause(exceptions.NotFound,
                                     "Unknown atom name: %s" % atom_name)
     else:
         # TODO(harlowja): we need to figure out how to get away from doing
         # these kinds of type checks in general (since they likely mean
         # we aren't doing something right).
         if expected_type and not isinstance(ad, expected_type):
             raise TypeError(
                 "Atom %s is not of the expected type: %s" %
                 (atom_name, reflection.get_class_name(expected_type)))
         if clone:
             return (ad, ad.copy())
         else:
             return (ad, ad)
Example 57
 def get_flow_details(self, fd_uuid, lazy=False):
     try:
         flowdetails = self._tables.flowdetails
         with self._engine.begin() as conn:
             q = (sql.select([flowdetails]).
                  where(flowdetails.c.uuid == fd_uuid))
             row = conn.execute(q).first()
             if not row:
                 raise exc.NotFound("No flow details found with uuid"
                                    " '%s'" % fd_uuid)
             fd = self._converter.convert_flow_detail(row)
             if not lazy:
                 self._converter.populate_flow_detail(conn, fd)
             return fd
     except sa_exc.SQLAlchemyError:
         exc.raise_with_cause(
             exc.StorageFailure, "Failed getting flow details with"
             " uuid '%s'" % fd_uuid)