Example #1
    def _flow_detail_from_job(self, job):
        """Extracts a flow detail from a job (via some manner).

        The current mechanism to accomplish this uses the following choices:

        * If the job details provide a 'flow_uuid' key, attempt to load that
          key from the job's book and use the result as the flow_detail to
          run.
        * If the job details do not have a 'flow_uuid' key, examine the size
          of the book and, if it contains only one element (aka one
          flow_detail), just use that.
        * Otherwise, if there is no 'flow_uuid' defined or there are > 1
          flow_details in the book, raise an error that corresponds to being
          unable to locate the correct flow_detail to run.
        """
        book = job.book
        if book is None:
            raise excp.NotFound("No book found in job")
        if job.details and 'flow_uuid' in job.details:
            flow_uuid = job.details["flow_uuid"]
            flow_detail = book.find(flow_uuid)
            if flow_detail is None:
                raise excp.NotFound("No matching flow detail found in"
                                    " jobs book for flow detail"
                                    " with uuid %s" % flow_uuid)
        else:
            choices = len(book)
            if choices == 1:
                flow_detail = list(book)[0]
            elif choices == 0:
                raise excp.NotFound("No flow detail(s) found in jobs book")
            else:
                raise excp.MultipleChoices("No matching flow detail found (%s"
                                           " choices) in jobs book" % choices)
        return flow_detail
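The resolution order above (an explicit 'flow_uuid' first, then a single-entry book as a fallback) is easy to exercise on its own. Below is a minimal sketch of the same decision logic, assuming plain dictionaries and a list stand in for the job details and the book; the names are hypothetical and not the taskflow objects used above.

    def resolve_flow_detail(details, book):
        # Prefer an explicitly referenced flow detail.
        flow_uuid = (details or {}).get('flow_uuid')
        if flow_uuid is not None:
            matches = [fd for fd in book if fd['uuid'] == flow_uuid]
            if not matches:
                raise LookupError("No flow detail with uuid %s" % flow_uuid)
            return matches[0]
        # Otherwise the book must contain exactly one flow detail.
        if len(book) == 1:
            return book[0]
        if not book:
            raise LookupError("No flow detail(s) found in book")
        raise ValueError("%s flow details found, none selected" % len(book))

    book = [{'uuid': 'a1'}, {'uuid': 'b2'}]
    print(resolve_flow_detail({'flow_uuid': 'b2'}, book))  # {'uuid': 'b2'}
    print(resolve_flow_detail(None, book[:1]))             # {'uuid': 'a1'}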
Example #2
 def _wrap(self,
           job_uuid,
           job_path,
           fail_msg_tpl="Failure: %s",
           ensure_known=True):
     if job_path:
         fail_msg_tpl += " (%s)" % (job_path)
     if ensure_known:
         if not job_path:
             raise ValueError("Unable to check if %r is a known path" %
                              (job_path))
         if job_path not in self._known_jobs:
             fail_msg_tpl += ", unknown job"
             raise excp.NotFound(fail_msg_tpl % (job_uuid))
     try:
         yield
     except self._client.handler.timeout_exception as e:
         fail_msg_tpl += ", operation timed out"
         raise excp.JobFailure(fail_msg_tpl % (job_uuid), e)
     except k_exceptions.SessionExpiredError as e:
         fail_msg_tpl += ", session expired"
         raise excp.JobFailure(fail_msg_tpl % (job_uuid), e)
     except k_exceptions.NoNodeError:
         fail_msg_tpl += ", unknown job"
         raise excp.NotFound(fail_msg_tpl % (job_uuid))
     except k_exceptions.KazooException as e:
         fail_msg_tpl += ", internal error"
         raise excp.JobFailure(fail_msg_tpl % (job_uuid), e)
Example #3
 def trash(self, job, who):
     script = self._get_script('trash')
     with _translate_failures():
         raw_who = self._encode_owner(who)
         raw_result = script(keys=[job.owner_key, self.listings_key,
                                   job.last_modified_key, self.trash_key],
                             args=[raw_who, job.key,
                                   self._dumps(timeutils.utcnow())])
         result = self._loads(raw_result)
     status = result['status']
     if status != self.SCRIPT_STATUS_OK:
         reason = result.get('reason')
         if reason == self.SCRIPT_UNKNOWN_JOB:
             raise exc.NotFound("Job %s not found to be"
                                " trashed" % (job.uuid))
         elif reason == self.SCRIPT_UNKNOWN_OWNER:
             raise exc.NotFound("Can not trash job %s"
                                " which we can not determine"
                                " the owner of" % (job.uuid))
         elif reason == self.SCRIPT_NOT_EXPECTED_OWNER:
             raw_owner = result.get('owner')
             if raw_owner:
                 owner = self._decode_owner(raw_owner)
                 raise exc.JobFailure("Can not trash job %s"
                                      " which is not owned by %s (it is"
                                      " actively owned by %s)"
                                      % (job.uuid, who, owner))
             else:
                 raise exc.JobFailure("Can not trash job %s"
                                      " which is not owned by %s"
                                      % (job.uuid, who))
         else:
             raise exc.JobFailure("Failure to trash job %s,"
                                  " unknown internal error (reason=%s)"
                                  % (job.uuid, reason))
Example #4
 def fetch(self, name):
     """Fetch named task result"""
     try:
         indexes = self._reverse_mapping[name]
     except KeyError:
         raise exceptions.NotFound("Name %r is not mapped" % name)
     # Return the first one that is found.
     for uuid, index in indexes:
         try:
             result = self.get(uuid)
             if index is None:
                 return result
             else:
                 return result[index]
         except exceptions.NotFound:
             # NOTE(harlowja): No result was found for the given uuid.
             pass
         except (KeyError, IndexError, TypeError):
             # NOTE(harlowja): The result that the uuid returned can not be
             # accessed in the manner that the index is requesting. Perhaps
             # the result is a dictionary-like object and that key does
             # not exist (key error), or the result is a tuple/list and the
             # index is out of range (index error), or there was no result
             # and an attempt to index into None was made (type error).
             pass
     raise exceptions.NotFound("Unable to find result %r" % name)
Example #5
    def fetch(self, name, many_handler=None):
        """Fetch a named result."""
        def _many_handler(values):
            # By default we just return the first of many (unless provided
            # a different callback that can translate many results into
            # something more meaningful).
            return values[0]

        if many_handler is None:
            many_handler = _many_handler
        try:
            providers = self._reverse_mapping[name]
        except KeyError:
            raise exceptions.NotFound("Name %r is not mapped as a"
                                      " produced output by any"
                                      " providers" % name)
        values = []
        for provider in providers:
            if provider.name is _TRANSIENT_PROVIDER:
                values.append(
                    _item_from_single(provider, self._transients, name))
            else:
                try:
                    container = self._get(provider.name, only_last=True)
                except exceptions.NotFound:
                    pass
                else:
                    values.append(_item_from_single(provider, container, name))
        if not values:
            raise exceptions.NotFound("Unable to find result %r,"
                                      " searched %s" % (name, providers))
        else:
            return many_handler(values)
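The default _many_handler simply returns the first of the gathered values; callers that care about all providers can pass their own callable. A small hedged illustration of handlers a caller might supply (these names are made up and independent of the storage class above):

    def pick_first(values):
        # Mirrors the default behaviour: just take the first provider's value.
        return values[0]

    def merge_all(values):
        # Alternative: combine every provider's (dict) value into one result.
        merged = {}
        for value in values:
            merged.update(value)
        return merged

    values = [{'port': 80}, {'port': 8080, 'host': 'localhost'}]
    print(pick_first(values))  # {'port': 80}
    print(merge_all(values))   # {'port': 8080, 'host': 'localhost'}

A handler like merge_all would then be supplied as fetch(name, many_handler=merge_all).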
Example #6
 def fetch(self, name):
     """Fetch named task result"""
     try:
         indexes = self._reverse_mapping[name]
     except KeyError:
         raise exceptions.NotFound("Name %r is not mapped" % name)
     # Return the first one that is found.
     for uuid, index in reversed(indexes):
         try:
             result = self.get(uuid)
             return _item_from_result(result, index, name)
         except exceptions.NotFound:
             pass
     raise exceptions.NotFound("Unable to find result %r" % name)
Example #7
 def fetch(self, name):
     """Fetch named task result."""
     with self._lock.read_lock():
         try:
             indexes = self._reverse_mapping[name]
         except KeyError:
             raise exceptions.NotFound("Name %r is not mapped" % name)
         # Return the first one that is found.
         for (task_name, index) in reversed(indexes):
             try:
                 result = self.get(task_name)
                 return misc.item_from(result, index, name=name)
             except exceptions.NotFound:
                 pass
         raise exceptions.NotFound("Unable to find result %r" % name)
Example #8
 def add_transition(self, start, end, event):
     """Adds an allowed transition from start -> end for the given event."""
     if self.frozen:
         raise FrozenMachine()
     if start not in self._states:
         raise excp.NotFound("Can not add a transition on event '%s' that"
                             " starts in a undefined state '%s'" %
                             (event, start))
     if end not in self._states:
         raise excp.NotFound("Can not add a transition on event '%s' that"
                             " ends in a undefined state '%s'" %
                             (event, end))
     self._transitions[start][event] = _Jump(end,
                                             self._states[end]['on_enter'],
                                             self._states[start]['on_exit'])
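The transition table being filled in here is just a nested mapping of state -> event -> destination, guarded so both endpoints are already-declared states. A self-contained sketch of that structure and the same guard, using made-up state names and no taskflow imports:

    states = {'waiting', 'running', 'done'}
    transitions = {state: {} for state in states}

    def add_transition(start, end, event):
        # Both endpoints must already be known states, as checked above.
        if start not in states or end not in states:
            raise LookupError("Undefined state in %s -[%s]-> %s"
                              % (start, event, end))
        transitions[start][event] = end

    add_transition('waiting', 'running', 'start')
    add_transition('running', 'done', 'finish')
    print(transitions['waiting']['start'])  # 'running'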
Example #9
def taskdetail_get(td_id):
    """Gets a TaskDetail with matching td_id, if it exists"""
    # Try to get the TaskDetail
    try:
        ld_td = taskdetails[td_id]
    # Raise NotFound exception if it is not there
    except KeyError:
        raise exception.NotFound("No TaskDetail found with id "
                                 "%s." % (td_id, ))

    # Acquire a read lock
    with ld_td.acquire_lock(read=True):
        # Get the Task this TaskDetail represents
        tsk = ld_td.task

        # Update TaskDetail to return
        retVal = taskdetail.TaskDetail(ld_td.name, tsk, ld_td.uuid)
        retVal.updated_at = ld_td.updated_at
        retVal.state = ld_td.state
        retVal.results = ld_td.results
        retVal.exception = ld_td.exception
        retVal.stacktrace = ld_td.stacktrace
        retVal.meta = ld_td.meta

    return retVal
Example #10
 def wait(self, timeout=None):
     # Wait until timeout expires (or forever) for jobs to appear.
     watch = timeutils.StopWatch(duration=timeout)
     watch.start()
     with self._job_cond:
         while True:
             if not self._known_jobs:
                 if watch.expired():
                     raise excp.NotFound("Expired waiting for jobs to"
                                         " arrive; waited %s seconds" %
                                         watch.elapsed())
                 # This is done since the given timeout can not be passed
                 # directly to the condition variable: we can not ensure
                 # that there will actually be jobs once we reacquire the
                 # condition (especially if we are spuriously awakened), so
                 # we must recalculate the amount of time we really have left.
                 self._job_cond.wait(watch.leftover(return_none=True))
             else:
                 curr_jobs = self._fetch_jobs()
                 fetch_func = lambda ensure_fresh: curr_jobs
                 removal_func = lambda a_job: self._remove_job(a_job.path)
                 return base.JobBoardIterator(
                     self,
                     LOG,
                     board_fetch_func=fetch_func,
                     board_removal_func=removal_func)
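The comment above is the important detail: a condition variable can wake spuriously, so the remaining wait time has to be recomputed from a stopwatch (or deadline) on every pass instead of re-passing the original timeout. A minimal sketch of that pattern using only the standard library; the names are hypothetical and this is not the jobboard implementation:

    import threading
    import time

    cond = threading.Condition()
    jobs = []

    def wait_for_jobs(timeout=None):
        deadline = None if timeout is None else time.monotonic() + timeout
        with cond:
            while not jobs:
                if deadline is None:
                    cond.wait()
                    continue
                # Recompute what is left; a spurious wakeup must not be
                # allowed to restart the full timeout.
                leftover = deadline - time.monotonic()
                if leftover <= 0:
                    raise LookupError("Expired waiting for jobs to arrive")
                cond.wait(leftover)
            return list(jobs)

    def post_job(job):
        with cond:
            jobs.append(job)
            cond.notify_all()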
Example #11
 def wait(self, timeout=None):
     # Wait until timeout expires (or forever) for jobs to appear.
     watch = None
     if timeout is not None:
         watch = tt.StopWatch(duration=float(timeout)).start()
     self._job_cond.acquire()
     try:
         while True:
             if not self._known_jobs:
                 if watch is not None and watch.expired():
                     raise excp.NotFound("Expired waiting for jobs to"
                                         " arrive; waited %s seconds"
                                         % watch.elapsed())
                 # This is done since the given timeout can not be passed
                 # directly to the condition variable: we can not ensure
                 # that there will actually be jobs once we reacquire the
                 # condition (especially if we are spuriously awakened), so
                 # we must recalculate the amount of time we really have left.
                 timeout = None
                 if watch is not None:
                     timeout = watch.leftover()
                 self._job_cond.wait(timeout)
             else:
                 it = ZookeeperJobBoardIterator(self)
                 it._jobs.extend(self._fetch_jobs())
                 it._fetched = True
                 return it
     finally:
         self._job_cond.release()
Example #12
    def add_reaction(self, state, event, reaction, *args, **kwargs):
        """Adds a reaction that may get triggered by the given event & state.

        :param state: the last stable state expressed
        :type state: string
        :param event: event that caused the transition
        :param args: non-keyworded arguments
        :type args: list
        :param kwargs: key-value pair arguments
        :type kwargs: dictionary

        Reaction callbacks may (depending on how the state machine is run) be
        used after an event is processed (and a transition occurs) to cause
        the machine to react to the newly arrived at stable state. The
        result of a callback is expected to be a new event that the callback
        wants the state machine to react to. This new event may (depending on
        how the state machine is run) get processed (and this process
        typically repeats) until the state machine reaches a terminal state.
        """
        if state not in self._states:
            raise excp.NotFound("Can not add a reaction to event '%s' for an"
                                " undefined state '%s'" % (event, state))
        if not six.callable(reaction):
            raise ValueError("Reaction callback must be callable")
        if event not in self._states[state]['reactions']:
            self._states[state]['reactions'][event] = (reaction, args, kwargs)
        else:
            raise excp.Duplicate("State '%s' reaction to event '%s'"
                                 " already defined" % (state, event))
Example #13
def fetch(name, conf, namespace=BACKEND_NAMESPACE, **kwargs):
    """Fetch a jobboard backend with the given configuration.

    This fetch method will look for the entrypoint name in the entrypoint
    namespace, and then attempt to instantiate that entrypoint using the
    provided name, configuration and any board specific kwargs.

    NOTE(harlowja): to aid in making it easy to specify configuration and
    options to a board, the configuration (which is typically just a
    dictionary) can also be a URI string that identifies the entrypoint
    name and any configuration specific to that board.

    For example, given the following configuration URI::

        zookeeper://<not-used>/?a=b&c=d

    This will look for the entrypoint named 'zookeeper' and will provide
    a configuration object composed of the URI's components, in this case that
    is ``{'a': 'b', 'c': 'd'}`` to the constructor of that board
    instance (also including the name specified).
    """
    board, conf = misc.extract_driver_and_conf(conf, 'board')
    LOG.debug('Looking for %r jobboard driver in %r', board, namespace)
    try:
        mgr = driver.DriverManager(namespace, board,
                                   invoke_on_load=True,
                                   invoke_args=(name, conf),
                                   invoke_kwds=kwargs)
        return mgr.driver
    except RuntimeError as e:
        raise exc.NotFound("Could not find jobboard %s" % (board), e)
Example #14
    def add_reaction(self, state, event, reaction, *args, **kwargs):
        """Adds a reaction that may get triggered by the given event & state.

        Reaction callbacks may (depending on how the state machine is run) be
        used after an event is processed (and a transition occurs) to cause
        the machine to react to the newly arrived at stable state.

        These callbacks are expected to accept three default positional
        parameters (although more can be passed in via *args and **kwargs;
        these will automatically get provided to the callback when it is
        activated *on top* of the three defaults). The three default
        parameters are the last stable state, the new stable state and the
        event that caused the transition to this new stable state to be
        arrived at.

        The result of a callback is expected to be a new event that the
        callback wants the state machine to react to. This new event may
        (depending on how the state machine is run) get processed (and this
        process typically repeats) until the state machine reaches a
        terminal state.
        """
        if self.frozen:
            raise FrozenMachine()
        if state not in self._states:
            raise excp.NotFound("Can not add a reaction to event '%s' for an"
                                " undefined state '%s'" % (event, state))
        if not six.callable(reaction):
            raise ValueError("Reaction callback must be callable")
        if event not in self._states[state]['reactions']:
            self._states[state]['reactions'][event] = (reaction, args, kwargs)
        else:
            raise excp.Duplicate("State '%s' reaction to event '%s'"
                                 " already defined" % (state, event))
Example #15
    def process_event(self, event):
        """Trigger a state change in response to the provided event.

        :param event: event to be processed to cause a potential transition
        """
        current = self._current
        if current is None:
            raise NotInitialized("Can only process events after"
                                 " being initialized (not before)")
        if self._states[current.name]['terminal']:
            raise excp.InvalidState("Can not transition from terminal"
                                    " state '%s' on event '%s'" %
                                    (current.name, event))
        if event not in self._transitions[current.name]:
            raise excp.NotFound("Can not transition from state '%s' on"
                                " event '%s' (no defined transition)" %
                                (current.name, event))
        replacement = self._transitions[current.name][event]
        if current.on_exit is not None:
            current.on_exit(current.name, event)
        if replacement.on_enter is not None:
            replacement.on_enter(replacement.name, event)
        self._current = replacement
        return (
            self._states[replacement.name]['reactions'].get(event),
            self._states[replacement.name]['terminal'],
        )
Example #16
 def _save_flow_details(self, flow_detail, ignore_missing):
     # See if we have an existing flow detail to merge with.
     e_fd = None
     try:
         e_fd = self._get_flow_details(flow_detail.uuid, lock=False)
     except EnvironmentError:
         if not ignore_missing:
             raise exc.NotFound("No flow details found with id: %s"
                                % flow_detail.uuid)
     if e_fd is not None:
         e_fd = p_utils.flow_details_merge(e_fd, flow_detail)
         for td in flow_detail:
             if e_fd.find(td.uuid) is None:
                 e_fd.add(td)
         flow_detail = e_fd
     flow_path = os.path.join(self._flow_path, flow_detail.uuid)
     misc.ensure_tree(flow_path)
     self._write_to(
         os.path.join(flow_path, 'metadata'),
         jsonutils.dumps(p_utils.format_flow_detail(flow_detail)))
     if len(flow_detail):
         task_path = os.path.join(flow_path, 'tasks')
         misc.ensure_tree(task_path)
         self._run_with_process_lock('task',
                                     self._save_tasks_and_link,
                                     list(flow_detail), task_path)
     return flow_detail
Example #17
def flowdetail_get(fd_id):
    """Gets a FlowDetail with matching fd_id, if it exists"""
    # Try to get the FlowDetail
    try:
        fd = flowdetails[fd_id]
    # Raise a NotFound exception if it is not there
    except KeyError:
        raise exception.NotFound("No FlowDetail found with id "
                                 "%s." % (fd_id, ))

    # Acquire a read lock on the FlowDetail
    with fd.acquire_lock(read=True):
        # Get the Flow this FlowDetail represents
        wf = fd.flow

        # Create a FlowDetail to return
        retVal = flowdetail.FlowDetail(fd.name, wf, fd.uuid)

        # Change updated_at to reflect the current data
        retVal.updated_at = fd.updated_at

        # Add the generic TaskDetails to the FlowDetail to return
        for td in fd:
            retVal.add_task_detail(taskdetail_get(td.uuid))

    return retVal
Example #18
 def get_uuid_by_name(self, task_name):
     """Get uuid of task with given name"""
     td = self._flowdetail.find_by_name(task_name)
     if td is not None:
         return td.uuid
     else:
         raise exceptions.NotFound("Unknown task name: %r" % task_name)
Example #19
 def update_task_details(self, task_detail):
     try:
         e_td = self.backend.task_details[task_detail.uuid]
     except KeyError:
         raise exc.NotFound("No task details found with id: %s" %
                            task_detail.uuid)
     return p_utils.task_details_merge(e_td, task_detail, deep_copy=True)
Example #20
 def wait(self, timeout=None, initial_delay=0.005,
          max_delay=1.0, sleep_func=time.sleep):
     if initial_delay > max_delay:
         raise ValueError("Initial delay %s must be less than or equal"
                          " to the provided max delay %s"
                          % (initial_delay, max_delay))
     # This does a spin-loop that backs off by doubling the delay
     # up to the provided max-delay. In the future we could try having
     # a secondary client connected into redis pubsub and use that
     # instead, but for now this is simpler.
     w = timeutils.StopWatch(duration=timeout)
     w.start()
     delay = initial_delay
     while True:
         jc = self.job_count
         if jc > 0:
             curr_jobs = self._fetch_jobs()
             if curr_jobs:
                 return base.JobBoardIterator(
                     self, LOG,
                     board_fetch_func=lambda ensure_fresh: curr_jobs)
         if w.expired():
             raise exc.NotFound("Expired waiting for jobs to"
                                " arrive; waited %s seconds"
                                % w.elapsed())
         else:
             remaining = w.leftover(return_none=True)
             if remaining is not None:
                 delay = min(delay * 2, remaining, max_delay)
             else:
                 delay = min(delay * 2, max_delay)
             sleep_func(delay)
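The loop above doubles its sleep each pass but never sleeps longer than max_delay or than the time left on the stopwatch. A minimal sketch of just that delay schedule (the generator name is made up):

    def backoff_delays(initial_delay, max_delay, budget):
        # Yields the successive sleep intervals: doubled each time, but
        # capped by max_delay and by the remaining time budget.
        delay, remaining = initial_delay, budget
        while remaining > 0:
            delay = min(delay * 2, remaining, max_delay)
            yield delay
            remaining -= delay

    # Roughly 0.01, 0.02, 0.04, ... capped at 1.0 until the 3 second
    # budget is exhausted.
    print(["%.2f" % d for d in backoff_delays(0.005, 1.0, 3.0)])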
Example #21
    def _update_flow_details(self, fd, txn, create_missing=False):
        # Determine whether the desired data exists or not
        fd_path = paths.join(self.flow_path, fd.uuid)
        try:
            fd_data, _zstat = self._client.get(fd_path)
        except k_exc.NoNodeError:
            # Not-existent: create or raise exception
            if create_missing:
                txn.create(fd_path)
                e_fd = logbook.FlowDetail(name=fd.name, uuid=fd.uuid)
            else:
                raise exc.NotFound("No flow details found with id: %s" %
                                   fd.uuid)
        else:
            # Existent: read it out
            e_fd = logbook.FlowDetail.from_dict(misc.decode_json(fd_data))

        # Update and write it back
        e_fd = e_fd.merge(fd)
        fd_data = e_fd.to_dict()
        txn.set_data(fd_path, misc.binary_encode(jsonutils.dumps(fd_data)))
        for ad in fd:
            ad_path = paths.join(fd_path, ad.uuid)
            # NOTE(harlowja): create an entry in the flow detail path
            # for the provided atom detail so that a reference exists
            # from the flow detail to its atom details.
            if not self._client.exists(ad_path):
                txn.create(ad_path)
            e_fd.add(self._update_atom_details(ad, txn, create_missing=True))
        return e_fd
Example #22
    def _update_atom_details(self, ad, txn, create_missing=False):
        # Determine whether the desired data exists or not.
        ad_path = paths.join(self.atom_path, ad.uuid)
        e_ad = None
        try:
            ad_data, _zstat = self._client.get(ad_path)
        except k_exc.NoNodeError:
            # Not-existent: create or raise exception.
            if not create_missing:
                raise exc.NotFound("No atom details found with"
                                   " id: %s" % ad.uuid)
            else:
                txn.create(ad_path)
        else:
            # Existent: read it out.
            try:
                ad_data = misc.decode_json(ad_data)
                ad_cls = logbook.atom_detail_class(ad_data['type'])
                e_ad = ad_cls.from_dict(ad_data['atom'])
            except KeyError:
                pass

        # Update and write it back
        if e_ad:
            e_ad = e_ad.merge(ad)
        else:
            e_ad = ad
        ad_data = base._format_atom(e_ad)
        txn.set_data(ad_path, misc.binary_encode(jsonutils.dumps(ad_data)))
        return e_ad
Example #23
 def _submit_task(self,
                  task,
                  task_uuid,
                  action,
                  arguments,
                  progress_callback,
                  timeout=pr.REQUEST_TIMEOUT,
                  **kwargs):
     """Submit task request to workers."""
     remote_task = self._store_remote_task(
         rt.RemoteTask(task, task_uuid, action, arguments,
                       progress_callback, timeout, **kwargs))
     try:
         # get task's workers topic to send request to
         try:
             topic = self._workers_info[remote_task.name]
         except KeyError:
             raise exc.NotFound("Workers topic not found for the '%s'"
                                "task." % remote_task.name)
         else:
             # publish request
             request = remote_task.request
             LOG.debug("Sending request: %s" % request)
             self._proxy.publish(request,
                                 remote_task.uuid,
                                 routing_key=topic,
                                 reply_to=self._uuid)
     except Exception as e:
         LOG.error("Failed to submit the '%s' task: %s" % (remote_task, e))
         self._remove_remote_task(remote_task)
         remote_task.set_result(misc.Failure())
     return remote_task.result
Example #24
    def run_iter(self, event, initialize=True):
        """Returns a iterator/generator that will run the state machine.

        NOTE(harlowja): only one runner iterator/generator should be active for
        a machine, if this is not observed then it is possible for
        initialization and other local state to be corrupted and cause issues
        when running...
        """
        if initialize:
            self.initialize()
        while True:
            old_state = self.current_state
            reaction, terminal = self.process_event(event)
            new_state = self.current_state
            try:
                sent_event = yield (old_state, new_state)
            except GeneratorExit:
                break
            if terminal:
                break
            if reaction is None and sent_event is None:
                raise excp.NotFound("Unable to progress since no reaction (or"
                                    " sent event) has been made available in"
                                    " new state '%s' (moved to from state '%s'"
                                    " in response to event '%s')" %
                                    (new_state, old_state, event))
            elif sent_event is not None:
                event = sent_event
            else:
                cb, args, kwargs = reaction
                event = cb(old_state, new_state, event, *args, **kwargs)
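The generator yields (old_state, new_state) pairs after each processed event; plain iteration lets registered reactions supply the next event, while send() can push one in explicitly. A hedged driving-loop sketch, assuming a machine object exposing run_iter() as above with its states, transitions and reactions already defined:

    # ``machine`` is assumed to be built elsewhere (states, transitions and
    # reactions registered); 'begin' is a made-up starting event.
    for old_state, new_state in machine.run_iter('begin'):
        # Each iteration corresponds to one processed event; a reaction
        # registered on the new state produces the next event unless one
        # is pushed in with .send(<event>) instead of plain iteration.
        print("%s -> %s" % (old_state, new_state))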
Example #25
 def _get_node_attr(self, path, attr_name, trans_func=None):
     try:
         _data, node_stat = self._client.get(path)
         attr = getattr(node_stat, attr_name)
         if trans_func is not None:
             return trans_func(attr)
         else:
             return attr
     except k_exceptions.NoNodeError as e:
         raise excp.NotFound(
             "Can not fetch the %r attribute"
             " of job %s (%s), path %s not found" %
             (attr_name, self.uuid, self.path, path), e)
     except self._client.handler.timeout_exception as e:
         raise excp.JobFailure(
             "Can not fetch the %r attribute"
             " of job %s (%s), operation timed out" %
             (attr_name, self.uuid, self.path), e)
     except k_exceptions.SessionExpiredError as e:
         raise excp.JobFailure(
             "Can not fetch the %r attribute"
             " of job %s (%s), session expired" %
             (attr_name, self.uuid, self.path), e)
     except (AttributeError, k_exceptions.KazooException) as e:
         raise excp.JobFailure(
             "Can not fetch the %r attribute"
             " of job %s (%s), internal error" %
             (attr_name, self.uuid, self.path), e)
Example #26
 def update_atom_details(self, atom_detail):
     try:
         e_ad = self.backend.atom_details[atom_detail.uuid]
     except KeyError:
         raise exc.NotFound("No atom details found with id: %s" %
                            atom_detail.uuid)
     return e_ad.merge(atom_detail, deep_copy=True)
Example #27
 def get(self, uuid):
     """Get result for task with id 'uuid' to storage"""
     td = self._taskdetail_by_uuid(uuid)
     if td.failure:
         return td.failure
     if td.state not in STATES_WITH_RESULTS:
         raise exceptions.NotFound("Result for task %r is not known" % uuid)
     return td.results
Example #28
def logbook_destroy(lb_id):
    """Deletes the LogBook model with matching lb_id"""
    # Try deleting the LogBook
    try:
        del logbooks[lb_id]
    # Raise a NotFound error if the LogBook doesn't exist
    except KeyError:
        raise exception.NotFound("No Logbook found with id " "%s." % (lb_id, ))
Example #29
 def get_logbook(self, book_uuid):
     with self._lock.read_lock():
         try:
             return self._helper.construct(book_uuid,
                                           self._memory.log_books)
         except KeyError:
             raise exc.NotFound("No logbook found with uuid '%s'"
                                % book_uuid)
Example #30
 def _destroy_logbook(lb_uuid, txn):
     lb_path = paths.join(self.book_path, lb_uuid)
     if not self._client.exists(lb_path):
         raise exc.NotFound("No logbook found with id: %s" % lb_uuid)
     for fd_uuid in self._client.get_children(lb_path):
         _destroy_flow_details(fd_uuid, txn)
         txn.delete(paths.join(lb_path, fd_uuid))
     txn.delete(lb_path)