Пример #1
0
    def _update_atom_details(self, ad, txn, create_missing=False):
        """Update (or optionally create) the atom detail node for ``ad``.

        Merges the stored atom detail (if any) with ``ad`` and writes the
        result back inside the given zookeeper transaction.

        :param ad: atom detail to persist.
        :param txn: transaction object to append operations to.
        :param create_missing: when true, create the node instead of
                               raising if it does not yet exist.
        :returns: the merged atom detail that was written.
        :raises exc.NotFound: if the node is missing and ``create_missing``
                              is false.
        """
        # Determine whether the desired data exists or not.
        ad_path = paths.join(self.atom_path, ad.uuid)
        e_ad = None
        try:
            ad_data, _zstat = self._client.get(ad_path)
        except k_exc.NoNodeError:
            # Not-existent: create or raise exception (the original code
            # ignored ``create_missing`` and always raised).
            if not create_missing:
                raise exc.NotFound("No atom details found with id: %s"
                                   % ad.uuid)
            txn.create(ad_path)
        else:
            # Existent: read it out.
            try:
                ad_data = misc.decode_json(ad_data)
                ad_cls = logbook.atom_detail_class(ad_data['type'])
                e_ad = ad_cls.from_dict(ad_data['atom'])
            except KeyError:
                pass

        # Update and write it back
        if e_ad:
            e_ad = e_ad.merge(ad)
        else:
            e_ad = ad
        ad_data = base._format_atom(e_ad)
        txn.set_data(ad_path,
                     misc.binary_encode(jsonutils.dumps(ad_data)))
        return e_ad
Пример #2
0
 def _fetch_state(self):
     """Derive this job's state from its znode data and owner node.

     :returns: ``states.COMPLETE`` when the job node is gone or empty,
               ``states.UNCLAIMED`` when it has data but no owner,
               ``states.CLAIMED`` when it has both.
     :raises excp.JobFailure: if zookeeper can not be consulted
                              (session expiry, timeout or other
                              internal kazoo error).
     """
     owner = self.board.find_owner(self)
     job_data = {}
     try:
         raw_data, _data_stat = self._client.get(self.path)
         job_data = misc.decode_json(raw_data)
     except k_exceptions.NoNodeError:
         # Missing node is handled below as "no data" -> COMPLETE.
         pass
     except k_exceptions.SessionExpiredError:
         excp.raise_with_cause(
             excp.JobFailure,
             "Can not fetch the state of %s,"
             " session expired" % (self.uuid))
     # NOTE(review): clause ordering matters; the handler's timeout
     # exception may itself be a KazooException subclass, so it must be
     # caught before the generic KazooException catch-all below.
     except self._client.handler.timeout_exception:
         excp.raise_with_cause(
             excp.JobFailure,
             "Can not fetch the state of %s,"
             " operation timed out" % (self.uuid))
     except k_exceptions.KazooException:
         excp.raise_with_cause(
             excp.JobFailure,
             "Can not fetch the state of %s,"
             " internal error" % (self.uuid))
     if not job_data:
         # No data this job has been completed (the owner that we might have
         # fetched will not be able to be fetched again, since the job node
         # is a parent node of the owner/lock node).
         return states.COMPLETE
     if not owner:
         # No owner, but data, still work to be done.
         return states.UNCLAIMED
     return states.CLAIMED
Пример #3
0
 def _process_child(self, path, request, quiet=True):
     """Receives the result of a child data fetch request.

     Decodes the job data, then (under the job condition lock) registers
     a new ``ZookeeperJob`` for ``path`` if one is not already known and
     emits a POSTED notification for it.

     :param path: znode path the fetch request was issued for.
     :param request: async result whose ``get()`` yields (data, stat).
     :param quiet: when true, fetch/decode errors are logged instead of
                   re-raised.
     """
     job = None
     try:
         raw_data, node_stat = request.get()
         job_data = misc.decode_json(raw_data)
         job_created_on = misc.millis_to_datetime(node_stat.ctime)
         try:
             job_priority = job_data['priority']
             job_priority = base.JobPriority.convert(job_priority)
         except KeyError:
             # Older job postings may not carry a priority; default it.
             job_priority = base.JobPriority.NORMAL
         job_uuid = job_data['uuid']
         job_name = job_data['name']
     except (ValueError, TypeError, KeyError):
         with excutils.save_and_reraise_exception(reraise=not quiet):
             LOG.warning("Incorrectly formatted job data found at path: %s",
                         path,
                         exc_info=True)
     except self._client.handler.timeout_exception:
         with excutils.save_and_reraise_exception(reraise=not quiet):
             # Fixed duplicated word in the message ("from from path").
             LOG.warning(
                 "Operation timed out fetching job data"
                 " from path: %s",
                 path,
                 exc_info=True)
     except k_exceptions.SessionExpiredError:
         with excutils.save_and_reraise_exception(reraise=not quiet):
             LOG.warning("Session expired fetching job data from path: %s",
                         path,
                         exc_info=True)
     except k_exceptions.NoNodeError:
         LOG.debug(
             "No job node found at path: %s, it must have"
             " disappeared or was removed", path)
     except k_exceptions.KazooException:
         with excutils.save_and_reraise_exception(reraise=not quiet):
             LOG.warning("Internal error fetching job data from path: %s",
                         path,
                         exc_info=True)
     else:
         with self._job_cond:
             # Now we can officially check if someone already placed this
             # jobs information into the known job set (if it's already
             # existing then just leave it alone).
             if path not in self._known_jobs:
                 job = ZookeeperJob(self,
                                    job_name,
                                    self._client,
                                    path,
                                    backend=self._persistence,
                                    uuid=job_uuid,
                                    book_data=job_data.get("book"),
                                    details=job_data.get("details", {}),
                                    created_on=job_created_on,
                                    priority=job_priority)
                 self._known_jobs[path] = job
                 self._job_cond.notify_all()
     if job is not None:
         self._try_emit(base.POSTED, details={'job': job})
Пример #4
0
 def state(self):
     """Return the current state of this job.

     :returns: ``states.COMPLETE`` if the job node is gone/empty,
               ``states.UNCLAIMED`` if it has data but no owner,
               ``states.CLAIMED`` otherwise.
     :raises excp.JobFailure: when zookeeper can not be consulted
                              (session expiry, timeout or other
                              internal kazoo error).
     """
     owner = self.board.find_owner(self)
     job_data = {}
     try:
         raw_data, _data_stat = self._client.get(self.path)
         job_data = misc.decode_json(raw_data)
     except k_exceptions.NoNodeError:
         # Missing node is handled below as "no data" -> COMPLETE.
         pass
     except k_exceptions.SessionExpiredError as e:
         raise excp.JobFailure(
             "Can not fetch the state of %s,"
             " session expired" % (self.uuid), e)
     # NOTE(review): clause ordering matters; the handler's timeout
     # exception may itself be a KazooException subclass, so it must be
     # caught before the generic KazooException catch-all below.
     except self._client.handler.timeout_exception as e:
         raise excp.JobFailure(
             "Can not fetch the state of %s,"
             " operation timed out" % (self.uuid), e)
     except k_exceptions.KazooException as e:
         raise excp.JobFailure(
             "Can not fetch the state of %s, internal"
             " error" % (self.uuid), e)
     if not job_data:
         # No data this job has been completed (the owner that we might have
         # fetched will not be able to be fetched again, since the job node
         # is a parent node of the owner/lock node).
         return states.COMPLETE
     if not owner:
         # No owner, but data, still work to be done.
         return states.UNCLAIMED
     return states.CLAIMED
Пример #5
0
 def _process_child(self, path, request):
     """Receives the result of a child data fetch request.

     Decodes the job data and, while holding the job mutate lock,
     registers a new ``ZookeeperJob`` for ``path`` if one is not already
     known. All fetch/decode failures are logged, never raised.
     """
     try:
         raw_data, _stat = request.get()
         job_data = misc.decode_json(raw_data)
         with self._job_mutate:
             if path not in self._known_jobs:
                 job = ZookeeperJob(job_data['name'], self,
                                    self._client, self._persistence,
                                    uuid=job_data['uuid'],
                                    book_data=job_data.get("book"),
                                    details=job_data.get("details", {}))
                 self._known_jobs[path] = (job, _READY)
     except (ValueError, TypeError, KeyError):
         LOG.exception("Incorrectly formatted job data found"
                       " at path: %s", path)
     # NOTE: logging's ``warn`` is a deprecated alias of ``warning``.
     except self._client.handler.timeout_exception:
         LOG.warning("Connection timed out fetching job data"
                     " from path: %s", path)
     except k_exceptions.SessionExpiredError:
         LOG.warning("Session expired fetching job data from path: %s", path)
     except k_exceptions.NoNodeError:
         LOG.warning("No job node found at path: %s, it must have"
                     " disappeared or was removed", path)
     except k_exceptions.KazooException:
         LOG.exception("Internal error fetching job data from"
                       " path: %s", path)
Пример #6
0
    def _update_flow_details(self, fd, txn, create_missing=False):
        """Merge ``fd`` into its stored flow detail and write it back.

        :param fd: flow detail to persist.
        :param txn: zookeeper transaction to append operations to.
        :param create_missing: create the node when it does not exist
                               instead of raising ``exc.NotFound``.
        :returns: the merged flow detail that was written.
        """
        fd_path = paths.join(self.flow_path, fd.uuid)
        try:
            fd_data, _zstat = self._client.get(fd_path)
        except k_exc.NoNodeError:
            # Missing node: either create a blank detail or complain.
            if not create_missing:
                raise exc.NotFound("No flow details found with id: %s" %
                                   fd.uuid)
            txn.create(fd_path)
            e_fd = logbook.FlowDetail(name=fd.name, uuid=fd.uuid)
        else:
            # Node exists: decode the stored flow detail.
            e_fd = logbook.FlowDetail.from_dict(misc.decode_json(fd_data))
        e_fd = e_fd.merge(fd)
        txn.set_data(fd_path,
                     misc.binary_encode(jsonutils.dumps(e_fd.to_dict())))
        for ad in fd:
            child_path = paths.join(fd_path, ad.uuid)
            # Ensure a reference node exists from the flow detail to each
            # of its atom details.
            if not self._client.exists(child_path):
                txn.create(child_path)
            e_fd.add(self._update_atom_details(ad, txn, create_missing=True))
        return e_fd
Пример #7
0
    def _update_flow_details(self, fd, txn, create_missing=False):
        """Write the merge of ``fd`` and its stored counterpart via ``txn``."""
        fd_path = paths.join(self.flow_path, fd.uuid)
        try:
            raw, _zstat = self._client.get(fd_path)
        except k_exc.NoNodeError:
            if create_missing:
                # Make the node and start from an empty detail.
                txn.create(fd_path)
                merged = logbook.FlowDetail(name=fd.name, uuid=fd.uuid)
            else:
                raise exc.NotFound("No flow details found with id: %s"
                                   % fd.uuid)
        else:
            # Decode whatever is currently stored.
            merged = logbook.FlowDetail.from_dict(misc.decode_json(raw))
        merged = merged.merge(fd)
        txn.set_data(fd_path,
                     misc.binary_encode(jsonutils.dumps(merged.to_dict())))
        for ad in fd:
            ad_path = paths.join(fd_path, ad.uuid)
            # Keep a child reference from the flow detail to the atom detail.
            if not self._client.exists(ad_path):
                txn.create(ad_path)
            merged.add(self._update_atom_details(ad, txn,
                                                 create_missing=True))
        return merged
Пример #8
0
 def _process_child(self, path, request):
     """Receives the result of a child data fetch request.

     Decodes the job data and, while holding the job mutate lock,
     registers a new ``ZookeeperJob`` for ``path`` if one is not already
     known. All fetch/decode failures are logged, never raised.
     """
     try:
         raw_data, _stat = request.get()
         job_data = misc.decode_json(raw_data)
         with self._job_mutate:
             if path not in self._known_jobs:
                 job = ZookeeperJob(job_data['name'],
                                    self,
                                    self._client,
                                    self._persistence,
                                    uuid=job_data['uuid'],
                                    book_data=job_data.get("book"),
                                    details=job_data.get("details", {}))
                 self._known_jobs[path] = (job, _READY)
     except (ValueError, TypeError, KeyError):
         LOG.exception(
             "Incorrectly formatted job data found"
             " at path: %s", path)
     # NOTE: logging's ``warn`` is a deprecated alias of ``warning``.
     except self._client.handler.timeout_exception:
         LOG.warning("Connection timed out fetching job data"
                     " from path: %s", path)
     except k_exceptions.SessionExpiredError:
         LOG.warning("Session expired fetching job data from path: %s", path)
     except k_exceptions.NoNodeError:
         LOG.warning(
             "No job node found at path: %s, it must have"
             " disappeared or was removed", path)
     except k_exceptions.KazooException:
         LOG.exception("Internal error fetching job data from"
                       " path: %s", path)
Пример #9
0
 def state(self):
     """Return the current state of this job.

     :returns: ``states.COMPLETE``, ``states.UNCLAIMED`` or
               ``states.CLAIMED`` (see inline comments below).
     :raises excp.ConnectionFailure: on session expiry or timeout.
     :raises excp.InvalidJobOperation: on other internal kazoo errors.
     """
     owner = self.board.find_owner(self)
     job_data = {}
     job_path, _lock_path = _get_paths(self.board.path, self.uuid)
     try:
         raw_data, _data_stat = self._client.get(job_path)
         job_data = misc.decode_json(raw_data)
     except k_exceptions.NoNodeError:
         # Missing node is treated as "no data" -> COMPLETE below.
         pass
     except k_exceptions.SessionExpiredError:
         raise excp.ConnectionFailure("Can not fetch the state of %s,"
                                      " session expired" % (self.uuid))
     # NOTE(review): clause ordering matters; the handler's timeout
     # exception may itself be a KazooException subclass, so it must be
     # caught before the generic KazooException catch-all below.
     except self._client.handler.timeout_exception:
         raise excp.ConnectionFailure("Can not fetch the state of %s,"
                                      " connection timed out" % (self.uuid))
     except k_exceptions.KazooException as e:
         raise excp.InvalidJobOperation("Can not fetch the state of %s,"
                                        " internal error: %s"
                                        % (self.uuid, e))
     if not job_data:
         # No data this job has been completed (the owner that we might have
         # fetched will not be able to be fetched again, since the job node
         # is a parent node of the owner/lock node).
         return states.COMPLETE
     if not owner:
         # No owner, but data, still work to be done.
         return states.UNCLAIMED
     return states.CLAIMED
Пример #10
0
    def _update_atom_details(self, ad, txn, create_missing=False):
        """Merge ``ad`` with its stored atom detail and write it back.

        :param ad: atom detail to persist.
        :param txn: zookeeper transaction to append operations to.
        :param create_missing: create the node when absent instead of
                               raising ``exc.NotFound``.
        :returns: the merged atom detail that was written.
        """
        ad_path = paths.join(self.atom_path, ad.uuid)
        existing = None
        try:
            blob, _zstat = self._client.get(ad_path)
        except k_exc.NoNodeError:
            # Missing node: create it, or fail if creation was not allowed.
            if create_missing:
                txn.create(ad_path)
            else:
                raise exc.NotFound("No atom details found with"
                                   " id: %s" % ad.uuid)
        else:
            # Node exists: attempt to decode the stored atom detail.
            try:
                decoded = misc.decode_json(blob)
                detail_cls = logbook.atom_detail_class(decoded['type'])
                existing = detail_cls.from_dict(decoded['atom'])
            except KeyError:
                pass

        # Merge with the incoming detail (or just take it wholesale).
        merged = existing.merge(ad) if existing else ad
        txn.set_data(ad_path,
                     misc.binary_encode(
                         jsonutils.dumps(base._format_atom(merged))))
        return merged
Пример #11
0
 def _get_task_details(self, td_uuid):
     """Load and decode the task detail stored under ``td_uuid``."""
     td_path = paths.join(self.task_path, td_uuid)
     try:
         blob, _zstat = self._client.get(td_path)
     except k_exc.NoNodeError:
         raise exc.NotFound("No task details found with id: %s" % td_uuid)
     return p_utils.unformat_task_detail(td_uuid, misc.decode_json(blob))
Пример #12
0
 def _get_task_details(self, td_uuid):
     """Fetch the task detail node and convert it back into an object."""
     path = paths.join(self.task_path, td_uuid)
     try:
         raw, _zstat = self._client.get(path)
     except k_exc.NoNodeError:
         raise exc.NotFound("No task details found with id: %s" % td_uuid)
     else:
         decoded = misc.decode_json(raw)
         return p_utils.unformat_task_detail(td_uuid, decoded)
Пример #13
0
 def _get_atom_details(self, ad_uuid):
     """Load, decode and reconstruct the atom detail for ``ad_uuid``."""
     path = paths.join(self.atom_path, ad_uuid)
     try:
         raw, _zstat = self._client.get(path)
     except k_exc.NoNodeError:
         raise exc.NotFound("No atom details found with id: %s" % ad_uuid)
     decoded = misc.decode_json(raw)
     # The stored 'type' selects which atom detail class to rebuild.
     return logbook.atom_detail_class(decoded['type']).from_dict(
         decoded['atom'])
Пример #14
0
 def find_owner(self, job):
     """Return the owner recorded in the job's lock node (if any)."""
     with self._wrap(job.uuid, job.path, "Owner query failure: %s"):
         try:
             # Sync first so we observe the latest lock node contents.
             self._client.sync(job.lock_path)
             raw, _lock_stat = self._client.get(job.lock_path)
             owner = misc.decode_json(raw).get("owner")
         except k_exceptions.NoNodeError:
             # No lock node means nobody currently owns the job.
             owner = None
         return owner
Пример #15
0
 def find_owner(self, job):
     """Fetch who (if anyone) currently owns ``job``."""
     with self._wrap(job.uuid, job.path, "Owner query failure: %s"):
         try:
             # Sync so the subsequent read sees the latest state.
             self._client.sync(job.lock_path)
             contents, _lock_stat = self._client.get(job.lock_path)
         except k_exceptions.NoNodeError:
             # Missing lock node -> unowned.
             return None
         return misc.decode_json(contents).get("owner")
Пример #16
0
 def _get_atom_details(self, ad_uuid):
     """Return the stored atom detail identified by ``ad_uuid``."""
     ad_path = paths.join(self.atom_path, ad_uuid)
     try:
         blob, _zstat = self._client.get(ad_path)
     except k_exc.NoNodeError:
         raise exc.NotFound("No atom details found with id: %s" % ad_uuid)
     else:
         data = misc.decode_json(blob)
         cls = logbook.atom_detail_class(data['type'])
         return cls.from_dict(data['atom'])
Пример #17
0
    def _get_flow_details(self, fd_uuid):
        """Load the flow detail plus all of its child atom details."""
        fd_path = paths.join(self.flow_path, fd_uuid)
        try:
            blob, _zstat = self._client.get(fd_path)
        except k_exc.NoNodeError:
            raise exc.NotFound("No flow details found with id: %s" % fd_uuid)

        detail = logbook.FlowDetail.from_dict(misc.decode_json(blob))
        # Children of the flow node reference the atom details it contains.
        for child_uuid in self._client.get_children(fd_path):
            detail.add(self._get_atom_details(child_uuid))
        return detail
Пример #18
0
    def _get_flow_details(self, fd_uuid):
        """Rebuild the flow detail stored under ``fd_uuid``."""
        fd_path = paths.join(self.flow_path, fd_uuid)
        try:
            raw, _zstat = self._client.get(fd_path)
        except k_exc.NoNodeError:
            raise exc.NotFound("No flow details found with id: %s" % fd_uuid)

        decoded = misc.decode_json(raw)
        fd = logbook.FlowDetail.from_dict(decoded)
        for ad_uuid in self._client.get_children(fd_path):
            fd.add(self._get_atom_details(ad_uuid))
        return fd
Пример #19
0
 def _get_logbook(self, lb_uuid):
     """Load the logbook znode plus all of its child flow details."""
     lb_path = paths.join(self.book_path, lb_uuid)
     try:
         raw, _zstat = self._client.get(lb_path)
     except k_exc.NoNodeError:
         raise exc.NotFound("No logbook found with id: %s" % lb_uuid)
     book = p_utils.unformat_logbook(lb_uuid, misc.decode_json(raw))
     # Each child of the logbook node is a flow detail reference.
     for fd_uuid in self._client.get_children(lb_path):
         book.add(self._get_flow_details(fd_uuid))
     return book
Пример #20
0
    def _get_flow_details(self, fd_uuid):
        """Load a flow detail and all of the task details it references."""
        fd_path = paths.join(self.flow_path, fd_uuid)
        try:
            raw, _zstat = self._client.get(fd_path)
        except k_exc.NoNodeError:
            raise exc.NotFound("No flow details found with id: %s" % fd_uuid)

        detail = p_utils.unformat_flow_detail(fd_uuid, misc.decode_json(raw))
        # Children of the flow node reference its task details.
        for td_uuid in self._client.get_children(fd_path):
            detail.add(self._get_task_details(td_uuid))
        return detail
Пример #21
0
    def _get_flow_details(self, fd_uuid):
        """Fetch the stored flow detail plus its child task details."""
        fd_path = paths.join(self.flow_path, fd_uuid)
        try:
            blob, _zstat = self._client.get(fd_path)
        except k_exc.NoNodeError:
            raise exc.NotFound("No flow details found with id: %s" % fd_uuid)

        decoded = misc.decode_json(blob)
        fd = p_utils.unformat_flow_detail(fd_uuid, decoded)
        for child in self._client.get_children(fd_path):
            fd.add(self._get_task_details(child))
        return fd
Пример #22
0
 def _get_logbook(self, lb_uuid):
     """Reconstruct the stored logbook and its flow details."""
     lb_path = paths.join(self.book_path, lb_uuid)
     try:
         blob, _zstat = self._client.get(lb_path)
     except k_exc.NoNodeError:
         raise exc.NotFound("No logbook found with id: %s" % lb_uuid)
     else:
         book = logbook.LogBook.from_dict(misc.decode_json(blob),
                                          unmarshal_time=True)
         # Children of the logbook node reference its flow details.
         for child in self._client.get_children(lb_path):
             book.add(self._get_flow_details(child))
         return book
Пример #23
0
 def _get_logbook(self, lb_uuid):
     """Fetch and rebuild the logbook identified by ``lb_uuid``."""
     lb_path = paths.join(self.book_path, lb_uuid)
     try:
         raw, _zstat = self._client.get(lb_path)
     except k_exc.NoNodeError:
         raise exc.NotFound("No logbook found with id: %s" % lb_uuid)
     decoded = misc.decode_json(raw)
     book = p_utils.unformat_logbook(lb_uuid, decoded)
     for fd_uuid in self._client.get_children(lb_path):
         book.add(self._get_flow_details(fd_uuid))
     return book
Пример #24
0
 def _get_logbook(self, lb_uuid):
     """Load the logbook node and attach every child flow detail."""
     lb_path = paths.join(self.book_path, lb_uuid)
     try:
         raw, _zstat = self._client.get(lb_path)
     except k_exc.NoNodeError:
         raise exc.NotFound("No logbook found with id: %s" % lb_uuid)
     book = logbook.LogBook.from_dict(misc.decode_json(raw),
                                      unmarshal_time=True)
     for fd_uuid in self._client.get_children(lb_path):
         book.add(self._get_flow_details(fd_uuid))
     return book
Пример #25
0
 def _process_child(self, path, request, quiet=True):
     """Receives the result of a child data fetch request.

     Decodes the job data, then (under the job condition lock) registers
     a new ``ZookeeperJob`` for ``path`` if one is not already known and
     emits a POSTED notification for it.

     :param path: znode path the fetch request was issued for.
     :param request: async result whose ``get()`` yields (data, stat).
     :param quiet: when true, fetch/decode errors are logged instead of
                   re-raised.
     """
     job = None
     try:
         raw_data, node_stat = request.get()
         job_data = misc.decode_json(raw_data)
         job_created_on = misc.millis_to_datetime(node_stat.ctime)
         try:
             job_priority = job_data['priority']
             job_priority = base.JobPriority.convert(job_priority)
         except KeyError:
             # Older job postings may not carry a priority; default it.
             job_priority = base.JobPriority.NORMAL
         job_uuid = job_data['uuid']
         job_name = job_data['name']
     except (ValueError, TypeError, KeyError):
         with excutils.save_and_reraise_exception(reraise=not quiet):
             LOG.warning("Incorrectly formatted job data found at path: %s",
                         path, exc_info=True)
     except self._client.handler.timeout_exception:
         with excutils.save_and_reraise_exception(reraise=not quiet):
             # Fixed duplicated word in the message ("from from path").
             LOG.warning("Operation timed out fetching job data"
                         " from path: %s",
                         path, exc_info=True)
     except k_exceptions.SessionExpiredError:
         with excutils.save_and_reraise_exception(reraise=not quiet):
             LOG.warning("Session expired fetching job data from path: %s",
                         path, exc_info=True)
     except k_exceptions.NoNodeError:
         LOG.debug("No job node found at path: %s, it must have"
                   " disappeared or was removed", path)
     except k_exceptions.KazooException:
         with excutils.save_and_reraise_exception(reraise=not quiet):
             LOG.warning("Internal error fetching job data from path: %s",
                         path, exc_info=True)
     else:
         with self._job_cond:
             # Now we can officially check if someone already placed this
             # jobs information into the known job set (if it's already
             # existing then just leave it alone).
             if path not in self._known_jobs:
                 job = ZookeeperJob(self, job_name,
                                    self._client, path,
                                    backend=self._persistence,
                                    uuid=job_uuid,
                                    book_data=job_data.get("book"),
                                    details=job_data.get("details", {}),
                                    created_on=job_created_on,
                                    priority=job_priority)
                 self._known_jobs[path] = job
                 self._job_cond.notify_all()
     if job is not None:
         self._try_emit(base.POSTED, details={'job': job})
Пример #26
0
 def _get():
     """Load the flow detail (and linked atom details) for ``uuid``."""
     fd_path = os.path.join(self._flow_path, uuid)
     meta_path = os.path.join(fd_path, "metadata")
     fd = logbook.FlowDetail.from_dict(
         misc.decode_json(self._read_from(meta_path)))
     ad_path = os.path.join(fd_path, "atoms")
     ad_to_load = []
     try:
         # Only symlinked entries are references to real atom details.
         for name in os.listdir(ad_path):
             if os.path.islink(os.path.join(ad_path, name)):
                 ad_to_load.append(name)
     except EnvironmentError as e:
         # A missing atoms directory just means there are none to load.
         if e.errno != errno.ENOENT:
             raise
     for ad_uuid in ad_to_load:
         fd.add(self._get_atom_details(ad_uuid))
     return fd
Пример #27
0
 def _update_logbook(lb_path, lb_data, txn):
     """Merge the closed-over ``lb`` into its stored form and write it."""
     existing = p_utils.unformat_logbook(lb.uuid, misc.decode_json(lb_data))
     merged = p_utils.logbook_merge(existing, lb)
     encoded = p_utils.format_logbook(merged, created_at=lb.created_at)
     txn.set_data(lb_path, misc.binary_encode(jsonutils.dumps(encoded)))
     for fd in lb:
         fd_path = paths.join(lb_path, fd.uuid)
         if not self._client.exists(fd_path):
             # NOTE(harlowja): create an entry in the logbook path
             # for the provided flow detail so that a reference exists
             # from the logbook to its flow details.
             txn.create(fd_path)
         merged.add(self._update_flow_details(fd, txn, create_missing=True))
     return merged
Пример #28
0
 def _update_logbook(lb_path, lb_data, txn):
     """Write back the union of the stored logbook and the new ``lb``."""
     e_lb = p_utils.logbook_merge(
         p_utils.unformat_logbook(lb.uuid, misc.decode_json(lb_data)), lb)
     txn.set_data(
         lb_path,
         misc.binary_encode(jsonutils.dumps(
             p_utils.format_logbook(e_lb, created_at=lb.created_at))))
     for fd in lb:
         fd_path = paths.join(lb_path, fd.uuid)
         # Ensure the logbook node references this flow detail.
         if not self._client.exists(fd_path):
             txn.create(fd_path)
         e_fd = self._update_flow_details(fd, txn, create_missing=True)
         e_lb.add(e_fd)
     return e_lb
Пример #29
0
 def _get():
     """Load the flow detail (and its linked task details) for ``uuid``."""
     fd_path = os.path.join(self._flow_path, uuid)
     meta_path = os.path.join(fd_path, 'metadata')
     fd = p_utils.unformat_flow_detail(
         uuid, misc.decode_json(self._read_from(meta_path)))
     td_path = os.path.join(fd_path, 'tasks')
     td_to_load = []
     try:
         # Only symlinks inside the tasks directory are real references.
         for name in os.listdir(td_path):
             if os.path.islink(os.path.join(td_path, name)):
                 td_to_load.append(name)
     except EnvironmentError as e:
         # Missing tasks directory: nothing to load.
         if e.errno != errno.ENOENT:
             raise
     for t_uuid in td_to_load:
         fd.add(self._get_task_details(t_uuid))
     return fd
Пример #30
0
 def _update_logbook(lb_path, lb_data, txn):
     """Merge the closed-over ``lb`` with its stored form and persist it."""
     existing = logbook.LogBook.from_dict(misc.decode_json(lb_data),
                                          unmarshal_time=True)
     merged = existing.merge(lb)
     txn.set_data(
         lb_path,
         misc.binary_encode(
             jsonutils.dumps(merged.to_dict(marshal_time=True))))
     for fd in lb:
         fd_path = paths.join(lb_path, fd.uuid)
         if not self._client.exists(fd_path):
             # Create a reference node from the logbook to this flow detail.
             txn.create(fd_path)
         merged.add(self._update_flow_details(fd, txn, create_missing=True))
     return merged
Пример #31
0
 def _update_logbook(lb_path, lb_data, txn):
     """Persist the merge of the stored logbook and the incoming ``lb``."""
     e_lb = logbook.LogBook.from_dict(misc.decode_json(lb_data),
                                      unmarshal_time=True).merge(lb)
     payload = e_lb.to_dict(marshal_time=True)
     txn.set_data(lb_path, misc.binary_encode(jsonutils.dumps(payload)))
     for fd in lb:
         child = paths.join(lb_path, fd.uuid)
         # NOTE(harlowja): create an entry in the logbook path
         # for the provided flow detail so that a reference exists
         # from the logbook to its flow details.
         if not self._client.exists(child):
             txn.create(child)
         e_lb.add(self._update_flow_details(fd, txn, create_missing=True))
     return e_lb
Пример #32
0
 def _get():
     """Rebuild the flow detail for ``uuid`` from the filesystem."""
     fd_path = os.path.join(self._flow_path, uuid)
     meta_path = os.path.join(fd_path, 'metadata')
     fd = logbook.FlowDetail.from_dict(
         misc.decode_json(self._read_from(meta_path)))
     ad_path = os.path.join(fd_path, 'atoms')
     try:
         entries = os.listdir(ad_path)
     except EnvironmentError as e:
         # A missing atoms directory means no atom details exist yet.
         if e.errno != errno.ENOENT:
             raise
         entries = []
     for name in entries:
         # Only symlinked entries reference real atom details.
         if os.path.islink(os.path.join(ad_path, name)):
             fd.add(self._get_atom_details(name))
     return fd
Пример #33
0
 def _process_child(self, path, request):
     """Receives the result of a child data fetch request.

     Decodes the job data and, under the job condition variable,
     registers a new ``ZookeeperJob`` for ``path`` if one is not
     already known, then emits a POSTED event for it.
     """
     job = None
     try:
         raw_data, node_stat = request.get()
         job_data = misc.decode_json(raw_data)
         created_on = misc.millis_to_datetime(node_stat.ctime)
     except (ValueError, TypeError, KeyError):
         LOG.warning("Incorrectly formatted job data found at path: %s",
                     path, exc_info=True)
     except self._client.handler.timeout_exception:
         LOG.warning("Operation timed out fetching job data from path: %s",
                     path, exc_info=True)
     except k_exceptions.SessionExpiredError:
         LOG.warning("Session expired fetching job data from path: %s", path,
                     exc_info=True)
     except k_exceptions.NoNodeError:
         LOG.debug("No job node found at path: %s, it must have"
                   " disappeared or was removed", path)
     except k_exceptions.KazooException:
         LOG.warning("Internal error fetching job data from path: %s",
                     path, exc_info=True)
     else:
         # Use the condition as a context manager instead of manual
         # acquire/release (equivalent, but exception-safe by construction).
         with self._job_cond:
             # Now we can officially check if someone already placed this
             # jobs information into the known job set (if it's already
             # existing then just leave it alone).
             if path not in self._known_jobs:
                 job = ZookeeperJob(job_data['name'], self,
                                    self._client, self._persistence, path,
                                    uuid=job_data['uuid'],
                                    book_data=job_data.get("book"),
                                    details=job_data.get("details", {}),
                                    created_on=created_on)
                 self._known_jobs[path] = job
                 self._job_cond.notify_all()
     if job is not None:
         self._emit(jobboard.POSTED, details={'job': job})
Пример #34
0
 def _process_child(self, path, request):
     """Receives the result of a child data fetch request.

     Decodes the job data and, under the job condition variable,
     registers a new ``ZookeeperJob`` for ``path`` if one is not
     already known, then emits a POSTED event for it.
     """
     job = None
     try:
         raw_data, node_stat = request.get()
         job_data = misc.decode_json(raw_data)
         created_on = misc.millis_to_datetime(node_stat.ctime)
     except (ValueError, TypeError, KeyError):
         LOG.warning("Incorrectly formatted job data found at path: %s",
                     path, exc_info=True)
     except self._client.handler.timeout_exception:
         LOG.warning("Connection timed out fetching job data from path: %s",
                     path, exc_info=True)
     except k_exceptions.SessionExpiredError:
         LOG.warning("Session expired fetching job data from path: %s", path,
                     exc_info=True)
     except k_exceptions.NoNodeError:
         LOG.debug("No job node found at path: %s, it must have"
                   " disappeared or was removed", path)
     except k_exceptions.KazooException:
         LOG.warning("Internal error fetching job data from path: %s",
                     path, exc_info=True)
     else:
         # Use the condition as a context manager instead of manual
         # acquire/release (equivalent, but exception-safe by construction).
         with self._job_cond:
             # Now we can officially check if someone already placed this
             # jobs information into the known job set (if it's already
             # existing then just leave it alone).
             if path not in self._known_jobs:
                 job = ZookeeperJob(job_data['name'], self,
                                    self._client, self._persistence, path,
                                    uuid=job_data['uuid'],
                                    book_data=job_data.get("book"),
                                    details=job_data.get("details", {}),
                                    created_on=created_on)
                 self._known_jobs[path] = job
                 self._job_cond.notify_all()
     if job is not None:
         self._emit(jobboard.POSTED, details={'job': job})
Пример #35
0
 def _get_logbook(self, book_uuid):
     """Load (and reconstruct) the logbook with the given uuid.

     Raises ``exc.NotFound`` when no logbook metadata exists on disk;
     flow details are attached from the symlinks under the 'flows' dir.
     """
     book_path = os.path.join(self._book_path, book_uuid)
     meta_path = os.path.join(book_path, "metadata")
     try:
         meta = misc.decode_json(self._read_from(meta_path))
     except EnvironmentError as io_err:
         if io_err.errno == errno.ENOENT:
             raise exc.NotFound("No logbook found with id: %s" % book_uuid)
         raise
     lb = logbook.LogBook.from_dict(meta, unmarshal_time=True)
     fd_path = os.path.join(book_path, "flows")
     try:
         entries = os.listdir(fd_path)
     except EnvironmentError as io_err:
         # A missing flows directory just means there are no flow details.
         if io_err.errno != errno.ENOENT:
             raise
         entries = []
     for entry in entries:
         if os.path.islink(os.path.join(fd_path, entry)):
             lb.add(self._get_flow_details(entry))
     return lb
Пример #36
0
 def _get_logbook(self, book_uuid):
     """Reconstruct the logbook with the given uuid from on-disk state.

     Raises ``exc.NotFound`` when the logbook metadata file is missing;
     flow details found as symlinks under 'flows' are attached to it.
     """
     book_path = os.path.join(self._book_path, book_uuid)
     meta_path = os.path.join(book_path, 'metadata')
     try:
         raw_meta = self._read_from(meta_path)
     except EnvironmentError as io_err:
         if io_err.errno != errno.ENOENT:
             raise
         raise exc.NotFound("No logbook found with id: %s" % book_uuid)
     lb = p_utils.unformat_logbook(book_uuid, misc.decode_json(raw_meta))
     fd_path = os.path.join(book_path, 'flows')
     flow_uuids = []
     try:
         flow_uuids = [entry for entry in os.listdir(fd_path)
                       if os.path.islink(os.path.join(fd_path, entry))]
     except EnvironmentError as io_err:
         # No flows directory -> no flow details to attach.
         if io_err.errno != errno.ENOENT:
             raise
     for flow_uuid in flow_uuids:
         lb.add(self._get_flow_details(flow_uuid))
     return lb
Пример #37
0
    def _update_task_details(self, td, txn, create_missing=False):
        """Merge ``td`` into its stored counterpart and queue the write.

        When no node exists for the task detail, either create it inside
        ``txn`` (if ``create_missing``) or raise ``exc.NotFound``. The
        merged task detail is returned.
        """
        td_path = paths.join(self.task_path, td.uuid)
        try:
            stored_data, _zstat = self._client.get(td_path)
        except k_exc.NoNodeError:
            # Not-existent: create or raise exception.
            if not create_missing:
                raise exc.NotFound("No task details found with id: %s"
                                   % td.uuid)
            txn.create(td_path)
            e_td = logbook.TaskDetail(name=td.name, uuid=td.uuid)
        else:
            # Existent: read it out.
            e_td = p_utils.unformat_task_detail(td.uuid,
                                                misc.decode_json(stored_data))

        # Update and write it back
        e_td = p_utils.task_details_merge(e_td, td)
        merged_data = p_utils.format_task_detail(e_td)
        txn.set_data(td_path,
                     misc.binary_encode(jsonutils.dumps(merged_data)))
        return e_td
Пример #38
0
    def _update_task_details(self, td, txn, create_missing=False):
        """Merge ``td`` with the stored version and stage the update.

        Missing nodes are created in ``txn`` when ``create_missing`` is
        set, otherwise ``exc.NotFound`` is raised. Returns the merged
        task detail.
        """
        td_path = paths.join(self.task_path, td.uuid)
        try:
            raw_data, _zstat = self._client.get(td_path)
        except k_exc.NoNodeError:
            # Not-existent: create or raise exception.
            if create_missing:
                txn.create(td_path)
                existing = logbook.TaskDetail(name=td.name, uuid=td.uuid)
            else:
                raise exc.NotFound("No task details found with id: %s" %
                                   td.uuid)
        else:
            # Existent: read it out.
            decoded = misc.decode_json(raw_data)
            existing = p_utils.unformat_task_detail(td.uuid, decoded)

        # Update and write it back
        existing = p_utils.task_details_merge(existing, td)
        payload = jsonutils.dumps(p_utils.format_task_detail(existing))
        txn.set_data(td_path, misc.binary_encode(payload))
        return existing
Пример #39
0
 def _get_owner_and_data(self, job):
     """Fetch decoded owner + job data (and their stats) for ``job``."""
     owner_raw, owner_stat = self._client.get(job.lock_path)
     data_raw, data_stat = self._client.get(job.path)
     owner = misc.decode_json(owner_raw)
     data = misc.decode_json(data_raw)
     return (owner, owner_stat, data, data_stat)
Пример #40
0
 def _get_owner_and_data(self, job):
     """Fetch decoded lock-owner + job data (and stats) for ``job``."""
     job_path, lock_path = _get_paths(self.path, job.uuid)
     raw_owner, owner_stat = self._client.get(lock_path)
     raw_job, job_stat = self._client.get(job_path)
     return (misc.decode_json(raw_owner), owner_stat,
             misc.decode_json(raw_job), job_stat)
Пример #41
0
 def _get():
     # Closure: ``self`` and ``uuid`` come from the enclosing scope.
     data = misc.decode_json(
         self._read_from(os.path.join(self._atom_path, uuid)))
     detail_cls = logbook.atom_detail_class(data['type'])
     return detail_cls.from_dict(data['atom'])
Пример #42
0
 def _get_item(self, path):
     """Read and JSON-decode the node stored at ``path``."""
     with self._exc_wrapper():
         raw, _zstat = self._client.get(path)
     return misc.decode_json(raw)
Пример #43
0
 def _get_item(self, path):
     """Fetch the node at ``path`` and return its decoded contents."""
     with self._exc_wrapper():
         # get() returns (data, stat); only the data part is needed.
         payload = self._client.get(path)[0]
     return misc.decode_json(payload)
Пример #44
0
 def _get_owner_and_data(self, job):
     """Return decoded lock + job node contents (with their stats)."""
     raw_lock, lock_stat = self._client.get(job.lock_path)
     raw_job, job_stat = self._client.get(job.path)
     return (misc.decode_json(raw_lock), lock_stat,
             misc.decode_json(raw_job), job_stat)
Пример #45
0
 def _get():
     # ``self`` and ``uuid`` are free variables from the enclosing scope.
     ad_path = os.path.join(self._atom_path, uuid)
     decoded = misc.decode_json(self._read_from(ad_path))
     return logbook.atom_detail_class(decoded['type']).from_dict(
         decoded['atom'])
Пример #46
0
 def test_it_works_with_unicode(self):
     """Non-ascii JSON payloads decode into unicode values."""
     raw = _bytes('{"foo": "фуу"}')
     decoded = misc.decode_json(raw)
     self.assertEqual(decoded, {"foo": u'фуу'})
Пример #47
0
 def test_it_works(self):
     """A simple JSON object decodes to the matching dict."""
     decoded = misc.decode_json(_bytes('{"foo": 1}'))
     self.assertEqual(decoded, {"foo": 1})
Пример #48
0
 def test_it_works(self):
     """Decoding a basic JSON object yields the expected dict."""
     raw = _bytes('{"foo": 1}')
     self.assertEqual(misc.decode_json(raw), {"foo": 1})
Пример #49
0
 def test_it_works_with_unicode(self):
     """Decoding non-ascii JSON produces unicode string values."""
     expected = {"foo": u'фуу'}
     self.assertEqual(misc.decode_json(_bytes('{"foo": "фуу"}')), expected)
Пример #50
0
 def _get_item(self, path):
     """Read the 'metadata' entry under ``path`` while holding its lock."""
     with self._path_lock(path):
         meta_path = self._join_path(path, 'metadata')
         raw = self._read_from(meta_path)
         return misc.decode_json(raw)
Пример #51
0
 def _get():
     # ``self`` and ``uuid`` are free variables from the enclosing scope.
     raw = self._read_from(os.path.join(self._task_path, uuid))
     return p_utils.unformat_task_detail(uuid, misc.decode_json(raw))
Пример #52
0
 def _get_item(self, path):
     """Load and decode the metadata stored for the item at ``path``."""
     with self._path_lock(path):
         metadata_path = self._join_path(path, 'metadata')
         return misc.decode_json(self._read_from(metadata_path))
Пример #53
0
 def _get_owner_and_data(self, job):
     """Return decoded lock-owner + job data and their node stats."""
     job_path, lock_path = _get_paths(self.path, job.uuid)
     lock_raw, lock_stat = self._client.get(lock_path)
     job_raw, job_stat = self._client.get(job_path)
     owner = misc.decode_json(lock_raw)
     data = misc.decode_json(job_raw)
     return (owner, lock_stat, data, job_stat)