def _publish(self, payload):
    """Publishes a payload to the passed exchange.

    If the publisher cannot send right now (it is closing, reconnecting,
    or in an unexpected state), the payload is stored in the pending
    list for later delivery and False is returned. On a successful send
    the payload is removed from the pending list and True is returned.

    :param Payload payload: The payload to send.
    :return: True if the payload was sent, False if it was deferred.
    """
    LOG.debug(
        _("Sending message to %(name)s [%(topic)s]") % {
            'name': self._exchange_name,
            'topic': payload.topic
        })

    # First check, are we closing? No reconnect is scheduled here; the
    # publisher is shutting down.
    if self._closing:
        LOG.warning(_LW("Cannot send message, publisher is closing."))
        if payload not in self._pending:
            self._pending.append(payload)
        # BUG FIX: this path (and the two below) previously returned
        # None while the send path returned True/False; return False
        # for a consistent boolean contract (None was already falsy,
        # so callers testing truthiness are unaffected).
        return False

    # Second check, are we open?
    if not self._open:
        LOG.debug(_("Cannot send message, publisher is connecting."))
        if payload not in self._pending:
            self._pending.append(payload)
        self._reconnect()
        return False

    # Third check, are we in a sane state? This should never happen,
    # but just in case...
    if not self._connection or not self._channel:
        # BUG FIX: message previously read "publisher is an unexpected
        # state" — missing the word "in".
        LOG.error(_LE("Cannot send message, publisher is "
                      "in an unexpected state."))
        if payload not in self._pending:
            self._pending.append(payload)
        self._reconnect()
        return False

    # Try to send a message. If we fail, schedule a reconnect and store
    # the message.
    try:
        self._channel.basic_publish(
            self._exchange_name,
            payload.topic,
            json.dumps(payload.payload, ensure_ascii=False),
            self._properties)
        if payload in self._pending:
            self._pending.remove(payload)
        return True
    except ConnectionClosed as cc:
        LOG.warning(_LW("Attempted to send message on closed connection."))
        LOG.debug(cc)
        self._open = False
        if payload not in self._pending:
            self._pending.append(payload)
        self._reconnect()
        return False
def _publish(self, payload):
    """Publishes a payload to the passed exchange.

    Failure paths (closing, reconnecting, or an unexpected internal
    state) store the payload in the pending list for later delivery and
    return False; a successful send removes it from the pending list
    and returns True.

    :param Payload payload: The payload to send.
    :return: True if the payload was sent, False if it was deferred.
    """
    LOG.debug(_("Sending message to %(name)s [%(topic)s]")
              % {'name': self._exchange_name, 'topic': payload.topic})

    # First check, are we closing? Intentionally no reconnect here.
    if self._closing:
        LOG.warning(_LW("Cannot send message, publisher is closing."))
        if payload not in self._pending:
            self._pending.append(payload)
        # BUG FIX: the three failure paths previously returned None
        # while the send path returned True/False; return False for a
        # consistent boolean contract (None was already falsy).
        return False

    # Second check, are we open?
    if not self._open:
        LOG.debug(_("Cannot send message, publisher is connecting."))
        if payload not in self._pending:
            self._pending.append(payload)
        self._reconnect()
        return False

    # Third check, are we in a sane state? This should never happen,
    # but just in case...
    if not self._connection or not self._channel:
        # BUG FIX: message previously read "publisher is an unexpected
        # state" — missing the word "in".
        LOG.error(_LE("Cannot send message, publisher is "
                      "in an unexpected state."))
        if payload not in self._pending:
            self._pending.append(payload)
        self._reconnect()
        return False

    # Try to send a message. If we fail, schedule a reconnect and store
    # the message.
    try:
        self._channel.basic_publish(self._exchange_name,
                                    payload.topic,
                                    json.dumps(payload.payload,
                                               ensure_ascii=False),
                                    self._properties)
        if payload in self._pending:
            self._pending.remove(payload)
        return True
    except ConnectionClosed as cc:
        LOG.warning(_LW("Attempted to send message on closed connection."))
        LOG.debug(cc)
        self._open = False
        if payload not in self._pending:
            self._pending.append(payload)
        self._reconnect()
        return False
def get(self):
    """Fetch a single message from the queue.

    While the subscriber is closing or waiting to reconnect, no fetch
    is attempted and a (None, None, None) triple is returned. Note that
    you must manually ack the message after it has been successfully
    processed.

    :rtype: (None, None, None)|(spec.Basic.Get, spec.Basic.Properties,
        str or unicode)
    """
    empty_result = (None, None, None)

    # Sanity checks: shutting down, or not (yet) connected.
    if self._closing or not self._open:
        return empty_result

    try:
        return self._channel.basic_get(queue=self._queue_name,
                                       no_ack=False)
    except ConnectionClosed as cc:
        LOG.warning(_LW("Attempted to get message on closed connection."))
        LOG.debug(cc)
        self._open = False
        self._reconnect()
        return empty_result
def _health_check(self): processes = list(self._procs) dead_processes = 0 for process in processes: if not process.is_alive(): LOG.warning(_LW("Dead Process found [exit code:%d]") % (process.exitcode,)) dead_processes += 1 self._procs.remove(process) for i in range(dead_processes): self._add_process()
def _health_check(self): processes = list(self._procs) dead_processes = 0 for process in processes: if not process.is_alive(): LOG.warning( _LW("Dead Process found [exit code:%d]") % (process.exitcode, )) dead_processes += 1 self._procs.remove(process) for i in range(dead_processes): self._add_process()
def _get_project(project, session):
    """Loads, or lazily creates, the project described by ``project``.

    The project name is validated first; an invalid name is skipped
    with a warning and None is returned. A missing project row is
    created, and a 'master' branch is ensured for the project.

    :param project: Mapping with a 'project' key (the name) and an
        optional 'description' key.
    :param session: Database session used for queries and inserts.
    :return: The Project row, or None if name validation failed.
    """
    validator = NameType()
    name = six.text_type(project['project'])
    if 'description' in project:
        description = six.text_type(project['description'])
    else:
        description = ''

    try:
        validator.validate(name)
    except Exception:
        # Skipping invalid project names.
        # BUG FIX: LOG.warn is deprecated (use LOG.warning, as the rest
        # of this module does), and interpolating with a one-element
        # list rendered the name with brackets, e.g. "['foo']" — pass
        # the bare string instead.
        LOG.warning(_LW("Project %s was not loaded. Validation failed.")
                    % name)
        return None

    db_project = session.query(Project) \
        .filter_by(name=name).first()
    if not db_project:
        db_project = Project()
        db_project.name = name
        db_project.description = description
        db_project.groups = []
        session.add(db_project)
        # Flush so db_project.id is populated for the branch lookup.
        session.flush()

    master_branch = session.query(Branch).\
        filter_by(name='master', project_id=db_project.id).first()
    if not master_branch:
        master_branch = Branch()
        master_branch.update(MasterBranchHelper(db_project.id).as_dict())
        session.add(master_branch)

    return db_project
def _get_project(project, session):
    """Loads, or lazily creates, the project described by ``project``.

    The project name is validated first; an invalid name is skipped
    with a warning and None is returned. A missing project row is
    created, and a 'master' branch is ensured for the project.

    :param project: Mapping with a 'project' key (the name) and an
        optional 'description' key.
    :param session: Database session used for queries and inserts.
    :return: The Project row, or None if name validation failed.
    """
    validator = NameType()
    name = six.text_type(project['project'])
    if 'description' in project:
        description = six.text_type(project['description'])
    else:
        description = ''

    try:
        validator.validate(name)
    except Exception:
        # Skipping invalid project names.
        # BUG FIX: interpolating with a one-element list rendered the
        # name with brackets, e.g. "Project ['foo'] was not loaded" —
        # pass the bare string instead.
        LOG.warning(_LW("Project %s was not loaded. Validation failed.")
                    % name)
        return None

    db_project = session.query(Project) \
        .filter_by(name=name).first()
    if not db_project:
        db_project = Project()
        db_project.name = name
        db_project.description = description
        db_project.groups = []
        session.add(db_project)
        # Flush so db_project.id is populated for the branch lookup.
        session.flush()

    master_branch = session.query(Branch).\
        filter_by(name='master', project_id=db_project.id).first()
    if not master_branch:
        master_branch = Branch()
        master_branch.update(MasterBranchHelper(db_project.id).as_dict())
        session.add(master_branch)

    return db_project