def container_audit(self, path):
    """
    Audits the given container path.

    :param path: the path to a container db
    """
    # Only *.db files are container databases; silently skip anything else
    # (str.endswith cannot raise, so this is safe outside the try).
    if not path.endswith('.db'):
        return
    broker = None  # so the except handler can fall back to `path`
    try:
        broker = ContainerBroker(path)
        if not broker.is_deleted():
            # get_info() raising is what constitutes an audit failure;
            # its return value is not needed here.
            broker.get_info()
            self.container_passes += 1
            self.logger.debug(_('Audit passed for %s'), broker.db_file)
    except (Exception, Timeout):
        self.container_failures += 1
        # BUG FIX: the original referenced broker.db_file unconditionally,
        # raising UnboundLocalError when ContainerBroker(path) itself failed.
        self.logger.exception(_('ERROR Could not get container info %s'),
                              broker.db_file if broker else path)
def container_audit(self, path):
    """
    Audits the given container path.

    :param path: the path to a container db
    """
    start_time = time.time()
    broker = None  # so the except handler can fall back to `path`
    try:
        broker = ContainerBroker(path)
        if not broker.is_deleted():
            # A clean get_info() is the audit pass condition; the returned
            # dict is not otherwise used.
            broker.get_info()
            self.logger.increment('passes')
            self.container_passes += 1
            self.logger.debug(_('Audit passed for %s'), broker.db_file)
    except (Exception, Timeout):
        self.logger.increment('failures')
        self.container_failures += 1
        # BUG FIX: guard against `broker` being unbound when
        # ContainerBroker(path) itself raised.
        self.logger.exception(_('ERROR Could not get container info %s'),
                              broker.db_file if broker else path)
    # Timing is reported on both the success and the failure path.
    self.logger.timing_since('timing', start_time)
def container_audit(self, path):
    """
    Audits the given container path.

    :param path: the path to a container db
    """
    start_time = time.time()
    # Only *.db files are container databases; skip anything else.
    # (As in the original, an early return here skips the timing report.)
    if not path.endswith(".db"):
        return
    broker = None  # so the except handler can fall back to `path`
    try:
        broker = ContainerBroker(path)
        if not broker.is_deleted():
            # get_info() raising is what constitutes an audit failure;
            # the result dict is not otherwise needed.
            broker.get_info()
            self.logger.increment("passes")
            self.container_passes += 1
            self.logger.debug(_("Audit passed for %s"), broker.db_file)
    except (Exception, Timeout):
        self.logger.increment("failures")
        self.container_failures += 1
        # BUG FIX: `broker` may be unbound if ContainerBroker(path) raised.
        self.logger.exception(_("ERROR Could not get container info %s"),
                              broker.db_file if broker else path)
    self.logger.timing_since("timing", start_time)
def get_data(self, db_path):
    """
    Data for generated csv has the following columns:
    Account Hash, Container Name, Object Count, Bytes Used

    This will just collect whether or not the metadata is set
    using a 1 or ''.

    :raises sqlite3.Error: does not catch errors connecting to db
    """
    broker = ContainerBroker(db_path)
    if broker.is_deleted():
        # Deleted containers contribute no csv row.
        return None
    # Only fetch metadata when we actually report on metadata keys.
    info = broker.get_info(include_metadata=bool(self.metadata_keys))
    quoted_container = urllib.quote(info['container'])
    row = '"%s","%s",%d,%d' % (info['account'], quoted_container,
                               info['object_count'], info['bytes_used'])
    if self.metadata_keys:
        # '1' when the key is present with a truthy value, '' otherwise.
        flags = ['1' if info['metadata'].get(mkey) else ''
                 for mkey in self.metadata_keys]
        row += ',%s' % ','.join(flags)
    return row + '\n'
def container_sync(self, path):
    """
    Checks the given path for a container database, determines if syncing
    is turned on for that database and, if so, sends any updates to the
    other container.

    :param path: the path to a container db
    """
    # Initialized before the try so the except handler can safely fall
    # back to `path` if ContainerBroker(path) itself raises.
    broker = None
    try:
        broker = ContainerBroker(path)
        info = broker.get_info()
        x, nodes = self.container_ring.get_nodes(info['account'],
                                                 info['container'])
        # Only sync from a node that is actually a primary for this
        # container; `ordinal` is this node's position among the replicas.
        for ordinal, node in enumerate(nodes):
            if node['ip'] in self._myips and node['port'] == self._myport:
                break
        else:
            return
        if not broker.is_deleted():
            sync_to = None
            sync_key = None
            sync_point1 = info['x_container_sync_point1']
            sync_point2 = info['x_container_sync_point2']
            # Pull the sync destination and shared key out of the
            # container's metadata (case-insensitive header names).
            for key, (value, timestamp) in broker.metadata.iteritems():
                if key.lower() == 'x-container-sync-to':
                    sync_to = value
                elif key.lower() == 'x-container-sync-key':
                    sync_key = value
            if not sync_to or not sync_key:
                # Syncing is not configured for this container.
                self.container_skips += 1
                self.logger.increment('skips')
                return
            sync_to = sync_to.rstrip('/')
            err = validate_sync_to(sync_to, self.allowed_sync_hosts)
            if err:
                self.logger.info(
                    _('ERROR %(db_file)s: %(validate_sync_to_err)s'),
                    {'db_file': broker.db_file,
                     'validate_sync_to_err': err})
                self.container_failures += 1
                self.logger.increment('failures')
                return
            # Bound the total time spent on this container per pass.
            stop_at = time() + self.container_time
            next_sync_point = None
            # First loop: retry the window of rows between sync_point2 and
            # sync_point1 that earlier passes may have skipped.
            while time() < stop_at and sync_point2 < sync_point1:
                rows = broker.get_items_since(sync_point2, 1)
                if not rows:
                    break
                row = rows[0]
                if row['ROWID'] > sync_point1:
                    break
                key = hash_path(info['account'], info['container'],
                                row['name'], raw_digest=True)
                # This node will only initially sync out one third of the
                # objects (if 3 replicas, 1/4 if 4, etc.) and will skip
                # problematic rows as needed in case of faults.
                # This section will attempt to sync previously skipped
                # rows in case the previous attempts by any of the nodes
                # didn't succeed.
                if not self.container_sync_row(row, sync_to, sync_key,
                                               broker, info):
                    # Remember the first failed row so sync_point2 can be
                    # rewound to it after the loop; keep advancing so later
                    # rows are still attempted this pass.
                    if not next_sync_point:
                        next_sync_point = sync_point2
                sync_point2 = row['ROWID']
                broker.set_x_container_sync_points(None, sync_point2)
            if next_sync_point:
                # Rewind to the earliest failure so it is retried next pass.
                broker.set_x_container_sync_points(None, next_sync_point)
            # Second loop: sync new rows beyond sync_point1; this node only
            # takes its 1/len(nodes) share by hashing the object name.
            while time() < stop_at:
                rows = broker.get_items_since(sync_point1, 1)
                if not rows:
                    break
                row = rows[0]
                key = hash_path(info['account'], info['container'],
                                row['name'], raw_digest=True)
                # This node will only initially sync out one third of the
                # objects (if 3 replicas, 1/4 if 4, etc.). It'll come back
                # around to the section above and attempt to sync
                # previously skipped rows in case the other nodes didn't
                # succeed or in case it failed to do so the first time.
                if unpack_from('>I', key)[0] % \
                        len(nodes) == ordinal:
                    self.container_sync_row(row, sync_to, sync_key,
                                            broker, info)
                sync_point1 = row['ROWID']
                broker.set_x_container_sync_points(sync_point1, None)
            self.container_syncs += 1
            self.logger.increment('syncs')
    except (Exception, Timeout) as err:
        self.container_failures += 1
        self.logger.increment('failures')
        self.logger.exception(_('ERROR Syncing %s'),
                              broker.db_file if broker else path)
def container_sync(self, path):
    """
    Checks the given path for a container database, determines if syncing
    is turned on for that database and, if so, sends any updates to the
    other container.

    :param path: the path to a container db
    """
    # BUG FIX: initialize broker before the try so the except handler
    # does not hit UnboundLocalError when ContainerBroker(path) raises.
    broker = None
    try:
        if not path.endswith(".db"):
            return
        broker = ContainerBroker(path)
        info = broker.get_info()
        x, nodes = self.container_ring.get_nodes(info["account"],
                                                 info["container"])
        # Only sync from a node that is a primary for this container;
        # `ordinal` is this node's position among the replicas.
        for ordinal, node in enumerate(nodes):
            if node["ip"] in self._myips and node["port"] == self._myport:
                break
        else:
            return
        if not broker.is_deleted():
            sync_to = None
            sync_key = None
            sync_point1 = info["x_container_sync_point1"]
            sync_point2 = info["x_container_sync_point2"]
            # Pull sync destination and key from the container metadata
            # (case-insensitive header names).
            for key, (value, timestamp) in broker.metadata.iteritems():
                if key.lower() == "x-container-sync-to":
                    sync_to = value
                elif key.lower() == "x-container-sync-key":
                    sync_key = value
            if not sync_to or not sync_key:
                # Syncing is not configured for this container.
                self.container_skips += 1
                return
            sync_to = sync_to.rstrip("/")
            err = validate_sync_to(sync_to, self.allowed_sync_hosts)
            if err:
                self.logger.info(
                    _("ERROR %(db_file)s: %(validate_sync_to_err)s"),
                    {"db_file": broker.db_file,
                     "validate_sync_to_err": err},
                )
                self.container_failures += 1
                return
            # Bound total time spent on this container per pass.
            stop_at = time() + self.container_time
            # First loop: catch up rows between sync_point2 and sync_point1
            # that other nodes were responsible for but may have missed.
            while time() < stop_at and sync_point2 < sync_point1:
                rows = broker.get_items_since(sync_point2, 1)
                if not rows:
                    break
                row = rows[0]
                if row["ROWID"] >= sync_point1:
                    break
                key = hash_path(info["account"], info["container"],
                                row["name"], raw_digest=True)
                # This node will only initially sync out one third of the
                # objects (if 3 replicas, 1/4 if 4, etc.). This section
                # will attempt to sync previously skipped rows in case the
                # other nodes didn't succeed.
                if unpack_from(">I", key)[0] % len(nodes) != ordinal:
                    if not self.container_sync_row(row, sync_to, sync_key,
                                                   broker, info):
                        return
                sync_point2 = row["ROWID"]
                broker.set_x_container_sync_points(None, sync_point2)
            # Second loop: sync new rows beyond sync_point1; this node only
            # takes its 1/len(nodes) share by hashing the object name.
            while time() < stop_at:
                rows = broker.get_items_since(sync_point1, 1)
                if not rows:
                    break
                row = rows[0]
                key = hash_path(info["account"], info["container"],
                                row["name"], raw_digest=True)
                # This node will only initially sync out one third of the
                # objects (if 3 replicas, 1/4 if 4, etc.). It'll come back
                # around to the section above and attempt to sync
                # previously skipped rows in case the other nodes didn't
                # succeed.
                if unpack_from(">I", key)[0] % len(nodes) == ordinal:
                    if not self.container_sync_row(row, sync_to, sync_key,
                                                   broker, info):
                        return
                sync_point1 = row["ROWID"]
                broker.set_x_container_sync_points(sync_point1, None)
            self.container_syncs += 1
    # BUG FIX: replaced the Python-2-only `except ..., err` syntax with a
    # plain handler (the binding was unused), matching the file's other
    # sync variant; also fall back to `path` when broker is None.
    except (Exception, Timeout):
        self.container_failures += 1
        self.logger.exception(_("ERROR Syncing %s"),
                              broker.db_file if broker else path)