def get_key(self, table, key, topic=None):
    """Return the value stored for *key* in *table*.

    :raises DBKeyNotFound: when the ZooKeeper node does not exist.
    """
    node_path = self._generate_path(table, key)
    try:
        self._lazy_initialize()
        # kazoo's get() returns a (value, ZnodeStat) pair; only the
        # raw value is of interest here.
        value, _stat = self.client.get(node_path)
    except kazoo.exceptions.NoNodeError:
        raise df_exceptions.DBKeyNotFound(key=key)
    return value
def get_key(self, table, key, topic=None):
    """Return the value stored for *key* in *table*.

    When *topic* is omitted, the key is located by scanning across
    topics first.

    :raises DBKeyNotFound: when no matching entry exists or the
        backend command fails.
    """
    if topic:
        local_key = self._uuid_to_key(table, key, topic)
    else:
        local_key = self._find_key_without_topic(table, key)
        if local_key is None:
            raise df_exceptions.DBKeyNotFound(key=key)
    try:
        value = self._execute_cmd("GET", local_key)
        if value is not None:
            return value
    except Exception:
        LOG.exception("exception when get_key: %(key)s", {'key': local_key})
    # Reached on a missing value as well as on a backend error.
    raise df_exceptions.DBKeyNotFound(key=key)
def get_key(self, table, key, topic=None):
    """Return the value stored for *key* in Cassandra table *table*.

    :raises DBKeyNotFound: when the row is missing or the query fails.
    """
    try:
        # Bind the key as a query parameter instead of interpolating it
        # into the CQL text (prevents CQL injection). Table names cannot
        # be bound in CQL, so the table is still formatted in.
        rows = self.session.execute(
            "SELECT value FROM %s WHERE key=%%s;" % table, (key,))
        return rows[0]['value']
    except Exception:
        # Any failure (no row, bad table, connection error) is surfaced
        # uniformly as a missing key, matching the driver contract.
        raise df_exceptions.DBKeyNotFound(key=key)
def get_all_keys(self, table, topic=None):
    """Return every key present in Cassandra table *table*.

    :raises DBKeyNotFound: when the query fails (reported with the
        table name as the missing key).
    """
    try:
        rows = self.session.execute("SELECT key FROM %s;" % table)
    except Exception:
        raise df_exceptions.DBKeyNotFound(key=table)
    return [row['key'] for row in rows]
def delete_key(self, table, key, topic=None):
    """Delete the row for *key* from Cassandra table *table*.

    :raises DBKeyNotFound: when the delete statement fails.
    """
    try:
        # Bind the key as a parameter rather than interpolating it into
        # the CQL string (prevents CQL injection); only the table name,
        # which CQL cannot bind, is formatted in.
        self.session.execute(
            "DELETE FROM %s WHERE key=%%s;" % table, (key,))
    except Exception:
        raise df_exceptions.DBKeyNotFound(key=key)
def get_key(self, table, key, topic=None):
    """Return the value stored for *key* in RethinkDB table *table*.

    :raises DBKeyNotFound: when the document is absent or the query
        operation fails.
    """
    with self._get_conn() as conn:
        try:
            document = self._query_key(table, key).run(conn)
        except rdb.errors.ReqlOpFailedError:
            # A failed operation (e.g. missing table) is treated the
            # same as a missing document.
            document = None
        if document is None:
            raise exceptions.DBKeyNotFound(key=key)
        return document['value']
def get_all_keys(self, table, topic=None):
    """Return every key stored under the etcd directory for *table*.

    :raises DBKeyNotFound: when the table directory does not exist.
    """
    try:
        directory = self.client.get("/" + table)
    except etcd.EtcdKeyNotFound:
        raise df_exceptions.DBKeyNotFound(key=table)
    # Strip the "/<table>/" prefix from each child path; its length is
    # invariant, so compute it once outside the loop.
    prefix_len = len(table) + 2
    return [entry.key[prefix_len:] for entry in directory.children]
def delete_key(self, table, key, topic=None):
    """Delete *key* from *table* on the redis node that owns it.

    :returns: the backend client's delete result.
    :raises DBKeyNotFound: when the backend operation fails.
    """
    local_key = self.uuid_to_key(table, key, topic)
    try:
        client = self._get_client(local_key)
        return client.delete(local_key)
    except Exception as e:
        # Pass the args dict to the logger instead of %-formatting it
        # eagerly, so interpolation only happens if the record is emitted.
        LOG.exception(_LE("exception %(key)s: %(e)s"),
                      {'key': local_key, 'e': e})
        raise df_exceptions.DBKeyNotFound(key=local_key)
def set_key(self, table, key, value, topic=None):
    """Update the document for *key* in RethinkDB table *table*.

    :raises DBKeyNotFound: when the update skipped (no such document).
    """
    # FIXME cannot marshall None values
    with self._get_conn() as conn:
        outcome = self._query_key(table, key).update({
            'id': key,
            'value': value,
        }).run(conn)
        if outcome['skipped'] == 1:
            raise exceptions.DBKeyNotFound(key=key)
def get_all_entries(self, table, topic=None):
    """Return the value of every entry stored under *table* in ZooKeeper.

    :raises DBKeyNotFound: when the table node does not exist.
    """
    table_path = self._generate_path(table, None)
    entries = []
    try:
        self._lazy_initialize()
        for child in self.client.get_children(table_path):
            # Each child node name is a key; fetch its value.
            entries.append(self.get_key(table, child))
    except kazoo.exceptions.NoNodeError:
        raise df_exceptions.DBKeyNotFound(key=table)
    return entries
def get_all_keys(self, table, topic=None):
    """Return every key in *table*, across all nodes or one topic.

    With no *topic*, every redis client is scanned with a wildcard
    pattern; with a *topic*, only the owning node is queried.

    :raises DBKeyNotFound: when a backend operation fails.
    """
    if topic is None:
        res = []
        local_key = self.uuid_to_key(table, '*', '*')
        try:
            for client in six.itervalues(self.clients):
                res.extend(client.keys(local_key))
            return res
        except Exception as e:
            # Lazy logger interpolation: pass the args dict instead of
            # %-formatting the message eagerly.
            LOG.exception(_LE("exception %(key)s: %(e)s"),
                          {'key': local_key, 'e': e})
            raise df_exceptions.DBKeyNotFound(key=local_key)
    local_key = self.uuid_to_key(table, '*', topic)
    try:
        client = self._get_client(local_key)
        return client.keys(local_key)
    except Exception as e:
        LOG.exception(_LE("exception %(key)s: %(e)s"),
                      {'key': local_key, 'e': e})
        raise df_exceptions.DBKeyNotFound(key=local_key)
def set_key(self, table, key, value, topic=None):
    """Store *value* under *key* in *table* on the owning redis node.

    :returns: the backend SET result.
    :raises DBKeyNotFound: when the backend operation fails.
    """
    local_key = self.uuid_to_key(table, key, topic)
    try:
        client = self._get_client(local_key)
        res = client.set(local_key, value)
        if not res:
            # SET reported failure — presumably this clears any
            # partially written entry; TODO confirm intent.
            client.delete(local_key)
        return res
    except Exception as e:
        # Lazy logger interpolation instead of eager %-formatting.
        LOG.exception(_LE("exception %(key)s: %(e)s"),
                      {'key': local_key, 'e': e})
        raise df_exceptions.DBKeyNotFound(key=local_key)
def get_key(self, table, key, topic=None):
    """Return the value stored for *key* in *table*.

    With no *topic*, all redis clients are searched with a wildcard
    pattern; when no single match is found, ``None`` is returned
    (mirroring redis GET semantics). With a *topic*, the owning node
    is queried directly.

    :raises DBKeyNotFound: when a backend operation fails.
    """
    if topic is None:
        local_key = self.uuid_to_key(table, key, '*')
        try:
            for client in six.itervalues(self.clients):
                local_keys = client.keys(local_key)
                if len(local_keys) == 1:
                    return client.get(local_keys[0])
        except Exception as e:
            # Lazy logger interpolation instead of eager %-formatting.
            LOG.exception(_LE("exception %(key)s: %(e)s"),
                          {'key': local_key, 'e': e})
            raise df_exceptions.DBKeyNotFound(key=local_key)
    else:
        local_key = self.uuid_to_key(table, key, topic)
        try:
            client = self._get_client(local_key)
            # return nil if not found
            return client.get(local_key)
        except Exception as e:
            LOG.exception(_LE("exception %(key)s: %(e)s"),
                          {'key': local_key, 'e': e})
            raise df_exceptions.DBKeyNotFound(key=local_key)
def _get_key_notopic(self, table, key): result = [] def add_key(k, v): result.append(v) self._bulk_operation(table, None, 'GET', key_pattern=key, entry_cb=add_key) n_keys = len(result) if n_keys != 1: LOG.error('Found %d entries with key "%s"', n_keys, key) raise df_exceptions.DBKeyNotFound(key=key) return result[0]
def _scan(self, table, key=None, topic=None):
    """Collect all matching keys from every relevant cluster node.

    Each node is retried up to ``RETRY_COUNT`` times, repopulating the
    cluster topology between attempts.

    :returns: a set of matching keys.
    :raises DBKeyNotFound: when a node stays unreachable after all
        retries (reported as 'ALL KEYS').
    """
    pattern, nodes = self._query_info(table, topic, key)
    keys = set()
    for node in nodes:
        attempt = 0
        while attempt < self.RETRY_COUNT:
            LOG.debug('Getting all keys with pattern %s retry %d',
                      pattern, attempt)
            try:
                keys.update(self._get_all_keys_from_node(node, pattern))
                break
            except exceptions.RedisError:
                LOG.exception('Error getting keys from node %s:%s',
                              node.ip, node.port)
                attempt += 1
                # Topology may have changed; refresh before retrying.
                self._cluster.populate_cluster()
        if attempt == self.RETRY_COUNT:
            raise df_exceptions.DBKeyNotFound('ALL KEYS')
    return keys
def _bulk_operation(self, table, topic, command, args=(), key_pattern=None,
                    entry_cb=None, stop_on_fail=False):
    """Run *command* over every key matching *table*/*topic*.

    Keys are processed per node in chunks of ``BATCH_KEY_AMOUNT``;
    keys that error inside a chunk are retried one-by-one. For each
    successful (key, value) pair, *entry_cb(key, value)* is invoked.

    :returns: True when everything succeeded, False otherwise (or
        immediately when *stop_on_fail* is set).
    :raises DBKeyNotFound: when a node's keys cannot be listed after
        all retries (reported as 'ALL KEYS').
    """
    def is_error(value):
        return isinstance(value, exceptions.RedisError)

    (pattern, nodes) = self._query_info(table, topic, key_pattern)
    success = True
    batch_key_amount = self.BATCH_KEY_AMOUNT
    LOG.debug('Performing bulk operation "%s" on table %s topic %s',
              command, table, topic or 'None')
    for node in nodes:
        node_failed_keys = set()
        retry = 0
        while retry < self.RETRY_COUNT:
            try:
                node_keys = list(
                    self._get_all_keys_from_node(node, pattern))
                break
            except exceptions.RedisError:
                LOG.exception('Error get keys from node %s:%s retry %d',
                              node.ip, node.port, retry)
                retry += 1
        # BUG FIX: check retry exhaustion before using node_keys; the
        # original logged len(node_keys) first, which raised NameError
        # when every retry had failed.
        if retry == self.RETRY_COUNT:
            raise df_exceptions.DBKeyNotFound('ALL KEYS')
        LOG.debug('Node %s:%s has %d keys for table %s topic %s',
                  node.ip, node.port, len(node_keys), table,
                  topic or 'None')
        bulk_begin = 0
        bulk_end = batch_key_amount
        while bulk_begin < len(node_keys):
            LOG.debug('Working on chunk %d:%d', bulk_begin, bulk_end)
            result = self._bulk_execute(node,
                                        node_keys[bulk_begin:bulk_end],
                                        command, args)
            if result is False:
                LOG.error('Error executing bulk operation on node %s:%s',
                          node.ip, node.port)
                if stop_on_fail:
                    return False
                # BUG FIX: the original 'continue' re-ran the same chunk
                # forever (the window was never advanced). Instead, fall
                # back to per-key processing for this chunk.
                node_failed_keys.update(node_keys[bulk_begin:bulk_end])
            else:
                for (k, v) in result:
                    if is_error(v):
                        LOG.warning(
                            'Bulk operation error node %s:%s key "%s"',
                            node.ip, node.port, k)
                        if stop_on_fail:
                            return False
                        # BUG FIX: add() the key itself; update() on a
                        # string inserts its individual characters.
                        node_failed_keys.add(k)
                    elif v is not None and callable(entry_cb):
                        entry_cb(k, v)
            bulk_begin += batch_key_amount
            bulk_end += batch_key_amount
        for key in node_failed_keys:
            try:
                value = self._key_command(command, key, args)
            except Exception:
                LOG.warning('Failed to process key "%s" from node %s:%s',
                            key, node.ip, node.port)
                if stop_on_fail:
                    return False
                success = False
            else:
                if callable(entry_cb):
                    entry_cb(key, value)
    return success
def _get_key_topic(self, table, key, topic): real_key = self._key_name(table, topic, key) value = self._key_command('GET', real_key) if value is None: raise df_exceptions.DBKeyNotFound(key=key) return value
def delete_key(self, table, key, topic=None):
    """Delete the etcd node for *key* in *table*.

    :raises DBKeyNotFound: when the node does not exist.
    """
    path = '/%s/%s' % (table, key)
    try:
        self.client.delete(path)
    except etcd.EtcdKeyNotFound:
        raise df_exceptions.DBKeyNotFound(key=key)
def test_get_nonexistent(self):
    """get() returns None when the driver reports a missing key."""
    driver = self.api_nb.driver
    driver.get_key.side_effect = exceptions.DBKeyNotFound()
    result = self.api_nb.get(ModelTest(id='id1'))
    self.assertIsNone(result)
def _get_key(self, table_key, key): value = self.client.get(table_key) if len(value) > 0: return value.pop() raise df_exceptions.DBKeyNotFound(key=key)
def get_key(self, table, key, topic=None):
    """Return the value for *key* from the in-memory *table* mapping.

    :raises DBKeyNotFound: when the table or the key is absent.
    """
    try:
        # A missing table or missing key both raise KeyError, which is
        # translated to the driver-level not-found error.
        return self._db[table][key]
    except KeyError:
        raise df_exceptions.DBKeyNotFound(key=key)
def test_delete_nonexistent(self):
    """delete() propagates DBKeyNotFound from the driver."""
    instance = ModelTest(id='id1', topic='topic')
    driver = self.api_nb.driver
    driver.delete_key.side_effect = exceptions.DBKeyNotFound()
    self.assertRaises(exceptions.DBKeyNotFound,
                      self.api_nb.delete, instance)
def get_key(self, table, key, topic=None):
    """Return the value of the etcd node for *key* in *table*.

    :raises DBKeyNotFound: when the node does not exist.
    """
    path = '/%s/%s' % (table, key)
    try:
        return self.client.read(path).value
    except etcd.EtcdKeyNotFound:
        raise df_exceptions.DBKeyNotFound(key=key)
def get_key(self, table, key, topic=None):
    """Return the value stored for *key* in *table*.

    :raises DBKeyNotFound: when nothing is stored under the key.
    """
    full_key = self._make_key(table, key)
    values = self.client.get(full_key)
    if len(values) == 0:
        raise df_exceptions.DBKeyNotFound(key=key)
    return values.pop()
def delete_key(self, table, key, topic=None):
    """Delete *key* from *table*.

    :raises DBKeyNotFound: when the backend reports nothing deleted.
    """
    full_key = self._make_key(table, key)
    removed = self.client.delete(full_key)
    if not removed:
        raise df_exceptions.DBKeyNotFound(key=key)
def delete_key(self, table, key, topic=None):
    """Delete the document for *key* from RethinkDB table *table*.

    :raises DBKeyNotFound: when the delete skipped (no such document).
    """
    with self._get_conn() as conn:
        outcome = self._query_key(table, key).delete().run(conn)
        if outcome['skipped'] == 1:
            raise exceptions.DBKeyNotFound(key=key)