def load_schema(self):
    """
    Reload the schema definition for this column family from Cassandra,
    updating the comparator and validation classes if necessary.
    """
    cf_defs = self.pool.execute("get_keyspace_description",
                                use_dict_for_col_metadata=True)
    try:
        cfdef = cf_defs[self.column_family]
    except KeyError:
        not_found = NotFoundException()
        not_found.why = "Column family %s not found." % self.column_family
        raise not_found
    self._cfdef = cfdef
    # Super column families need different packing/unpacking throughout.
    self.super = cfdef.column_type == "Super"
    self._load_comparator_classes()
    self._load_validation_classes()
    self._load_key_class()
def load_schema(self):
    """
    Fetch this column family's definition from Cassandra and refresh the
    comparator, validation, and key classes accordingly.
    """
    keyspace_defs = self.pool.execute('get_keyspace_description',
                                      use_dict_for_col_metadata=True)
    try:
        self._cfdef = keyspace_defs[self.column_family]
    except KeyError:
        err = NotFoundException()
        err.why = 'Column family %s not found.' % self.column_family
        raise err
    self.super = self._cfdef.column_type == 'Super'
    # Refresh all derived type information from the new definition.
    for loader in (self._load_comparator_classes,
                   self._load_validation_classes,
                   self._load_key_class):
        loader()
def load_schema(self):
    """
    Reload this column family's schema definition from Cassandra and
    update the comparator and validation classes if necessary.
    """
    # Combined try/except/finally is equivalent to the older nested
    # try/try form: the connection is released on every path.
    try:
        self._obtain_connection()
        schema = self._tlocal.client.get_keyspace_description(
            use_dict_for_col_metadata=True)
        self._cfdef = schema[self.column_family]
    except KeyError:
        missing = NotFoundException()
        missing.why = 'Column family %s not found.' % self.column_family
        raise missing
    finally:
        self._release_connection()
    self.super = self._cfdef.column_type == 'Super'
    self._load_comparator_classes()
    self._load_validation_classes()
    self._load_key_class()
def load_schema(self):
    """
    Refresh this column family's schema from the server, updating the
    comparator and validation classes when necessary.
    """
    cfdef = None
    try:
        try:
            self._obtain_connection()
            keyspace = self._tlocal.client.get_keyspace_description(
                use_dict_for_col_metadata=True)
            cfdef = keyspace[self.column_family]
        except KeyError:
            nfe = NotFoundException()
            nfe.why = 'Column family %s not found.' % self.column_family
            raise nfe
    finally:
        # Always hand the connection back, even on failure.
        self._release_connection()
    self._cfdef = cfdef
    self.super = (cfdef.column_type == 'Super')
    self._load_comparator_classes()
    self._load_validation_classes()
    self._load_key_class()
def __init__(self, pool, column_family, buffer_size=1024,
             read_consistency_level=ConsistencyLevel.ONE,
             write_consistency_level=ConsistencyLevel.ONE,
             timestamp=gm_timestamp, super=False,
             dict_class=util.OrderedDict, autopack_names=True,
             autopack_values=True):
    """
    An abstraction of a Cassandra column family or super column family.

    Operations on this, such as :meth:`get` or :meth:`insert` will get data
    from or insert data into the corresponding Cassandra column family with
    name `column_family`.

    `pool` is a :class:`~pycassa.pool.ConnectionPool` that the column
    family will use for all operations.  A connection is drawn from the
    pool before each operation and is returned afterwards.  Note that the
    keyspace to be used is determined by the pool.

    When calling :meth:`get_range()` or :meth:`get_indexed_slices()`,
    the intermediate results need to be buffered if we are fetching many
    rows, otherwise the Cassandra server will overallocate memory and fail.
    `buffer_size` is the size of that buffer in number of rows.  The
    default is 1024.

    `read_consistency_level` and `write_consistency_level` set the default
    consistency levels for every operation; these may be overridden
    per-operation.  These should be instances of
    :class:`~pycassa.cassandra.ttypes.ConsistencyLevel`.  These default to
    level ``ONE``.

    Each :meth:`insert()` or :meth:`remove` sends a timestamp with every
    column.  The `timestamp` parameter is a function that is used to get
    this timestamp when needed.  The default function is
    :meth:`gm_timestamp()`.

    Results are returned as dictionaries.  :class:`~pycassa.util.OrderedDict`
    is used by default so that order is maintained.  A different class, such
    as :class:`dict` may be used instead by passing `dict_class`.

    By default, column family definitions will be examined to determine
    what data type Cassandra expects for column names and values.  When
    columns are retrieved or inserted, their names and values will be
    packed or unpacked if necessary to convert them to or from their
    binary representation.  Automatic packing of names and values can be
    individually enabled or disabled with `autopack_names` and
    `autopack_values`.  When using
    :class:`~pycassa.columnfamilymap.ColumnFamilyMap`, these should both
    be set to ``False``.

    The `super` parameter is deprecated: whether the column family is a
    super column family is detected automatically from its definition.
    """
    self.pool = pool
    # Connections are tracked per-thread; only the creating thread's
    # slot is initialized here.
    self._tlocal = threading.local()
    self._tlocal.client = None
    self.column_family = column_family
    self.buffer_size = buffer_size
    self.read_consistency_level = read_consistency_level
    self.write_consistency_level = write_consistency_level
    self.timestamp = timestamp
    self.dict_class = dict_class
    self.autopack_names = autopack_names
    self.autopack_values = autopack_values

    # Determine the ColumnFamily type to allow for auto conversion
    # so that packing/unpacking doesn't need to be done manually
    self.cf_data_type = None
    self.col_name_data_type = None
    self.supercol_name_data_type = None
    self.col_type_dict = dict()

    col_fam = None
    try:
        try:
            self._obtain_connection()
            col_fam = self._tlocal.client.get_keyspace_description(
                use_dict_for_col_metadata=True)[self.column_family]
        except KeyError:
            nfe = NotFoundException()
            nfe.why = 'Column family %s not found.' % self.column_family
            raise nfe
    finally:
        self._release_connection()

    if col_fam is not None:
        self.super = col_fam.column_type == 'Super'
        if self.autopack_names:
            if not self.super:
                self.col_name_data_type = col_fam.comparator_type
            else:
                # For super CFs, subcolumn names use the subcomparator and
                # supercolumn names use the comparator.
                self.col_name_data_type = col_fam.subcomparator_type
                self.supercol_name_data_type = util.extract_type_name(
                    col_fam.comparator_type)
            # Fix: the original also bound this to an unused local 'index'.
            self.col_name_data_type = util.extract_type_name(
                self.col_name_data_type)
        if self.autopack_values:
            self.cf_data_type = util.extract_type_name(
                col_fam.default_validation_class)
            # Per-column validators override the CF-wide default.
            for name, cdef in col_fam.column_metadata.items():
                self.col_type_dict[name] = util.extract_type_name(
                    cdef.validation_class)
def __init__(self, pool, column_family, buffer_size=1024,
             read_consistency_level=ConsistencyLevel.ONE,
             write_consistency_level=ConsistencyLevel.ONE,
             timestamp=gm_timestamp, super=False,
             dict_class=OrderedDict, autopack_names=True,
             autopack_values=True):
    """
    Constructs an abstraction of a Cassandra column family or super
    column family.

    Operations on this, such as :meth:`get` or :meth:`insert` will get data
    from or insert data into the corresponding Cassandra column family.

    :param pool: A connection pool to a Cassandra cluster
    :type client: :class:`~pycassa.pool.AbstractPool`

    :param column_family: The name of the column family
    :type column_family: string

    :param buffer_size: When calling :meth:`get_range()` or
      :meth:`get_indexed_slices()`, the intermediate results need
      to be buffered if we are fetching many rows, otherwise the Cassandra
      server will overallocate memory and fail.  This is the size of
      that buffer in number of rows.
    :type buffer_size: int

    :param read_consistency_level: Affects the guaranteed replication factor
      before returning from any read operation
    :type read_consistency_level: :class:`~pycassa.cassandra.ttypes.ConsistencyLevel`

    :param write_consistency_level: Affects the guaranteed replication
      factor before returning from any write operation
    :type write_consistency_level: :class:`~pycassa.cassandra.ttypes.ConsistencyLevel`

    :param timestamp: The default timestamp function returns
      ``int(time.mktime(time.gmtime()))``, the number of seconds since Unix
      epoch in GMT.  Set this to replace the default timestamp function with
      your own.
    :type timestamp: function

    :param dict_class: The default dict_class is
      :class:`~pycassa.util.OrderedDict`.  All returned rows and subcolumns
      are instances of this.
    :type dict_class: class

    :param autopack_names: Whether column and supercolumn names should be
      packed automatically based on the comparator and subcomparator for
      the column family.  This does not typically work when used with
      :class:`~pycassa.columnfamilymap.ColumnFamilyMap`.
    :type autopack_names: bool

    :param autopack_values: Whether column values should be packed
      automatically based on the validator_class for a given column.
      This should probably be set to ``False`` when used with a
      :class:`~pycassa.columnfamilymap.ColumnFamilyMap`.
    :type autopack_values: bool

    :param super: Whether this column family has super columns.  This is
      detected automatically since 0.5.1.

      .. deprecated:: 0.5.1

    :type super: bool
    """
    self.pool = pool
    self.client = None
    self.column_family = column_family
    self.buffer_size = buffer_size
    self.read_consistency_level = read_consistency_level
    self.write_consistency_level = write_consistency_level
    self.timestamp = timestamp
    self.dict_class = dict_class
    self.autopack_names = autopack_names
    self.autopack_values = autopack_values

    # Determine the ColumnFamily type to allow for auto conversion
    # so that packing/unpacking doesn't need to be done manually
    self.cf_data_type = None
    self.col_name_data_type = None
    self.supercol_name_data_type = None
    self.col_type_dict = dict()

    col_fam = None
    try:
        self.client = self.pool.get()
        col_fam = self.client.get_keyspace_description(
            use_dict_for_col_metadata=True)[self.column_family]
    except KeyError:
        nfe = NotFoundException()
        nfe.why = 'Column family %s not found.' % self.column_family
        raise nfe
    finally:
        # BUG FIX: if pool.get() itself raised, self.client is still None
        # and the original's unconditional return_to_pool() call masked
        # the real error with an AttributeError on None.
        if self.client is not None:
            self.client.return_to_pool()

    if col_fam is not None:
        self.super = col_fam.column_type == 'Super'
        if self.autopack_names:
            if not self.super:
                self.col_name_data_type = col_fam.comparator_type
            else:
                # For super CFs, subcolumn names use the subcomparator and
                # supercolumn names use the comparator.
                self.col_name_data_type = col_fam.subcomparator_type
                self.supercol_name_data_type = self._extract_type_name(
                    col_fam.comparator_type)
            # Fix: the original also bound this to an unused local 'index'.
            self.col_name_data_type = self._extract_type_name(
                self.col_name_data_type)
        if self.autopack_values:
            self.cf_data_type = self._extract_type_name(
                col_fam.default_validation_class)
            # Per-column validators override the CF-wide default.
            for name, cdef in col_fam.column_metadata.items():
                self.col_type_dict[name] = self._extract_type_name(
                    cdef.validation_class)
def __init__(self, pool, column_family, buffer_size=1024,
             read_consistency_level=ConsistencyLevel.ONE,
             write_consistency_level=ConsistencyLevel.ONE,
             timestamp=gm_timestamp, super=False,
             dict_class=util.OrderedDict, autopack_names=True,
             autopack_values=True):
    """
    An abstraction of a Cassandra column family or super column family.

    Operations on this, such as :meth:`get` or :meth:`insert` will get data
    from or insert data into the corresponding Cassandra column family with
    name `column_family`.

    `pool` is a :class:`~pycassa.pool.ConnectionPool` that the column
    family will use for all operations.  A connection is drawn from the
    pool before each operation and is returned afterwards.  Note that the
    keyspace to be used is determined by the pool.

    When calling :meth:`get_range()` or :meth:`get_indexed_slices()`,
    the intermediate results need to be buffered if we are fetching many
    rows, otherwise the Cassandra server will overallocate memory and fail.
    `buffer_size` is the size of that buffer in number of rows.  The
    default is 1024.

    `read_consistency_level` and `write_consistency_level` set the default
    consistency levels for every operation; these may be overridden
    per-operation.  These should be instances of
    :class:`~pycassa.cassandra.ttypes.ConsistencyLevel`.  These default to
    level ``ONE``.

    Each :meth:`insert()` or :meth:`remove` sends a timestamp with every
    column.  The `timestamp` parameter is a function that is used to get
    this timestamp when needed.  The default function is
    :meth:`gm_timestamp()`.

    Results are returned as dictionaries.  :class:`~pycassa.util.OrderedDict`
    is used by default so that order is maintained.  A different class, such
    as :class:`dict` may be used instead by passing `dict_class`.

    By default, column family definitions will be examined to determine
    what data type Cassandra expects for column names and values.  When
    columns are retrieved or inserted, their names and values will be
    packed or unpacked if necessary to convert them to or from their
    binary representation.  Automatic packing of names and values can be
    individually enabled or disabled with `autopack_names` and
    `autopack_values`.  When using
    :class:`~pycassa.columnfamilymap.ColumnFamilyMap`, these should both
    be set to ``False``.

    The `super` parameter is deprecated: whether the column family is a
    super column family is detected automatically from its definition.
    """
    self.pool = pool
    # Connections are tracked per-thread; only the creating thread's
    # slot is initialized here.
    self._tlocal = threading.local()
    self._tlocal.client = None
    self.column_family = column_family
    self.buffer_size = buffer_size
    self.read_consistency_level = read_consistency_level
    self.write_consistency_level = write_consistency_level
    self.timestamp = timestamp
    self.dict_class = dict_class
    self.autopack_names = autopack_names
    self.autopack_values = autopack_values

    # Determine the ColumnFamily type to allow for auto conversion
    # so that packing/unpacking doesn't need to be done manually
    self.cf_data_type = None
    self.col_name_data_type = None
    self.supercol_name_data_type = None
    self.col_type_dict = dict()

    col_fam = None
    try:
        try:
            self._obtain_connection()
            col_fam = self._tlocal.client.get_keyspace_description(
                use_dict_for_col_metadata=True)[self.column_family]
        except KeyError:
            nfe = NotFoundException()
            nfe.why = 'Column family %s not found.' % self.column_family
            raise nfe
    finally:
        self._release_connection()

    if col_fam is not None:
        self.super = col_fam.column_type == 'Super'
        if self.autopack_names:
            if not self.super:
                self.col_name_data_type = col_fam.comparator_type
            else:
                # For super CFs, subcolumn names use the subcomparator and
                # supercolumn names use the comparator.
                self.col_name_data_type = col_fam.subcomparator_type
                self.supercol_name_data_type = util.extract_type_name(
                    col_fam.comparator_type)
            # Fix: the original also bound this to an unused local 'index'.
            self.col_name_data_type = util.extract_type_name(
                self.col_name_data_type)
        if self.autopack_values:
            self.cf_data_type = util.extract_type_name(
                col_fam.default_validation_class)
            # Per-column validators override the CF-wide default.
            for name, cdef in col_fam.column_metadata.items():
                self.col_type_dict[name] = util.extract_type_name(
                    cdef.validation_class)