Example #1
0
    def test_table_property_update(self):
        """Table options changed on the model should be applied by a re-sync."""
        comment = 'xirAkRWZVVvsmzRvXamiEcQkshkUIDINVJZgLYSdnGHweiBrAiJdLJkVohdRy'
        new_props = {
            'bloom_filter_fp_chance': 0.66778,
            'caching': CACHING_NONE,
            'comment': comment,
            'default_time_to_live': 65178,
            'gc_grace_seconds': 96362,
            'index_interval': 94207,
            'memtable_flush_period_in_ms': 60210,
            'populate_io_cache_on_flush': False,
            'read_repair_chance': 0.2989,
            'replicate_on_write': True,
        }
        for prop, value in new_props.items():
            setattr(ModelWithTableProperties, '__%s__' % prop, value)
        # Set but not verified below -- see the TODO about the native driver
        # missing local read repair chance.
        ModelWithTableProperties.__dclocal_read_repair_chance__ = 0.12732

        sync_table(ModelWithTableProperties)

        table_settings = management.get_table_settings(
            ModelWithTableProperties).options

        # TODO see above comment re: native driver missing local read repair chance
        # 'local_read_repair_chance': 0.12732,
        self.assertDictContainsSubset(new_props, table_settings)
    def test_all_size_tiered_options(self):
        """All SizeTiered compaction options should round-trip through sync_table."""
        class AllSizeTieredOptionsModel(Model):
            __compaction__ = SizeTieredCompactionStrategy
            __compaction_bucket_low__ = 0.3
            __compaction_bucket_high__ = 2
            __compaction_min_threshold__ = 2
            __compaction_max_threshold__ = 64
            __compaction_tombstone_compaction_interval__ = 86400

            cid = columns.UUID(primary_key=True)
            name = columns.Text()

        # Recreate the table from a clean slate.
        drop_table(AllSizeTieredOptionsModel)
        sync_table(AllSizeTieredOptionsModel)

        raw = get_table_settings(
            AllSizeTieredOptionsModel).options['compaction_strategy_options']

        # Cassandra reports every option value as a string.
        self.assertDictEqual(json.loads(raw), {
            u'min_threshold': u'2',
            u'bucket_low': u'0.3',
            u'tombstone_compaction_interval': u'86400',
            u'bucket_high': u'2',
            u'max_threshold': u'64',
        })
    def setUpClass(cls):
        # Create tables for both polymorphic models, then seed one row of
        # each type in the same partition so queries can find both.
        super(TestIndexedPolymorphicQuery, cls).setUpClass()
        management.sync_table(IndexedPoly1)
        management.sync_table(IndexedPoly2)

        cls.p1 = IndexedPoly1.create(data1='pickle')
        cls.p2 = IndexedPoly2.create(partition=cls.p1.partition, data2='bacon')
def timeoflastrun(name, set_to_now):
    """
    Allows you to figure out the last time a batch process
    (name given by first argument) was run. Also allows you
    to se the value to now.
    """
    assert type(set_to_now) is bool

    class timeoflastruns(Model):
        name = columns.Text(primary_key=True)
        time = columns.Integer()

    from cqlengine.management import sync_table
    sync_table(timeoflastruns)

    if set_to_now is True:
        epoch_time_seconds = int(time.time())
        timeoflastruns.create(name=name, time=epoch_time_seconds)
        return epoch_time_seconds
    else:
        try:
            return timeoflastruns.objects(name=name).get()['time']
        except:
            print "First time running batch script. Returning the epoch."
            return 0
    def handle_noargs(self, **options):
        """Create the Cassandra keyspace and sync every discovered model."""
        db = options.get('database')
        engine = settings.DATABASES.get(db, {}).get('ENGINE', '')

        # Call regular syncdb if engine is different from ours
        if engine != 'django_cassandra_engine':
            return super(Command, self).handle_noargs(**options)

        if django.VERSION < (1, 7):
            self._import_management()

        connection = connections[db]
        connection.connect()
        # NOTE: rebinds ``options`` from the command kwargs to the
        # backend's OPTIONS dict; the kwargs are not used after this point.
        options = connection.settings_dict.get('OPTIONS', {})
        replication_opts = options.get('replication', {})
        keyspace = connection.settings_dict['NAME']

        self.stdout.write('Creating keyspace %s..' % keyspace)
        create_keyspace(keyspace, **replication_opts)
        # Sync all cqlengine models, app by app.
        for app_name, app_models \
                in connection.introspection.cql_models.iteritems():

            for model in app_models:
                self.stdout.write('Syncing %s.%s' % (app_name, model.__name__))
                sync_table(model, create_missing_keyspace=False)
Example #6
0
    def run(self):

       """Runs DB server and sync models with Cassandra coloumn family."""
       print 'inside db'
       connection.setup(self.database_ip, self.database_name)
       sync_table(products.ProductsDetails)
       print 'synched'
Example #7
0
    def test_sync_table_works_with_primary_keys_only_tables(self):
        """sync_table must cope with tables whose columns are all primary keys.

        Such tables create no rows in system.schema_columns, so the update
        path (get_fields) must tolerate the empty result without crashing.
        """
        # First sync acts as CREATE TABLE.
        sync_table(PrimaryKeysOnlyModel)

        # Make sure the settings persisted correctly; get_table_settings
        # blows up with DoesNotExist if the table is absent.
        assert PrimaryKeysOnlyModel.__compaction__ == LeveledCompactionStrategy
        options = management.get_table_settings(PrimaryKeysOnlyModel).options
        assert LeveledCompactionStrategy in options['compaction_strategy_class']

        # Second sync acts as ALTER TABLE: change something first.
        PrimaryKeysOnlyModel.__compaction__ = SizeTieredCompactionStrategy

        get_fields(PrimaryKeysOnlyModel)
        sync_table(PrimaryKeysOnlyModel)

        options = management.get_table_settings(PrimaryKeysOnlyModel).options
        assert SizeTieredCompactionStrategy in options['compaction_strategy_class']
Example #8
0
    def test_table_definition(self):
        """ Tests that creating a table with capitalized column names succeeds """
        sync_table(LowercaseKeyModel)
        sync_table(CapitalizedKeyModel)

        drop_table(LowercaseKeyModel)
        drop_table(CapitalizedKeyModel)
    def test_all_size_tiered_options(self):
        """Every SizeTiered compaction knob should be written to the table."""
        class AllSizeTieredOptionsModel(Model):
            __compaction__ = SizeTieredCompactionStrategy
            __compaction_bucket_low__ = 0.3
            __compaction_bucket_high__ = 2
            __compaction_min_threshold__ = 2
            __compaction_max_threshold__ = 64
            __compaction_tombstone_compaction_interval__ = 86400

            cid = columns.UUID(primary_key=True)
            name = columns.Text()

        drop_table(AllSizeTieredOptionsModel)
        sync_table(AllSizeTieredOptionsModel)

        settings = get_table_settings(AllSizeTieredOptionsModel)
        actual = json.loads(settings.options["compaction_strategy_options"])

        # Cassandra stores all option values as strings.
        expected = {
            u"min_threshold": u"2",
            u"bucket_low": u"0.3",
            u"tombstone_compaction_interval": u"86400",
            u"bucket_high": u"2",
            u"max_threshold": u"64",
        }
        self.assertDictEqual(actual, expected)
Example #10
0
 def test_extra_field(self):
     """A column added behind the model's back (via ALTER) must not break reads."""
     drop_table(self.TestModel)
     sync_table(self.TestModel)
     self.TestModel.create()
     table = self.TestModel.column_family_name(include_keyspace=True)
     execute("ALTER TABLE {} add blah int".format(table))
     # Reading rows that predate the new column should succeed.
     self.TestModel.objects().all()
Example #11
0
    def test_sync_table_works_with_primary_keys_only_tables(self):
        """Regression test: sync_table on a table made only of primary keys.

        Primary-key-only tables create no rows in system.schema_columns, so
        the introspection path must cope with the empty result.
        """
        # This is "create table":

        sync_table(PrimaryKeysOnlyModel)

        # let's make sure settings persisted correctly:

        assert PrimaryKeysOnlyModel.__compaction__ == LeveledCompactionStrategy
        # blows up with DoesNotExist if table does not exist
        table_settings = management.get_table_settings(PrimaryKeysOnlyModel)
        # let make sure the flag we care about

        assert LeveledCompactionStrategy in table_settings.options[
            'compaction_strategy_class']

        # Now we are "updating" the table:

        # setting up something to change
        PrimaryKeysOnlyModel.__compaction__ = SizeTieredCompactionStrategy

        # primary-keys-only tables do not create entries in system.schema_columns
        # table. Only non-primary keys are added to that table.
        # Our code must deal with that eventuality properly (not crash)
        # on subsequent runs of sync_table (which runs get_fields internally)
        get_fields(PrimaryKeysOnlyModel)
        sync_table(PrimaryKeysOnlyModel)

        table_settings = management.get_table_settings(PrimaryKeysOnlyModel)
        assert SizeTieredCompactionStrategy in table_settings.options[
            'compaction_strategy_class']
def AddToCassandra_allcountsbatch_bypartition(d_iter):  # filter_missing_values=True for RDDs
    """Persist every dict in d_iter as a url_ranks_links_23 row in Cassandra."""
    # from cassandra.cluster import Cluster
    from cqlengine import columns
    from cqlengine.models import Model
    from cqlengine import connection
    from cqlengine.management import sync_table

    # CASSANDRA_KEYSPACE = "wikipedia_jan_2015"
    CASSANDRA_KEYSPACE = "test"
    seed_nodes = [
        "52.89.66.139",
        "52.89.34.7",
        "52.89.116.45",
        "52.89.78.4",
        "52.89.27.115",
        "52.89.133.147",
        "52.89.1.48",
    ]
    connection.setup(seed_nodes, CASSANDRA_KEYSPACE)

    class url_ranks_links_23(Model):
        # primary key is url which is dictated by the number of links
        url = columns.Text(primary_key=True)
        ranks = columns.Float()  # stored as a double; sorted on later
        links = columns.List(columns.Text)

        def __repr__(self):
            return "%s %s" % (self.url, self.ranks)

    sync_table(url_ranks_links_23)
    for row in d_iter:
        url_ranks_links_23.create(**row)
    def sync(self, alias):
        """Create the keyspace for ``alias`` and sync all of its cql models."""
        engine = get_engine_from_db_alias(alias)

        if engine != 'django_cassandra_engine':
            raise CommandError('Database {0} is not cassandra!'.format(alias))

        connection = connections[alias]
        connection.connect()
        options = connection.settings_dict.get('OPTIONS', {})
        keyspace = connection.settings_dict['NAME']
        replication_opts = options.get('replication', {})
        # strategy_class / replication_factor are popped so only the
        # remaining replication options are forwarded as keyword arguments.
        strategy_class = replication_opts.pop('strategy_class',
                                              'SimpleStrategy')
        replication_factor = replication_opts.pop('replication_factor', 1)

        self.stdout.write('Creating keyspace {0}..'.format(keyspace))

        create_keyspace(keyspace, strategy_class, replication_factor,
                        **replication_opts)

        for app_name, app_models \
                in connection.introspection.cql_models.iteritems():
            for model in app_models:
                self.stdout.write('Syncing %s.%s' % (app_name, model.__name__))
                sync_table(model)
Example #14
0
    def test_has_missing_single_partition_key_field(self):
        """verify() reports a skipped partition-key column as 'missing'."""
        Foo = make_model(table_name='foo_bar', skip={'uuidd', 'uuide', 'uuidf'})
        sync_table(Foo)

        Foo2 = make_model(table_name='foo_bar', skip={'uuidd', 'uuidf'})
        results = verify(Foo2)
        [result.report() for result in results]
        assert len(results) == 1
        result = results[0]

        assert not result.is_missing
        assert not result.extra
        assert not result.different
        assert not result.missing_indexes
        assert not result.extra_indexes
        # Note that 'partition' will be 'missing' too because Foo.partition
        # gets a default 'partition_key=True' when all the explicit
        # partition_keys are skipped.  So, the verify will report a partition
        # key 'partition'.
        # When uuide is not skipped, Foo2.partition is not a partition key.
        # When verifying Foo2 against Foo's schema partition will show up
        # as 'missing' because it is a missing partition_key (not a missing
        # column.)
        assert len(result.missing) == 2
        assert 'uuide' in result.missing
        assert 'partition' in result.missing
    def handle_noargs(self, **options):
        """Create the Cassandra keyspace and sync every discovered model."""
        db = options.get('database')
        engine = settings.DATABASES.get(db, {}).get('ENGINE', '')

        # Not our backend: defer to Django's regular syncdb.
        if engine != 'django_cassandra_engine':
            return super(Command, self).handle_noargs(**options)

        if django.VERSION < (1, 7):
            self._import_management()

        connection = connections[db]
        connection.connect()
        backend_options = connection.settings_dict.get('OPTIONS', {})
        replication_opts = backend_options.get('replication', {})
        keyspace = connection.settings_dict['NAME']

        self.stdout.write('Creating keyspace %s..' % keyspace)
        create_keyspace(keyspace, **replication_opts)

        for app_name, app_models \
                in connection.introspection.cql_models.iteritems():
            for model in app_models:
                self.stdout.write('Syncing %s.%s' % (app_name, model.__name__))
                sync_table(model, create_missing_keyspace=False)
Example #16
0
def test_non_quality_filtering():
    """Non-equality filters on a clustering key work with allow_filtering."""
    class NonEqualityFilteringModel(Model):
        __keyspace__ = 'test'
        example_id = columns.UUID(primary_key=True, default=uuid.uuid4)
        # sequence_id is a clustering key
        sequence_id = columns.Integer(primary_key=True)
        example_type = columns.Integer(index=True)
        created_at = columns.DateTime()

    drop_table(NonEqualityFilteringModel)
    sync_table(NonEqualityFilteringModel)

    # Seed three rows; only sequence_id=5 satisfies the filter below.
    for seq, kind in ((1, 0), (3, 0), (5, 1)):
        NonEqualityFilteringModel.create(sequence_id=seq,
                                         example_type=kind,
                                         created_at=datetime.now())

    query = NonEqualityFilteringModel.objects(
        NonEqualityFilteringModel.sequence_id > 3).allow_filtering()
    num = query.count()
    assert num == 1, num
Example #17
0
    def test_all_size_tiered_options(self):
        """All SizeTiered compaction options should round-trip through sync_table."""
        class AllSizeTieredOptionsModel(Model):
            __compaction__ = SizeTieredCompactionStrategy
            __compaction_bucket_low__ = .3
            __compaction_bucket_high__ = 2
            __compaction_min_threshold__ = 2
            __compaction_max_threshold__ = 64
            __compaction_tombstone_compaction_interval__ = 86400

            cid = columns.UUID(primary_key=True)
            name = columns.Text()

        drop_table(AllSizeTieredOptionsModel)
        sync_table(AllSizeTieredOptionsModel)

        settings = get_table_settings(AllSizeTieredOptionsModel)
        options = json.loads(settings['compaction_strategy_options'])
        # Cassandra reports every option value as a string.
        expected = {
            u'min_threshold': u'2',
            u'bucket_low': u'0.3',
            u'tombstone_compaction_interval': u'86400',
            u'bucket_high': u'2',
            u'max_threshold': u'64'
        }
        self.assertDictEqual(options, expected)
Example #18
0
    def test_set_table_properties(self):
        """Table properties declared on the model should persist on sync."""
        sync_table(ModelWithTableProperties)

        expected = {
            'bloom_filter_fp_chance': 0.76328,
            'comment': 'TxfguvBdzwROQALmQBOziRMbkqVGFjqcJfVhwGR',
            'gc_grace_seconds': 2063,
            'read_repair_chance': 0.17985,
            # 'dclocal_read_repair_chance' in CQL is called just
            # 'local_read_repair_chance' in the schema table
            # (https://issues.apache.org/jira/browse/CASSANDRA-6717), and a
            # native-driver bug keeps it from showing up here:
            # 'local_read_repair_chance': 0.50811,
        }

        # Options that only exist on certain Cassandra versions.
        if CASSANDRA_VERSION <= 20:
            expected['caching'] = CACHING_ALL
            expected['replicate_on_write'] = False

        if CASSANDRA_VERSION == 20:
            expected['populate_io_cache_on_flush'] = True
            expected['index_interval'] = 98706

        if CASSANDRA_VERSION >= 20:
            expected['default_time_to_live'] = 4756
            expected['memtable_flush_period_in_ms'] = 43681

        actual = management.get_table_settings(ModelWithTableProperties).options
        self.assertDictContainsSubset(expected, actual)
Example #19
0
    def test_table_definition(self):
        """ Tests that creating a table with capitalized column names succeeds """
        sync_table(LowercaseKeyModel)
        sync_table(CapitalizedKeyModel)

        drop_table(LowercaseKeyModel)
        drop_table(CapitalizedKeyModel)
 def __init__(self, serializer_class=None, **options):
     # column_family_name is required; pop it so the base class doesn't
     # receive an option it doesn't know about.
     self.column_family_name = options.pop('column_family_name')
     super(CassandraTimelineStorage, self).__init__(
         serializer_class, **options)
     self.model = self.get_model(self.base_model, self.column_family_name)
     # Imported here, not at module level -- presumably to avoid importing
     # cqlengine unless this storage backend is actually used (TODO confirm).
     from cqlengine.management import sync_table
     sync_table(self.model)
Example #21
0
    def test_set_table_properties(self):
        """Table properties declared on the model should persist on sync."""
        sync_table(ModelWithTableProperties)
        expected = {'bloom_filter_fp_chance': 0.76328,
                    'comment': 'TxfguvBdzwROQALmQBOziRMbkqVGFjqcJfVhwGR',
                    'gc_grace_seconds': 2063,
                    'read_repair_chance': 0.17985,
                     # For some reason 'dclocal_read_repair_chance' in CQL is called
                     #  just 'local_read_repair_chance' in the schema table.
                     #  Source: https://issues.apache.org/jira/browse/CASSANDRA-6717
                     #  TODO: due to a bug in the native driver i'm not seeing the local read repair chance show up
                     # 'local_read_repair_chance': 0.50811,
                    }
        # Options that only exist on certain Cassandra versions.
        if CASSANDRA_VERSION <= 20:
            expected['caching'] = CACHING_ALL
            expected['replicate_on_write'] = False

        if CASSANDRA_VERSION == 20:
            expected['populate_io_cache_on_flush'] = True
            expected['index_interval'] = 98706

        if CASSANDRA_VERSION >= 20:
            expected['default_time_to_live'] = 4756
            expected['memtable_flush_period_in_ms'] = 43681

        self.assertDictContainsSubset(expected, management.get_table_settings(ModelWithTableProperties).options)
Example #22
0
def cassandra_reset():
    """Re-sync the Cassandra tables backing the feedly timeline storages."""
    from feedly.feeds.cassandra import CassandraFeed
    from feedly.feeds.aggregated_feed.cassandra import CassandraAggregatedFeed
    from cqlengine.management import sync_table

    for storage in (CassandraAggregatedFeed.get_timeline_storage(),
                    CassandraFeed.get_timeline_storage()):
        sync_table(storage.model)
Example #23
0
    def test_multiple_deletes_dont_fail(self):
        """
        Dropping an already-dropped table should be a no-op, not an error.
        """
        sync_table(TestModel)

        drop_table(TestModel)
        drop_table(TestModel)
Example #24
0
    def test_has_two(self):
        """verify() should report nothing when both models match their tables."""
        foo_model = make_model(table_name='foo_bar')
        bar_model = make_model(table_name='Bar')
        sync_table(foo_model)
        sync_table(bar_model)

        results = verify(foo_model, bar_model)
        assert not results
Example #25
0
    def test_multiple_deletes_dont_fail(self):
        """
        Dropping an already-dropped table should be a no-op, not an error.
        """
        sync_table(TestModel)

        drop_table(TestModel)
        drop_table(TestModel)
Example #26
0
 def setUpClass(cls):
     """
     When receiving an insert with 'if not exists', Cassandra performs a
     read at QUORUM consistency.  With replication_factor 3 on a single
     node that read would fail, so these tests use a keyspace created
     with replication_factor 1.
     """
     # Fix: this text previously sat as a bare string expression *after*
     # the super() call -- a no-op statement, not the method docstring.
     super(BaseIfNotExistsTest, cls).setUpClass()
     sync_table(TestIfNotExistsModel)
Example #27
0
 def setUpClass(cls):
     """
     When receiving an insert with 'if not exists', Cassandra performs a
     read at QUORUM consistency.  With replication_factor 3 on a single
     node that read would fail, so these tests use a keyspace created
     with replication_factor 1.
     """
     # Fix: this text previously sat as a bare string expression *after*
     # the super() call -- a no-op statement, not the method docstring.
     super(BaseIfNotExistsTest, cls).setUpClass()
     sync_table(TestIfNotExistsModel)
Example #28
0
def run():
    """Recreate the Stock table and load two sample price/event series."""
    from cqlengine import connection

    connection.setup(['127.0.0.1'], "cqlengine")

    from cqlengine import management

    # Recreate the table from scratch.
    management.drop_table(Stock)
    management.sync_table(Stock)

    wpro_prices = {
        datetime.date(2014, 12, 1): 200,
        datetime.date(2014, 12, 2): 220.45,
        datetime.date(2014, 12, 3): 250.67,
        datetime.date(2014, 12, 4): 246.86,
        datetime.date(2014, 12, 5): 201,
        datetime.date(2014, 12, 6): 233,
        datetime.date(2014, 12, 7): 245,
        datetime.date(2014, 12, 8): 300,
        datetime.date(2014, 12, 9): 307,
        datetime.date(2014, 12, 10): 180,
        datetime.date(2014, 12, 11): 405,
        datetime.date(2014, 12, 12): 400,
        datetime.date(2014, 12, 13): 670,
        datetime.date(2014, 12, 14): 260,
        datetime.date(2014, 12, 15): 250,
        datetime.date(2014, 12, 16): 251,
        datetime.date(2014, 12, 17): 254,
        datetime.date(2014, 12, 18): 267,
        datetime.date(2014, 12, 19): 270,
    }
    wpro_events = {
        datetime.date(2014, 12, 13): "Something happened over here",
        datetime.date(2014, 12, 19): "The bears are playing",
    }
    Stock.create(name="WPRO", prices=wpro_prices, events=wpro_events)

    infy_prices = {
        datetime.date(2014, 8, 1): 3200,
        datetime.date(2014, 8, 2): 3220.45,
        datetime.date(2014, 8, 3): 3250.67,
        datetime.date(2014, 8, 4): 3246.86,
        datetime.date(2014, 8, 5): 3201,
        datetime.date(2014, 8, 6): 3233,
        datetime.date(2014, 8, 7): 3245,
        datetime.date(2014, 8, 8): 3300,
        datetime.date(2014, 8, 9): 3307,
        datetime.date(2014, 8, 10): 3180,
        datetime.date(2014, 8, 11): 3405,
        datetime.date(2014, 8, 12): 3400,
        datetime.date(2014, 8, 13): 3670,
        datetime.date(2014, 8, 14): 3260,
        datetime.date(2014, 8, 15): 3250,
        datetime.date(2014, 8, 16): 3251,
        datetime.date(2014, 8, 17): 3254,
        datetime.date(2014, 8, 18): 3267,
        datetime.date(2014, 8, 19): 3270,
    }
    Stock.create(name="INFY", prices=infy_prices)
Example #29
0
    def test_has_missing_cf(self):
        """A model whose table was never created is reported as missing."""
        existing = make_model(table_name='foo_bar')
        missing = make_model(table_name='baz_qux')
        sync_table(existing)

        results = verify(existing, missing)
        for result in results:
            result.report()
        assert len(results) == 1
        assert results[0].is_missing
    def test_alter_actually_alters(self):
        """Changing the compaction strategy and re-syncing must ALTER the table."""
        model = copy.deepcopy(LeveledcompactionTestTable)
        drop_table(model)
        sync_table(model)

        # Switch strategies and clear the leveled-only option before re-sync.
        model.__compaction__ = SizeTieredCompactionStrategy
        model.__compaction_sstable_size_in_mb__ = None
        sync_table(model)

        table_settings = get_table_settings(model)
        self.assertRegexpMatches(table_settings['compaction_strategy_class'],
                                 '.*SizeTieredCompactionStrategy$')
    def setUpClass(cls):
        """Seed 10 consecutive days of data for each of 7 users."""
        super(TestDateTimeQueries, cls).setUpClass()
        sync_table(DateTimeQueryTestModel)

        cls.base_date = datetime.now() - timedelta(days=10)
        for user in range(7):
            for offset in range(10):
                DateTimeQueryTestModel.create(
                    user=user,
                    day=cls.base_date + timedelta(days=offset),
                    data=str(uuid4()))
Example #32
0
    def test_has_different_partition_key(self):
        """A changed partition-key column shows up under 'different'."""
        sync_table(make_model(table_name='foo_bar'))

        changed = make_model(
            table_name='foo_bar',
            different={'uuide': columns.Ascii(primary_key=True,
                                              partition_key=True,
                                              default=uuid.uuid4)})
        results = verify(changed)
        assert len(results) == 1
        result = results[0]
        assert not result.extra
        assert not result.missing
        assert len(result.different) == 1
        assert 'uuide' in result.different
Example #33
0
    def test_reserved_cql_words_can_be_used_as_column_names(self):
        """Columns named after reserved CQL words ('token', 'insert') must
        work for both writes and reads."""
        sync_table(ReservedWordModel)

        created = ReservedWordModel.create(token='1', insert=5)
        fetched = ReservedWordModel.filter(token='1')

        assert len(fetched) == 1
        assert created.token == fetched[0].token
        assert created.insert == fetched[0].insert
    def setUpClass(cls):
        # Seed 10 consecutive days of data for each of 7 users, starting
        # 10 days in the past.
        super(TestDateTimeQueries, cls).setUpClass()
        sync_table(DateTimeQueryTestModel)

        cls.base_date = datetime.now() - timedelta(days=10)
        for x in range(7):
            for y in range(10):
                DateTimeQueryTestModel.create(
                    user=x,
                    day=(cls.base_date+timedelta(days=y)),
                    data=str(uuid4())
                )
Example #35
0
    def test_alter_options(self):
        """Re-syncing after changing a compaction option should not fail."""
        class AlterTable(Model):
            __compaction__ = LeveledCompactionStrategy
            __compaction_sstable_size_in_mb__ = 64

            user_id = columns.UUID(primary_key=True)
            name = columns.Text()

        drop_table(AlterTable)
        sync_table(AlterTable)

        # Change the sstable size and sync again to trigger ALTER TABLE.
        AlterTable.__compaction_sstable_size_in_mb__ = 128
        sync_table(AlterTable)
Example #36
0
    def test_alter_actually_alters(self):
        """Changing the compaction strategy and re-syncing must ALTER the table."""
        tmp = copy.deepcopy(LeveledcompactionTestTable)
        drop_table(tmp)
        sync_table(tmp)
        # Switch strategies and clear the leveled-only option before re-sync.
        tmp.__compaction__ = SizeTieredCompactionStrategy
        tmp.__compaction_sstable_size_in_mb__ = None
        sync_table(tmp)

        table_settings = get_table_settings(tmp)

        self.assertRegexpMatches(table_settings['compaction_strategy_class'],
                                 '.*SizeTieredCompactionStrategy$')
Example #37
0
    def test_has_extra_cf(self):
        """A synced table with no matching model is reported as extra."""
        foo_model = make_model(table_name='foo_bar')
        bar_model = make_model(table_name='baz_qux')
        sync_table(foo_model)
        sync_table(bar_model)

        # Only verify foo; baz_qux exists in the DB but has no model here.
        results = verify(foo_model)
        for result in results:
            result.report()
        assert len(results) == 1
        extra = results[0]
        assert extra.model == u'baz_qux'
        assert extra.is_extra
Example #38
0
    def test_reserved_cql_words_can_be_used_as_column_names(self):
        """
        Columns named after reserved CQL words ('token', 'insert') must
        work for both writes and reads.
        """
        sync_table(ReservedWordModel)

        model1 = ReservedWordModel.create(token='1', insert=5)

        model2 = ReservedWordModel.filter(token='1')

        assert len(model2) == 1
        assert model1.token == model2[0].token
        assert model1.insert == model2[0].insert
Example #39
0
def test_none_filter_fails():
    """Filtering on pk=None must raise CQLEngineException."""
    class NoneFilterModel(Model):

        pk = columns.Integer(primary_key=True)
        v = columns.Integer()

    sync_table(NoneFilterModel)

    try:
        NoneFilterModel.objects(pk=None)
    except CQLEngineException:
        pass
    else:
        # No exception was raised: fail loudly.
        raise Exception("fail")
Example #40
0
def test_none_filter_fails():
    """Filtering on pk=None must raise CQLEngineException."""
    class NoneFilterModel(Model):
        __keyspace__ = 'test'
        pk = columns.Integer(primary_key=True)
        v = columns.Integer()

    sync_table(NoneFilterModel)

    try:
        NoneFilterModel.objects(pk=None)
        # Reached only if no exception was raised: fail loudly.
        raise Exception("fail")
    except CQLEngineException as e:
        pass
    def test_alter_options(self):
        """Re-syncing after changing a compaction option should not fail."""
        class AlterTable(Model):
            __compaction__ = LeveledCompactionStrategy
            __compaction_sstable_size_in_mb__ = 64

            user_id = columns.UUID(primary_key=True)
            name = columns.Text()

        drop_table(AlterTable)
        sync_table(AlterTable)
        # Change the sstable size and sync again to trigger ALTER TABLE.
        AlterTable.__compaction_sstable_size_in_mb__ = 128
        sync_table(AlterTable)
Example #42
0
    def test_has_different(self):
        """A column whose type changed is reported under 'different'."""
        sync_table(make_model(table_name='foo_bar'))

        changed = make_model(table_name='foo_bar',
                             different={'title': columns.Ascii()})
        results = verify(changed)
        assert len(results) == 1
        result = results[0]

        assert not result.extra
        assert not result.missing
        assert len(result.different) == 1
        assert 'title' in result.different
Example #43
0
    def test_all_leveled_options(self):
        """Leveled compaction's sstable size option should round-trip."""
        class AllLeveledOptionsModel(Model):
            __compaction__ = LeveledCompactionStrategy
            __compaction_sstable_size_in_mb__ = 64

            cid = columns.UUID(primary_key=True)
            name = columns.Text()

        drop_table(AllLeveledOptionsModel)
        sync_table(AllLeveledOptionsModel)

        raw = get_table_settings(
            AllLeveledOptionsModel)['compaction_strategy_options']
        # Cassandra reports option values as strings.
        self.assertDictEqual(json.loads(raw), {u'sstable_size_in_mb': u'64'})
Example #44
0
def AddToCassandra_allcountsbatch_bypartition(d_iter):
    """Persist each dict in d_iter as a userbase2 row in Cassandra.

    Fix: the imports and CASSANDRA_KEYSPACE previously lived inside the
    class body, which made ``connection``, ``sync_table`` and
    ``CASSANDRA_KEYSPACE`` class attributes -- the references after the
    class definition then failed at function scope.
    """
    from cqlengine import columns
    from cqlengine.models import Model
    from cqlengine import connection
    from cqlengine.management import sync_table

    CASSANDRA_KEYSPACE = "playground"

    class userbase2(Model):
        uid = columns.Integer(primary_key=True)
        reviewerID = columns.Text(primary_key=True)
        reviewerName = columns.Text()

    connection.setup(['172.31.39.226'], CASSANDRA_KEYSPACE)
    sync_table(userbase2)
    for d in d_iter:
        userbase2.create(**d)
Example #45
0
def AddToCassandra_allcountsbatch_bypartition(d_iter):
    """Write each dict in d_iter as a predictions3 row in Cassandra."""
    from cqlengine import columns
    from cqlengine.models import Model
    from cqlengine import connection
    from cqlengine.management import sync_table

    CASSANDRA_KEYSPACE = "playground"

    class predictions3(Model):
        user = columns.Integer(primary_key=True)
        product = columns.Integer()
        rating = columns.Float(primary_key=True, clustering_order="DESC")

    connection.setup(['172.31.39.226'], CASSANDRA_KEYSPACE)
    sync_table(predictions3)
    for record in d_iter:
        predictions3.create(**record)
Example #46
0
			def syncToCassandra(d_iter):
				"""Write each dict in d_iter as a recommendations9 row."""
				from cqlengine import columns
				from cqlengine.models import Model
				from cqlengine import connection
				from cqlengine.management import sync_table

				CASSANDRA_KEYSPACE = "playground"
				connection.setup(['172.31.39.226'], CASSANDRA_KEYSPACE)

				class recommendations9(Model):
					uid = columns.Integer(primary_key=True)
					mid = columns.Integer(primary_key=True)
					rating = columns.Float()

				sync_table(recommendations9)
				for d in d_iter:
					recommendations9.create(**d)
Example #47
0
def AddToCassandra_allcountsbatch_bypartition(d_iter):
    """Store each dict in d_iter as a reviewerProfile row in Cassandra."""
    from cqlengine import columns
    from cqlengine.models import Model
    from cqlengine import connection
    from cqlengine.management import sync_table

    CASSANDRA_KEYSPACE = "playground"

    class reviewerProfile(Model):
        reviewerID = columns.Text(primary_key=True)
        reviews = columns.Map(columns.Text, columns.Float)

    connection.setup(['172.31.39.226'], CASSANDRA_KEYSPACE)
    sync_table(reviewerProfile)
    for profile in d_iter:
        reviewerProfile.create(**profile)
Example #48
0
def AddToCassandra_allcountsbatch_bypartition(d_iter):
    """Sync the ``userbase2`` model and insert every record from *d_iter*.

    Bug fix: the imports and ``CASSANDRA_KEYSPACE`` previously lived inside the
    class body. That meant ``Model`` was unresolved when the class statement's
    bases were evaluated (bases are evaluated before the class body runs), and
    ``connection``/``sync_table``/``CASSANDRA_KEYSPACE`` were class attributes,
    unreachable from the function body below — every call raised NameError.
    They are hoisted to function scope here.
    """
    from cqlengine import columns
    from cqlengine.models import Model
    from cqlengine import connection
    from cqlengine.management import sync_table

    CASSANDRA_KEYSPACE = "playground"

    class userbase2(Model):
        # Composite primary key: numeric uid + reviewerID text key.
        uid = columns.Integer(primary_key=True)
        reviewerID = columns.Text(primary_key=True)
        reviewerName = columns.Text()

    connection.setup(['172.31.39.226'], CASSANDRA_KEYSPACE)
    sync_table(userbase2)
    for d in d_iter:
        userbase2.create(**d)
Example #49
0
def AddToCassandra_stocktotalsbatch_bypartition(d_iter):
    """Write per-user portfolio totals from *d_iter* into the ``finance_news`` keyspace."""
    from cqlengine import columns
    from cqlengine.models import Model
    from cqlengine import connection
    from cqlengine.management import sync_table

    class stock_totals_batch(Model):
        user = columns.Text(primary_key=True)
        portfolio_total = columns.Integer()

    # Cassandra seed node; TODO: do not hard code this.
    host = "ec2-54-215-237-86.us-west-1.compute.amazonaws.com"
    connection.setup([host], "finance_news")
    sync_table(stock_totals_batch)
    for record in d_iter:
        stock_totals_batch.create(**record)
Example #50
0
def test_paged_result_handling():
    """Regression test for #225: slicing a queryset must drain every page."""
    class PagingTest(Model):
        id = columns.Integer(primary_key=True)
        val = columns.Integer()

    sync_table(PagingTest)
    PagingTest.create(id=1, val=1)
    PagingTest.create(id=2, val=2)

    # Force a fetch size of 1 so the two rows span two result pages.
    session = get_session()
    with mock.patch.object(session, 'default_fetch_size', 1):
        fetched = PagingTest.objects()[:]

    assert len(fetched) == 2
Example #51
0
def AddToCassandra_allcountsbatch_bypartition(d_iter):
        """Insert (user, product, rating) records from *d_iter* into ``reviewerProfile``."""
        from cqlengine import columns
        from cqlengine.models import Model
        from cqlengine import connection
        from cqlengine.management import sync_table

        keyspace = "playground"

        class reviewerProfile(Model):
                # user + product partition the data; rating is a DESC clustering key.
                user = columns.Integer(primary_key=True)
                product = columns.Integer(primary_key=True)
                rating = columns.Float(primary_key=True, clustering_order="DESC")

        connection.setup(['172.31.39.226'], keyspace)
        sync_table(reviewerProfile)
        for record in d_iter:
                reviewerProfile.create(**record)
    def test_compaction_not_altered_without_changes_leveled(self):
        """A freshly synced table with LeveledCompactionStrategy settings must
        not be reported as needing a compaction ALTER when nothing changed."""
        from cqlengine.management import update_compaction

        class LeveledCompactionChangesDetectionTest(Model):
            # Table-level compaction options that sync_table writes to the schema.
            __keyspace__ = 'test'
            __compaction__ = LeveledCompactionStrategy
            __compaction_sstable_size_in_mb__ = 160
            __compaction_tombstone_threshold__ = 0.125
            __compaction_tombstone_compaction_interval__ = 3600

            pk = columns.Integer(primary_key=True)

        # Recreate the table from scratch so the stored schema exactly matches
        # the model's compaction options.
        drop_table(LeveledCompactionChangesDetectionTest)
        sync_table(LeveledCompactionChangesDetectionTest)

        # update_compaction returns truthy only when an ALTER was issued.
        assert not update_compaction(LeveledCompactionChangesDetectionTest)
Example #53
0
            def syncToCassandra(d_iter):
                """Sync ``recommendations9`` and bulk-insert rating records from *d_iter*."""
                from cqlengine import columns
                from cqlengine.models import Model
                from cqlengine import connection
                from cqlengine.management import sync_table

                keyspace = "playground"
                connection.setup(['172.31.39.226'], keyspace)

                class recommendations9(Model):
                    # (uid, mid) composite primary key with a float rating payload.
                    uid = columns.Integer(primary_key=True)
                    mid = columns.Integer(primary_key=True)
                    rating = columns.Float()

                sync_table(recommendations9)
                for record in d_iter:
                    recommendations9.create(**record)
    def test_concrete_class_table_creation_cycle(self):
        """ Tests that models with inherited abstract classes can be created, and have io performed """
        from cqlengine.management import sync_table, drop_table
        sync_table(ConcreteModelWithCol)

        # Write two rows, read each back by primary key, and compare fields.
        written = [ConcreteModelWithCol.create(pkey=k, data=k + 1) for k in (5, 6)]
        for w in written:
            r = ConcreteModelWithCol.get(pkey=w.pkey)
            assert w.pkey == r.pkey
            assert w.data == r.data

        drop_table(ConcreteModelWithCol)
def syncToCassandra(d_iter):
    """Populate the ``movieprofile9`` movie-metadata table from *d_iter*."""
    from cqlengine import columns
    from cqlengine.models import Model
    from cqlengine import connection
    from cqlengine.management import sync_table

    class movieprofile9(Model):
        # mid is the sole partition key; remaining columns are text metadata.
        mid = columns.Integer(primary_key=True)
        asin = columns.Text()
        title = columns.Text()
        imurl = columns.Text()

    connection.setup(['172.31.39.226'], "playground")
    sync_table(movieprofile9)
    for record in d_iter:
        movieprofile9.create(**record)
Example #56
0
def AddToCassandra_allcountsbatch_bypartition(d_iter):
    """Sync the ``movieCatalog2`` model and insert every record from *d_iter*.

    Bug fix: the imports and ``CASSANDRA_KEYSPACE`` previously lived inside the
    class body. ``Model`` was therefore unresolved when the class statement's
    bases were evaluated (bases are evaluated before the class body runs), and
    ``connection``/``sync_table``/``CASSANDRA_KEYSPACE`` were trapped in the
    class namespace, raising NameError in the function body below the class.
    They are hoisted to function scope here.
    """
    from cqlengine import columns
    from cqlengine.models import Model
    from cqlengine import connection
    from cqlengine.management import sync_table

    CASSANDRA_KEYSPACE = "playground"

    class movieCatalog2(Model):
        # Composite primary key: numeric pid + asin text key.
        pid = columns.Integer(primary_key=True)
        asin = columns.Text(primary_key=True)
        brand = columns.Text()
        imUrl = columns.Text()
        price = columns.Float()
        title = columns.Text()

    connection.setup(['172.31.39.226'], CASSANDRA_KEYSPACE)
    sync_table(movieCatalog2)
    for d in d_iter:
        movieCatalog2.create(**d)
def syncToCassandra(d_iter):
    """Sync ``userprofile9`` and write one row per record in *d_iter*."""
    from cqlengine import columns
    from cqlengine.models import Model
    from cqlengine import connection
    from cqlengine.management import sync_table

    class userprofile9(Model):
        # uid partitions the data; ratings maps item text keys to float scores.
        uid = columns.Integer(primary_key=True)
        reviewerid = columns.Text()
        reviewername = columns.Text()
        numofreviews = columns.Float()
        ratings = columns.Map(columns.Text, columns.Float)

    connection.setup(['172.31.39.226'], "playground")
    sync_table(userprofile9)
    for record in d_iter:
        userprofile9.create(**record)