예제 #1
0
    def apply_schema(client, new_schema, model, silent=False):
        """
        Rebuild the search index of *model*'s bucket with a fresh schema.

        riak doesn't support schema/index updates ( http://git.io/vLOTS ),
        so a brand-new index with a unique, time-derived name is created
        from ``new_schema`` and assigned to the bucket. The previously
        assigned index is left behind (see the commented-out stale-index
        cleanup below).

        :param client: riak client used for the schema/index calls.
        :param bytes new_schema: compiled Solr schema document.
        :param model: pyoko model class; supplies the bucket name.
        :param bool silent: suppress the progress print when True.
        :return: None
        """
        bucket_name = model._get_bucket_name()
        bucket_type = client.bucket_type(settings.DEFAULT_BUCKET_TYPE)
        bucket = bucket_type.bucket(bucket_name)
        n_val = bucket_type.get_property('n_val')
        # delete stale indexes
        # inuse_indexes = [b.get_properties().get('search_index') for b in
        #                  bucket_type.get_buckets()]
        # stale_indexes = [si['name'] for si in self.client.list_search_indexes()
        #                     if si['name'] not in inuse_indexes]
        # for stale_index in stale_indexes:
        #     self.client.delete_search_index(stale_index)

        # Countdown suffix makes the index name unique per invocation and
        # makes newer indexes sort first lexically.
        suffix = 9000000000 - int(time.time())
        new_index_name = "%s_%s_%s" % (settings.DEFAULT_BUCKET_TYPE, bucket_name, suffix)
        client.create_search_schema(new_index_name, new_schema)
        client.create_search_index(new_index_name, new_index_name, n_val)
        bucket.set_property('search_index', new_index_name)
        # settings.update_index(bucket_name, new_index_name)
        if not silent:
            print("+ %s (%s)" % (model.__name__, new_index_name))
예제 #2
0
    def create_index(self, model, waiting_models):
        """
        Create (or attach) the search index for the given model's bucket.

        Args:
            model: model whose bucket needs a search index.
            waiting_models: models riak could not process immediately are
                appended here so a later pass can retry and verify them.

        Returns:
            None
        """
        bucket_name = model._get_bucket_name()
        index_name = "%s_%s" % (settings.DEFAULT_BUCKET_TYPE, bucket_name)
        bucket = client.bucket_type(
            settings.DEFAULT_BUCKET_TYPE).bucket(bucket_name)
        try:
            # Index already exists on riak; just make sure the bucket
            # actually points at it.
            client.get_search_index(index_name)
            if bucket.get_property('search_index') != index_name:
                bucket.set_property('search_index', index_name)
                print("+ %s (%s) search index is created." %
                      (model.__name__, index_name))
        except RiakError:
            # Index is missing; create it, then attach it to the bucket.
            try:
                client.create_search_index(index_name, index_name, self.n_val)
                bucket.set_property('search_index', index_name)
                print("+ %s (%s) search index is created." %
                      (model.__name__, index_name))
            except RiakError:
                # riak refused for now; queue the model for a retry pass.
                print(
                    "+ %s (%s) search index checking operation is taken to queue."
                    % (model.__name__, index_name))
                waiting_models.append(model)
예제 #3
0
 def __init__(self, models, threads, force):
     """Collect run parameters and the shared riak-derived settings."""
     self.report = []
     self.models = models
     self.force = force
     self.client = client
     self.threads = int(threads)
     # Replication factor of the default bucket type, reused when
     # creating search indexes later on.
     default_type = client.bucket_type(settings.DEFAULT_BUCKET_TYPE)
     self.n_val = default_type.get_property('n_val')
     self.base_thread = BaseThreadedCommand()
예제 #4
0
파일: yoksis.py 프로젝트: mithathan/ulakbus
    def birim_kaydet(self, birim_id):
        """
        Persist the detail record of a single YOKSIS unit.

        Fetches (or creates) the riak object keyed by ``birim_id`` in the
        ``ulakbus_yoksis_birim`` bucket, replaces its data with the current
        unit details and stores it back.

        :param birim_id: YOKSIS unit id; stringified and used as riak key.
        """
        from pyoko.db.connection import client
        yoksis_birim = client.bucket_type('catalog').bucket('ulakbus_yoksis_birim')

        y = yoksis_birim.get(str(birim_id))
        y.data = self.birim_detaylari()
        y.store()
        # Fix: log success only after store() completed; previously the
        # "saved" message was emitted before the write, so a failing
        # store() still logged success.
        self.logger.info("%s icin kaydedildi: \n\n" % birim_id)
예제 #5
0
파일: yoksis.py 프로젝트: zetaops/ulakbus
    def birim_kaydet(self, birim_id):
        """
        Persist the detail record of a single YOKSIS unit.

        Fetches (or creates) the riak object keyed by ``birim_id`` in the
        ``ulakbus_yoksis_birim`` bucket, replaces its data with the current
        unit details and stores it back.

        :param birim_id: YOKSIS unit id; stringified and used as riak key.
        """
        from pyoko.db.connection import client
        yoksis_birim = client.bucket_type('catalog').bucket(
            'ulakbus_yoksis_birim')

        y = yoksis_birim.get(str(birim_id))
        y.data = self.birim_detaylari()
        y.store()
        # Fix: log success only after store() completed; previously the
        # "saved" message was emitted before the write, so a failing
        # store() still logged success.
        self.logger.info("%s icin kaydedildi: \n\n" % birim_id)
예제 #6
0
    def run(self):
        """Load settings fixtures from a JSON file, or every *.json in a dir."""
        from pyoko.db.connection import client
        import os

        fixture_bucket = client.bucket_type('catalog').bucket('ulakbus_settings_fixtures')
        path = self.manager.args.path

        if not os.path.isdir(path):
            # Single fixture file given directly.
            self.dump(path, fixture_bucket)
        else:
            # Directory: dump every JSON fixture it contains.
            from glob import glob
            for fixture_file in glob(os.path.join(path, "*.json")):
                self.dump(fixture_file, fixture_bucket)
예제 #7
0
    def run(self):
        """Dump JSON fixture file(s) into the settings-fixtures bucket."""
        from pyoko.db.connection import client
        import os
        from glob import glob

        target_bucket = client.bucket_type('catalog').bucket('ulakbus_settings_fixtures')
        path = self.manager.args.path

        # A directory means "every *.json inside it"; a plain path is
        # treated as one fixture file.
        if os.path.isdir(path):
            fixture_files = glob(os.path.join(path, "*.json"))
        else:
            fixture_files = [path]
        for fixture_file in fixture_files:
            self.dump(fixture_file, target_bucket)
예제 #8
0
    def test_count(self):
        """
        Check ``objects.count()`` against raw Solr hit counts.

        Saves 770 students whose ``number`` field cycles over '0'/'1'/'2',
        waits for Solr to index them (riak search is eventually consistent),
        then compares filtered counts and ``set_params`` paging counts with
        the totals reported directly by ``bucket.search``.
        """
        mb = client.bucket_type('pyoko_models').bucket('student')
        Student.objects._clear()
        # rows=0: ask Solr only for the hit count, not the documents.
        results = mb.search('-deleted:True', 'pyoko_models_student',
                            **{'rows': 0})
        assert Student.objects.count() == results['num_found'] == 0

        # 770 records will be saved.
        for i in range(770):
            Student(number=str(i % 3)).save()

        # wait until 770 records are saved.
        while mb.search('-deleted:True', 'pyoko_models_student', **
                        {'rows': 0})['num_found'] != 770:
            time.sleep(0.3)

        # total count
        assert Student.objects.count() == 770

        # number '2' results count
        results = mb.search('-deleted:True AND number:2',
                            'pyoko_models_student', **{'rows': 0})
        assert Student.objects.filter(
            number='2').count() == results['num_found'] == 256

        # total count
        assert Student.objects.filter(
            number='2').count() + Student.objects.filter(
                number='1').count() + Student.objects.filter(
                    number='0').count() == 770

        # set_params and count tests:
        assert Student.objects.filter(number='2').set_params(
            start=0).count() == 256
        assert Student.objects.filter(number='2').set_params(
            start=0, rows=35).count() == 35
        assert Student.objects.filter(number='2').set_params(
            start=125, rows=35).count() == 35
        assert Student.objects.filter(number='2').set_params(
            start=0, rows=0).count() == 0
        assert Student.objects.filter(number='2').set_params(
            rows=100).count() == 100
        # start=250 leaves only 6 of the 256 matches in the page window.
        assert Student.objects.filter(number='2').set_params(
            start=250, rows=100).count() == 6
        assert Student.objects.filter(number='2').set_params(
            start=300, rows=100).count() == 0
        self.prepare_testbed(reset=True)
예제 #9
0
    def find_models_and_delete_search_index(self, model, force, exec_models,
                                            check_only):
        """
        Decide whether *model* needs migration and, if so, drop its index.

        Compiles the model's current schema and compares it with the one
        deployed on Solr. A model that actually needs work has its existing
        search index deleted (the bucket is parked on 'foo_index' meanwhile)
        and is appended to ``exec_models`` for the migration pass.

        Args:
            model: model class to inspect.
            force (bool): when True, skip the schema comparison and always
                migrate the model.
            exec_models (list): models that need migration are appended here.
            check_only: do not migrate, only report whether migration is
                needed, if True.

        Returns:
            None
        """
        ins = model(fake_context)
        fields = self.get_schema_fields(ins._collect_index_fields())
        new_schema = self.compile_schema(fields)
        bucket_name = model._get_bucket_name()
        bucket_type = client.bucket_type(settings.DEFAULT_BUCKET_TYPE)
        bucket = bucket_type.bucket(bucket_name)
        index_name = "%s_%s" % (settings.DEFAULT_BUCKET_TYPE, bucket_name)
        if not force:
            try:
                schema = get_schema_from_solr(index_name)
                if schema == new_schema:
                    print("Schema %s is already up to date, nothing to do!" %
                          index_name)
                    return
                elif check_only and schema != new_schema:
                    print("Schema %s is not up to date, migrate this model!" %
                          index_name)
                    return
            except Exception:
                # Best effort: if the comparison itself fails we fall through
                # and migrate anyway, surfacing the problem for the operator.
                # (Was a bare ``except:``; narrowed so KeyboardInterrupt /
                # SystemExit still propagate.)
                import traceback
                traceback.print_exc()
        # Point the bucket at a throwaway index so the real one can be dropped.
        bucket.set_property('search_index', 'foo_index')
        try:
            client.delete_search_index(index_name)
        except RiakError as e:
            # A missing index is fine — we were going to delete it anyway.
            if 'notfound' != e.value:
                raise
        wait_for_schema_deletion(index_name)
        exec_models.append(model)
예제 #10
0
    def test_all(self):
        """
        Check ``objects.all()`` in both unordered and ordered modes.

        Saves ``row_size + 100`` students, waits for them to be indexed,
        verifies that a bare ``filter()`` over more rows than ``row_size``
        raises, and that ``order_by().all()`` yields keys in the same
        timestamp-descending order Solr reports.
        """
        mb = client.bucket_type('pyoko_models').bucket('student')
        row_size = BaseAdapter()._cfg['row_size']
        Student.objects._clear()
        assert Student.objects.count() == 0

        for i in range(row_size + 100):
            Student(name=str(i)).save()

        # Riak search is eventually consistent; poll until all are indexed.
        while Student.objects.count() != row_size + 100:
            time.sleep(0.3)

        # Wanted result from filter method much than default row_size.
        # It should raise an exception.
        with pytest.raises(Exception):
            Student.objects.filter()

        # Results are taken from solr in ordered with 'timestamp' sort parameter.
        results = mb.search(
            '-deleted:True', 'pyoko_models_student', **{
                'sort': 'timestamp desc',
                'fl': '_yz_rk, score',
                'rows': row_size + 100
            })

        # Ordered key list is created.
        ordered_key_list = [doc['_yz_rk'] for doc in results['docs']]

        # Getting data from riak with unordered way is tested.
        students = Student.objects.all()
        assert len(students) == row_size + 100
        assert students.adapter.ordered == False

        # Getting data from riak with ordered way is tested.
        temp_key_list = []
        students = Student.objects.order_by().all()
        assert students.adapter.ordered == True
        for student in students:
            temp_key_list.append(student.key)

        assert len(temp_key_list) == row_size + 100
        assert temp_key_list == ordered_key_list
        self.prepare_testbed(reset=True)
예제 #11
0
        inline_edit = ['catalog_key', 'tr', 'en']
        # we do NOT want checkboxes on right of the ui table view
        allow_selection = False
        # set meta translate_widget to True to use translate view for ui
        translate_widget = True

    save = fields.Button("Save", cmd="save_catalog", flow="start")
    cancel = fields.Button("Cancel", cmd="cancel", flow="start")

    class CatalogDatas(ListNode):
        # One catalog entry: a key plus its Turkish and English labels.
        catalog_key = fields.String()
        tr = fields.String("Türkçe")
        en = fields.String("English")


# Riak bucket holding the catalog fixture data, shared by the catalog views.
fixture_bucket = client.bucket_type('catalog').bucket(
    'ulakbus_settings_fixtures')


class CatalogDataView(CrudView):
    """
    Workflow class of catalog add/edit screens
    """
    def list_catalogs(self):
        """
        Lists existing catalogs respect to ui view template format
        """
        catalog_form = CatalogSelectForm(current=self.current)
        # Every key in the fixture bucket becomes a (value, label) choice.
        choices = []
        for key in fixture_bucket.get_keys():
            choices.append((key, key))
        catalog_form.set_choices_of('catalog', choices)
        self.form_out(catalog_form)
예제 #12
0
    def apply_schema(client, force, job_pack, check_only):
        """
        Re-create the search index of every (schema, model) pair in job_pack.

        riak doesn't support schema/index updates ( http://git.io/vLOTS ):
        for each model, the existing index is deleted (the bucket is parked
        on 'foo_index' meanwhile), the index is re-created from the compiled
        schema and re-assigned, and finally every record of the bucket is
        re-stored so it gets re-indexed under the new schema.

        :param client: riak client used for all schema/index operations.
        :param bool force: apply the schema even if Solr already has an
            identical one.
        :param job_pack: iterable of (compiled_schema, model) tuples.
        :param check_only: only report whether a migration is needed and
            skip the model, if True.
        :return: None
        """
        for new_schema, model in job_pack:
            try:
                bucket_name = model._get_bucket_name()
                bucket_type = client.bucket_type(settings.DEFAULT_BUCKET_TYPE)
                bucket = bucket_type.bucket(bucket_name)
                n_val = bucket_type.get_property('n_val')
                index_name = "%s_%s" % (settings.DEFAULT_BUCKET_TYPE, bucket_name)
                if not force:
                    # Skip models whose deployed schema already matches.
                    try:
                        schema = get_schema_from_solr(index_name)
                        if schema == new_schema:
                            print("Schema %s is already up to date, nothing to do!" % index_name)
                            continue
                        elif check_only and schema != new_schema:
                            print("Schema %s is not up to date, migrate this model!" % index_name)
                            continue
                    except:
                        # Best effort: a failed comparison falls through to
                        # a full migration, but the error is shown.
                        import traceback
                        traceback.print_exc()
                # Park the bucket on a throwaway index so the real one can
                # be deleted.
                bucket.set_property('search_index', 'foo_index')
                try:
                    client.delete_search_index(index_name)
                except RiakError as e:
                    # A missing index is fine; anything else is fatal.
                    if 'notfound' != e.value:
                        raise
                wait_for_schema_deletion(index_name)
                client.create_search_schema(index_name, new_schema)
                client.create_search_index(index_name, index_name, n_val)
                bucket.set_property('search_index', index_name)
                print("+ %s (%s)" % (model.__name__, index_name))
                # Re-store every record so riak re-indexes it.
                stream = bucket.stream_keys()
                i = 0
                unsaved_keys = []
                for key_list in stream:
                    for key in key_list:
                        i += 1
                        # time.sleep(0.4)
                        try:
                            obj = bucket.get(key)
                            if obj.data:
                                obj.store()
                        except ConflictError:
                            unsaved_keys.append(key)
                            print("Error on save. Record in conflict: %s > %s" % (bucket_name, key))
                        except:
                            unsaved_keys.append(key)
                            print("Error on save! %s > %s" % (bucket_name, key))
                            import traceback
                            traceback.print_exc()
                stream.close()
                print("Re-indexed %s records of %s" % (i, bucket_name))
                if unsaved_keys:
                    print("\nThese keys cannot be updated:\n\n", unsaved_keys)

            except:
                # Dump the migration context before re-raising so the
                # failing model can be identified.
                print("n_val: %s" % n_val)
                print("bucket_name: %s" % bucket_name)
                print("bucket_type: %s" % bucket_type)
                raise
예제 #13
0
    def apply_schema(client, force, job_pack):
        """
        Re-create the search index of every (schema, model) pair in job_pack.

        riak doesn't support schema/index updates ( http://git.io/vLOTS ):
        for each model, the existing index is deleted (the bucket is parked
        on 'foo_index' meanwhile), the index is re-created from the compiled
        schema and re-assigned, and finally every record of the bucket is
        re-stored so it gets re-indexed under the new schema.

        :param client: riak client used for all schema/index operations.
        :param bool force: apply the schema even if Solr already has an
            identical one.
        :param job_pack: iterable of (compiled_schema, model) tuples.
        :return: None
        """
        for new_schema, model in job_pack:
            try:
                bucket_name = model._get_bucket_name()
                bucket_type = client.bucket_type(settings.DEFAULT_BUCKET_TYPE)
                bucket = bucket_type.bucket(bucket_name)
                n_val = bucket_type.get_property('n_val')
                index_name = "%s_%s" % (settings.DEFAULT_BUCKET_TYPE,
                                        bucket_name)
                if not force:
                    # Skip models whose deployed schema already matches.
                    try:
                        if get_schema_from_solr(index_name) == new_schema:
                            print(
                                "Schema %s already up to date, nothing to do!"
                                % index_name)
                            continue
                    except:
                        # Best effort: a failed comparison falls through to
                        # a full migration, but the error is shown.
                        import traceback
                        traceback.print_exc()
                # Park the bucket on a throwaway index so the real one can
                # be deleted.
                bucket.set_property('search_index', 'foo_index')
                try:
                    client.delete_search_index(index_name)
                except RiakError as e:
                    # A missing index is fine; anything else is fatal.
                    if 'notfound' != e.value:
                        raise
                wait_for_schema_deletion(index_name)
                client.create_search_schema(index_name, new_schema)
                client.create_search_index(index_name, index_name, n_val)
                bucket.set_property('search_index', index_name)
                print("+ %s (%s)" % (model.__name__, index_name))
                # Re-store every record so riak re-indexes it.
                stream = bucket.stream_keys()
                i = 0
                unsaved_keys = []
                for key_list in stream:
                    for key in key_list:
                        i += 1
                        # time.sleep(0.4)
                        try:
                            obj = bucket.get(key)
                            if obj.data:
                                obj.store()
                        except ConflictError:
                            unsaved_keys.append(key)
                            print(
                                "Error on save. Record in conflict: %s > %s" %
                                (bucket_name, key))
                        except:
                            unsaved_keys.append(key)
                            print("Error on save! %s > %s" %
                                  (bucket_name, key))
                            import traceback
                            traceback.print_exc()
                stream.close()
                print("Re-indexed %s records of %s" % (i, bucket_name))
                if unsaved_keys:
                    print("\nThese keys cannot be updated:\n\n", unsaved_keys)

            except:
                # Dump the migration context before re-raising so the
                # failing model can be identified.
                print("n_val: %s" % n_val)
                print("bucket_name: %s" % bucket_name)
                print("bucket_type: %s" % bucket_type)
                raise
예제 #14
0
 def get_from_db(self, cat):
     """Fetch catalog *cat* from the settings-fixtures bucket and parse it."""
     from pyoko.db.connection import client
     bucket = client.bucket_type('catalog').bucket('ulakbus_settings_fixtures')
     raw = bucket.get(cat).data
     return self.parse_db_data(raw, cat)
 def get_from_db(self, cat):
     """Return parsed catalog data for ``cat`` from the riak fixtures."""
     from pyoko.db.connection import client
     catalog_bucket = client.bucket_type('catalog').bucket(
         'ulakbus_settings_fixtures')
     return self.parse_db_data(catalog_bucket.get(cat).data, cat)