Example #1
0
    def create_index(self, model, waiting_models):
        """
        Create (or attach) the Solr search index for *model*'s bucket.

        If the index already exists on the server, the bucket's
        ``search_index`` property is repointed at it when it differs.
        Otherwise the index is created with ``self.n_val`` replicas and
        attached. If Riak cannot fulfil the creation immediately
        (``RiakError``), the model is appended to *waiting_models* so a
        later pass can retry it until every model is processed.

        Args:
            model: model whose bucket should be indexed.
            waiting_models (list): retry queue; models that could not be
                processed in this pass are appended here.

        Returns:
            None
        """
        bucket_name = model._get_bucket_name()
        bucket_type = client.bucket_type(settings.DEFAULT_BUCKET_TYPE)
        index_name = "%s_%s" % (settings.DEFAULT_BUCKET_TYPE, bucket_name)
        bucket = bucket_type.bucket(bucket_name)
        try:
            client.get_search_index(index_name)
            # Index already exists server-side; only the bucket property
            # may need updating, so report "attached", not "created".
            if bucket.get_property('search_index') != index_name:
                bucket.set_property('search_index', index_name)
                print("+ %s (%s) search index is attached." %
                      (model.__name__, index_name))
        except RiakError:
            # get_search_index raised -> index is missing; create and attach.
            try:
                client.create_search_index(index_name, index_name, self.n_val)
                bucket.set_property('search_index', index_name)
                print("+ %s (%s) search index is created." %
                      (model.__name__, index_name))
            except RiakError:
                # Riak not ready yet; defer this model to the retry queue.
                print(
                    "+ %s (%s) search index checking operation is taken to queue."
                    % (model.__name__, index_name))
                waiting_models.append(model)
Example #2
0
    def apply_schema(client, new_schema, model, silent=False):
        """
        Create a fresh search index holding *new_schema* and attach it to
        the model's bucket.

        Riak doesn't support schema/index updates ( http://git.io/vLOTS ),
        so instead of mutating the existing index, a brand-new index is
        created under a unique time-derived name and the bucket's
        ``search_index`` property is repointed at it. The previous index is
        left in place (stale indexes can be garbage-collected separately).

        Args:
            client: Riak client instance.
            new_schema (bytes): compiled Solr schema to install.
            model: model whose bucket receives the new index.
            silent (bool): when True, suppress the progress message.

        Returns:
            None
        """
        bucket_name = model._get_bucket_name()
        bucket_type = client.bucket_type(settings.DEFAULT_BUCKET_TYPE)
        bucket = bucket_type.bucket(bucket_name)
        n_val = bucket_type.get_property('n_val')
        # Time-derived, strictly decreasing suffix: each run yields a fresh
        # index name that cannot clash with the currently attached one.
        suffix = 9000000000 - int(time.time())
        new_index_name = "%s_%s_%s" % (settings.DEFAULT_BUCKET_TYPE, bucket_name, suffix)
        client.create_search_schema(new_index_name, new_schema)
        client.create_search_index(new_index_name, new_index_name, n_val)
        bucket.set_property('search_index', new_index_name)
        if not silent:
            print("+ %s (%s)" % (model.__name__, new_index_name))
Example #3
0
    def apply_schema(client, force, job_pack, check_only):
        """
        Re-create search schemas/indexes for each (schema, model) pair and
        re-index the stored records.

        Riak doesn't support schema/index updates ( http://git.io/vLOTS ).
        As a workaround, each bucket is temporarily pointed at a placeholder
        index ('foo_index'), the old index is deleted, the index is
        re-created with the new schema and re-attached, and finally every
        stored object is re-saved so it gets indexed under the new schema.

        Args:
            client: Riak client instance.
            force (bool): re-apply even when the deployed schema already
                matches the compiled one.
            job_pack: iterable of (new_schema, model) pairs to process.
            check_only (bool): only report models whose schema is outdated;
                skip the actual migration for them.

        Returns:
            None
        """
        import traceback

        for new_schema, model in job_pack:
            # Pre-bind so the diagnostic handler at the bottom can't raise
            # NameError when the failure happens before these are assigned.
            bucket_name = bucket_type = n_val = None
            try:
                bucket_name = model._get_bucket_name()
                bucket_type = client.bucket_type(settings.DEFAULT_BUCKET_TYPE)
                bucket = bucket_type.bucket(bucket_name)
                n_val = bucket_type.get_property('n_val')
                index_name = "%s_%s" % (settings.DEFAULT_BUCKET_TYPE, bucket_name)
                if not force:
                    try:
                        schema = get_schema_from_solr(index_name)
                        if schema == new_schema:
                            print("Schema %s is already up to date, nothing to do!" % index_name)
                            continue
                        elif check_only:
                            # Reaching this elif implies schema != new_schema.
                            print("Schema %s is not up to date, migrate this model!" % index_name)
                            continue
                    except Exception:
                        # NOTE(review): if the schema fetch fails while
                        # check_only is set, the migration below still runs —
                        # preserved from the original behaviour.
                        traceback.print_exc()
                # Detach the bucket before deleting its index; Riak refuses
                # to delete an index that is still in use.
                bucket.set_property('search_index', 'foo_index')
                try:
                    client.delete_search_index(index_name)
                except RiakError as e:
                    if 'notfound' != e.value:
                        raise
                wait_for_schema_deletion(index_name)
                client.create_search_schema(index_name, new_schema)
                client.create_search_index(index_name, index_name, n_val)
                bucket.set_property('search_index', index_name)
                print("+ %s (%s)" % (model.__name__, index_name))
                # Re-save every object so it is indexed under the new schema.
                stream = bucket.stream_keys()
                i = 0
                unsaved_keys = []
                for key_list in stream:
                    for key in key_list:
                        i += 1
                        try:
                            obj = bucket.get(key)
                            if obj.data:
                                obj.store()
                        except ConflictError:
                            unsaved_keys.append(key)
                            print("Error on save. Record in conflict: %s > %s" % (bucket_name, key))
                        except Exception:
                            unsaved_keys.append(key)
                            print("Error on save! %s > %s" % (bucket_name, key))
                            traceback.print_exc()
                stream.close()
                print("Re-indexed %s records of %s" % (i, bucket_name))
                if unsaved_keys:
                    print("\nThese keys cannot be updated:\n\n", unsaved_keys)

            except Exception:
                # Dump context for the failing model before propagating.
                print("n_val: %s" % n_val)
                print("bucket_name: %s" % bucket_name)
                print("bucket_type: %s" % bucket_type)
                raise
Example #4
0
    def apply_schema(client, force, job_pack):
        """
        Re-create search schemas/indexes for each (schema, model) pair and
        re-index the stored records.

        Riak doesn't support schema/index updates ( http://git.io/vLOTS ).
        As a workaround, each bucket is temporarily pointed at a placeholder
        index ('foo_index'), the old index is deleted, the index is
        re-created with the new schema and re-attached, and finally every
        stored object is re-saved so it gets indexed under the new schema.

        Args:
            client: Riak client instance.
            force (bool): re-apply even when the deployed schema already
                matches the compiled one.
            job_pack: iterable of (new_schema, model) pairs to process.

        Returns:
            None
        """
        import traceback

        for new_schema, model in job_pack:
            # Pre-bind so the diagnostic handler at the bottom can't raise
            # NameError when the failure happens before these are assigned.
            bucket_name = bucket_type = n_val = None
            try:
                bucket_name = model._get_bucket_name()
                bucket_type = client.bucket_type(settings.DEFAULT_BUCKET_TYPE)
                bucket = bucket_type.bucket(bucket_name)
                n_val = bucket_type.get_property('n_val')
                index_name = "%s_%s" % (settings.DEFAULT_BUCKET_TYPE,
                                        bucket_name)
                if not force:
                    try:
                        if get_schema_from_solr(index_name) == new_schema:
                            print(
                                "Schema %s already up to date, nothing to do!"
                                % index_name)
                            continue
                    except Exception:
                        # Best effort: if the deployed schema can't be read,
                        # fall through and migrate anyway.
                        traceback.print_exc()
                # Detach the bucket before deleting its index; Riak refuses
                # to delete an index that is still in use.
                bucket.set_property('search_index', 'foo_index')
                try:
                    client.delete_search_index(index_name)
                except RiakError as e:
                    if 'notfound' != e.value:
                        raise
                wait_for_schema_deletion(index_name)
                client.create_search_schema(index_name, new_schema)
                client.create_search_index(index_name, index_name, n_val)
                bucket.set_property('search_index', index_name)
                print("+ %s (%s)" % (model.__name__, index_name))
                # Re-save every object so it is indexed under the new schema.
                stream = bucket.stream_keys()
                i = 0
                unsaved_keys = []
                for key_list in stream:
                    for key in key_list:
                        i += 1
                        try:
                            obj = bucket.get(key)
                            if obj.data:
                                obj.store()
                        except ConflictError:
                            unsaved_keys.append(key)
                            print(
                                "Error on save. Record in conflict: %s > %s" %
                                (bucket_name, key))
                        except Exception:
                            unsaved_keys.append(key)
                            print("Error on save! %s > %s" %
                                  (bucket_name, key))
                            traceback.print_exc()
                stream.close()
                print("Re-indexed %s records of %s" % (i, bucket_name))
                if unsaved_keys:
                    print("\nThese keys cannot be updated:\n\n", unsaved_keys)

            except Exception:
                # Dump context for the failing model before propagating.
                print("n_val: %s" % n_val)
                print("bucket_name: %s" % bucket_name)
                print("bucket_type: %s" % bucket_type)
                raise