Example #1
File: db.py Project: hasadna/ckan
def create_indexes(context, data_dict):
    connection = context["connection"]
    indexes = datastore_helpers.get_list(data_dict.get("indexes"))
    # primary key is not a real primary key
    # it's just a unique key
    primary_key = datastore_helpers.get_list(data_dict.get("primary_key"))

    sql_index_tmpl = u'CREATE {unique} INDEX "{name}" ON "{res_id}"'
    sql_index_string_method = sql_index_tmpl + u" USING {method}({fields})"
    sql_index_string = sql_index_tmpl + u" ({fields})"
    sql_index_strings = []

    fields = _get_fields(context, data_dict)
    field_ids = _pluck("id", fields)
    json_fields = [x["id"] for x in fields if x["type"] == "nested"]

    fts_indexes = _build_fts_indexes(connection, data_dict, sql_index_string_method, fields)
    sql_index_strings = sql_index_strings + fts_indexes

    if indexes is not None:
        _drop_indexes(context, data_dict, False)
    else:
        indexes = []

    if primary_key is not None:
        _drop_indexes(context, data_dict, True)
        indexes.append(primary_key)

    for index in indexes:
        if not index:
            continue

        index_fields = datastore_helpers.get_list(index)
        for field in index_fields:
            if field not in field_ids:
                raise ValidationError({"index": [('The field "{0}" is not a valid column name.').format(index)]})
        fields_string = u", ".join(
            ['(("{0}").json::text)'.format(field) if field in json_fields else '"%s"' % field for field in index_fields]
        )
        sql_index_strings.append(
            sql_index_string.format(
                res_id=data_dict["resource_id"],
                unique="unique" if index == primary_key else "",
                name=_generate_index_name(data_dict["resource_id"], fields_string),
                fields=fields_string,
            )
        )

    sql_index_strings = map(lambda x: x.replace("%", "%%"), sql_index_strings)
    current_indexes = _get_index_names(context["connection"], data_dict["resource_id"])
    for sql_index_string in sql_index_strings:
        has_index = [c for c in current_indexes if sql_index_string.find(c) != -1]
        if not has_index:
            connection.execute(sql_index_string)
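
All of these examples funnel user input through datastore_helpers.get_list, and the control flow above depends on its three-way contract: a missing key yields None (leave existing indexes alone), an empty string yields [] (drop them all), and a comma-separated string yields a stripped list. A minimal sketch of that behaviour, as a hypothetical reimplementation rather than the actual CKAN helper:

def get_list(input, strip_values=True):
    # Hypothetical sketch of the contract the examples rely on,
    # not the actual ckanext.datastore helper.
    if input is None:
        return None
    if input == '':
        return []
    if isinstance(input, basestring):
        values = input.split(',')
    else:
        values = list(input)
    if strip_values:
        values = [v.strip() if isinstance(v, basestring) else v
                  for v in values]
    return values

assert get_list(None) is None              # key absent: keep current indexes
assert get_list('') == []                  # empty string: drop all indexes
assert get_list('a, b') == ['a', 'b']      # comma-separated string
assert get_list(['a', 'b']) == ['a', 'b']  # lists pass through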
Example #2
def create_alias(context, data_dict):
    aliases = datastore_helpers.get_list(data_dict.get('aliases'))
    if aliases is not None:
        # delete previous aliases
        previous_aliases = _get_aliases(context, data_dict)
        for alias in previous_aliases:
            sql_alias_drop_string = u'DROP VIEW "{0}"'.format(alias)
            context['connection'].execute(sql_alias_drop_string)

        try:
            for alias in aliases:
                sql_alias_string = u'''CREATE VIEW "{alias}"
                    AS SELECT * FROM "{main}"'''.format(
                    alias=alias, main=data_dict['resource_id'])

                res_ids = _get_resources(context, alias)
                if res_ids:
                    raise ValidationError({
                        'alias':
                        [(u'The alias "{0}" already exists.').format(alias)]
                    })

                context['connection'].execute(sql_alias_string)
        except DBAPIError, e:
            if e.orig.pgcode in [
                    _PG_ERR_CODE['duplicate_table'],
                    _PG_ERR_CODE['duplicate_alias']
            ]:
                raise ValidationError(
                    {'alias': ['"{0}" already exists'.format(alias)]})
Example #3
    def datastore_search(self, context, data_dict, fields_types, query_dict):
        fields = data_dict.get("fields")

        if fields:
            field_ids = datastore_helpers.get_list(fields)
        else:
            field_ids = fields_types.keys()

        ts_query, rank_column = self._textsearch_query(data_dict)
        limit = data_dict.get("limit", 100)
        offset = data_dict.get("offset", 0)

        sort = self._sort(data_dict, fields_types)
        where = self._where(data_dict, fields_types)

        select_cols = [u'"{0}"'.format(field_id) for field_id in field_ids] + [
            u'count(*) over() as "_full_count" %s' % rank_column
        ]

        query_dict["distinct"] = data_dict.get("distinct", False)
        query_dict["select"] += select_cols
        query_dict["ts_query"] = ts_query
        query_dict["sort"] += sort
        query_dict["where"] += where
        query_dict["limit"] = limit
        query_dict["offset"] = offset

        return query_dict
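
datastore_search here is an IDatastore hook: it receives a partially built query_dict, appends to it, and returns it. The += assignments above imply the shape it expects, sketched below (key names taken from the code, defaults assumed):

query_dict = {
    'select': [],        # extended with the quoted column expressions
    'sort': [],          # extended with '"field" asc' style clauses
    'where': [],         # extended with the clauses produced by self._where
    'distinct': False,
    'ts_query': '',
    'limit': 100,
    'offset': 0,
}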
Example #4
    def datastore_search(self, context, data_dict, fields_types, query_dict):
        fields = data_dict.get('fields')

        if fields:
            field_ids = datastore_helpers.get_list(fields)
        else:
            field_ids = fields_types.keys()

        ts_query, rank_column = self._textsearch_query(data_dict)
        limit = data_dict.get('limit', 100)
        offset = data_dict.get('offset', 0)

        sort = self._sort(data_dict, fields_types)
        where = self._where(data_dict, fields_types)

        select_cols = [u'"{0}"'.format(field_id) for field_id in field_ids] +\
                      [u'count(*) over() as "_full_count" %s' % rank_column]

        query_dict['distinct'] = data_dict.get('distinct', False)
        query_dict['select'] += select_cols
        query_dict['ts_query'] = ts_query
        query_dict['sort'] += sort
        query_dict['where'] += where
        query_dict['limit'] = limit
        query_dict['offset'] = offset

        return query_dict
Example #5
    def datastore_search(self, context, data_dict, fields_types, query_dict):
        fields = data_dict.get('fields')

        if fields:
            field_ids = datastore_helpers.get_list(fields)
        else:
            field_ids = fields_types.keys()

        ts_query, rank_column = self._textsearch_query(data_dict)
        limit = data_dict.get('limit', 100)
        offset = data_dict.get('offset', 0)

        sort = self._sort(data_dict, fields_types)
        where = self._where(data_dict, fields_types)

        select_cols = [
            datastore_helpers.identifier(field_id) for field_id in field_ids
        ]
        if rank_column:
            select_cols.append(rank_column)

        query_dict['distinct'] = data_dict.get('distinct', False)
        query_dict['select'] += select_cols
        query_dict['ts_query'] = ts_query
        query_dict['sort'] += sort
        query_dict['where'] += where
        query_dict['limit'] = limit
        query_dict['offset'] = offset

        return query_dict
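
Example #5 replaces the hand-rolled u'"{0}"'.format(...) quoting of the earlier variants with datastore_helpers.identifier. A sketch of what such a helper has to do, assuming the usual PostgreSQL rule of doubling embedded quotes (hypothetical reimplementation):

def identifier(s):
    # Double-quote a PostgreSQL identifier and escape embedded quotes;
    # plain u'"{0}"'.format(s) would break on a quote inside the name.
    return u'"' + s.replace(u'"', u'""') + u'"'

print identifier(u'weird "col"')   # -> "weird ""col"""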
Example #6
    def _sort(self, data_dict, fields_types):
        sort = data_dict.get('sort')
        if not sort:
            q = data_dict.get('q')
            if q:
                if isinstance(q, basestring):
                    return [self._ts_rank_alias()]
                elif isinstance(q, dict):
                    return [
                        self._ts_rank_alias(field) for field in q
                        if field not in fields_types
                    ]
            else:
                return []

        clauses = datastore_helpers.get_list(sort, False)

        clause_parsed = []

        for clause in clauses:
            field, sort = self._parse_sort_clause(clause, fields_types)
            clause_parsed.append(u'{0} {1}'.format(
                datastore_helpers.identifier(field), sort))

        return clause_parsed
Example #7
    def _sort(self, data_dict, fields_types):
        sort = data_dict.get('sort')
        if not sort:
            q = data_dict.get('q')
            if q:
                if isinstance(q, basestring):
                    return [self._ts_rank_alias()]
                elif isinstance(q, dict):
                    return [self._ts_rank_alias(field) for field in q
                            if field not in fields_types]
            else:
                return []

        clauses = datastore_helpers.get_list(sort, False)

        clause_parsed = []

        for clause in clauses:
            clause = clause.encode('utf-8')
            clause_parts = shlex.split(clause)
            if len(clause_parts) == 1:
                field, sort = clause_parts[0], 'asc'
            elif len(clause_parts) == 2:
                field, sort = clause_parts

            field, sort = unicode(field, 'utf-8'), unicode(sort, 'utf-8')

            clause_parsed.append(u'"{0}" {1}'.format(field, sort))

        return clause_parsed
Example #8
File: plugin.py Project: morty/ckan
    def _sort(self, data_dict):
        sort = data_dict.get('sort')
        if not sort:
            if data_dict.get('q'):
                return [u'rank']
            else:
                return []

        clauses = datastore_helpers.get_list(sort, False)

        clause_parsed = []

        for clause in clauses:
            clause = clause.encode('utf-8')
            clause_parts = shlex.split(clause)
            if len(clause_parts) == 1:
                field, sort = clause_parts[0], 'asc'
            elif len(clause_parts) == 2:
                field, sort = clause_parts

            field, sort = unicode(field, 'utf-8'), unicode(sort, 'utf-8')

            clause_parsed.append(u'"{0}" {1}'.format(field, sort))

        return clause_parsed
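
The encode/shlex.split/decode dance is Python 2's way of letting a quoted field name carry spaces inside a sort clause. A minimal sketch of what one loop iteration does:

import shlex

clause = u'"release year" desc'.encode('utf-8')  # shlex wants bytes here
parts = shlex.split(clause)                      # ['release year', 'desc']
if len(parts) == 1:
    field, direction = parts[0], 'asc'           # default direction
else:
    field, direction = parts
print u'"{0}" {1}'.format(unicode(field, 'utf-8'),
                          unicode(direction, 'utf-8'))
# -> "release year" desc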
Example #9
    def datastore_search(self, context, data_dict, fields_types, query_dict):
        fields = data_dict.get('fields')

        if fields:
            field_ids = datastore_helpers.get_list(fields)
        else:
            field_ids = fields_types.keys()

        ts_query, rank_column = self._textsearch_query(data_dict)
        limit = data_dict.get('limit', 100)
        offset = data_dict.get('offset', 0)

        sort = self._sort(data_dict, fields_types)
        where = self._where(data_dict, fields_types)

        select_cols = [u'"{0}"'.format(field_id) for field_id in field_ids] +\
                      [u'count(*) over() as "_full_count" %s' % rank_column]

        query_dict['distinct'] = data_dict.get('distinct', False)
        query_dict['select'] += select_cols
        query_dict['ts_query'] = ts_query
        query_dict['sort'] += sort
        query_dict['where'] += where
        query_dict['limit'] = limit
        query_dict['offset'] = offset

        return query_dict
Example #10
    def _sort(self, data_dict):
        sort = data_dict.get('sort')
        if not sort:
            if data_dict.get('q'):
                return [u'rank']
            else:
                return []

        clauses = datastore_helpers.get_list(sort, False)

        clause_parsed = []

        for clause in clauses:
            clause = clause.encode('utf-8')
            clause_parts = shlex.split(clause)
            if len(clause_parts) == 1:
                field, sort = clause_parts[0], 'asc'
            elif len(clause_parts) == 2:
                field, sort = clause_parts

            field, sort = unicode(field, 'utf-8'), unicode(sort, 'utf-8')

            clause_parsed.append(u'"{0}" {1}'.format(field, sort))

        return clause_parsed
Example #11
File: db.py Project: 6779660/ckan
def create_alias(context, data_dict):
    aliases = datastore_helpers.get_list(data_dict.get('aliases'))
    if aliases is not None:
        # delete previous aliases
        previous_aliases = _get_aliases(context, data_dict)
        for alias in previous_aliases:
            sql_alias_drop_string = u'DROP VIEW "{0}"'.format(alias)
            context['connection'].execute(sql_alias_drop_string)

        try:
            for alias in aliases:
                sql_alias_string = u'''CREATE VIEW "{alias}"
                    AS SELECT * FROM "{main}"'''.format(
                    alias=alias,
                    main=data_dict['resource_id']
                )

                res_ids = _get_resources(context, alias)
                if res_ids:
                    raise ValidationError({
                        'alias': [(u'The alias "{0}" already exists.').format(
                            alias)]
                    })

                context['connection'].execute(sql_alias_string)
        except DBAPIError, e:
            if e.orig.pgcode in [_PG_ERR_CODE['duplicate_table'],
                                 _PG_ERR_CODE['duplicate_alias']]:
                raise ValidationError({
                    'alias': [u'"{0}" already exists'.format(alias)]
                })
Example #12
    def _sort(self, data_dict, fields_types):
        sort = data_dict.get('sort')
        if not sort:
            q = data_dict.get('q')
            if q:
                if isinstance(q, basestring):
                    return [self._ts_rank_alias()]
                elif isinstance(q, dict):
                    return [
                        self._ts_rank_alias(field) for field in q
                        if field not in fields_types
                    ]
            else:
                return []

        clauses = datastore_helpers.get_list(sort, False)

        clause_parsed = []

        for clause in clauses:
            clause = clause.encode('utf-8')
            clause_parts = shlex.split(clause)
            if len(clause_parts) == 1:
                field, sort = clause_parts[0], 'asc'
            elif len(clause_parts) == 2:
                field, sort = clause_parts

            field, sort = unicode(field, 'utf-8'), unicode(sort, 'utf-8')

            clause_parsed.append(u'"{0}" {1}'.format(field, sort))

        return clause_parsed
Example #13
File: db.py Project: PetoO/ckan
def validate(context, data_dict):
    all_fields = _get_fields(context, data_dict)
    column_names = _pluck('id', all_fields)
    column_names.insert(0, '_id')
    data_dict_copy = copy.deepcopy(data_dict)

    # TODO: Convert all attributes that can be a comma-separated string to
    # lists
    if 'fields' in data_dict_copy:
        fields = datastore_helpers.get_list(data_dict_copy['fields'])
        data_dict_copy['fields'] = fields
    if 'sort' in data_dict_copy:
        fields = datastore_helpers.get_list(data_dict_copy['sort'], False)
        data_dict_copy['sort'] = fields

    for plugin in p.PluginImplementations(interfaces.IDatastore):
        data_dict_copy = plugin.datastore_validate(context,
                                                   data_dict_copy,
                                                   column_names)

    # Remove default elements in data_dict
    del data_dict_copy['connection_url']
    del data_dict_copy['resource_id']
    data_dict_copy.pop('id', None)

    for key, values in data_dict_copy.iteritems():
        if not values:
            continue
        if isinstance(values, basestring):
            value = values
        elif isinstance(values, (list, tuple)):
            value = values[0]
        elif isinstance(values, dict):
            value = values.keys()[0]
        else:
            value = values

        raise ValidationError({
            key: [u'invalid value "{0}"'.format(value)]
        })

    return True
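
The loop over IDatastore plugins encodes a pop-what-you-understand convention: each datastore_validate implementation removes the keys it can handle, and whatever survives every plugin is rejected by the final loop. A hedged sketch of a conforming method (hypothetical plugin, key names illustrative):

def datastore_validate(self, context, data_dict, column_names):
    # Remove every key this plugin knows how to process; leftovers
    # make validate() raise ValidationError for that key.
    data_dict.pop('limit', None)
    data_dict.pop('offset', None)
    fields = data_dict.get('fields')
    if fields is not None:
        unknown = [f for f in fields if f not in column_names]
        if unknown:
            data_dict['fields'] = unknown   # report only the bad names
        else:
            data_dict.pop('fields')
    return data_dict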
Example #14
File: db.py Project: yuriprym/ckan
def validate(context, data_dict):
    fields_types = _get_fields_types(context, data_dict)
    data_dict_copy = copy.deepcopy(data_dict)

    # TODO: Convert all attributes that can be a comma-separated string to
    # lists
    if 'fields' in data_dict_copy:
        fields = datastore_helpers.get_list(data_dict_copy['fields'])
        data_dict_copy['fields'] = fields
    if 'sort' in data_dict_copy:
        fields = datastore_helpers.get_list(data_dict_copy['sort'], False)
        data_dict_copy['sort'] = fields

    for plugin in p.PluginImplementations(interfaces.IDatastore):
        data_dict_copy = plugin.datastore_validate(context,
                                                   data_dict_copy,
                                                   fields_types)

    # Remove default elements in data_dict
    del data_dict_copy['connection_url']
    del data_dict_copy['resource_id']
    data_dict_copy.pop('id', None)

    for key, values in data_dict_copy.iteritems():
        if not values:
            continue
        if isinstance(values, basestring):
            value = values
        elif isinstance(values, (list, tuple)):
            value = values[0]
        elif isinstance(values, dict):
            value = values.keys()[0]
        else:
            value = values

        raise ValidationError({
            key: [u'invalid value "{0}"'.format(value)]
        })

    return True
Example #15
    def querystore_resolve(self, pid, records_format='objects'):
        qs = QueryStore()
        query = qs.retrieve_query(pid)

        if query is None:
            return None

        if resource_exists(query.resource_id):
            connection = get_write_engine().connect()
            rs = postgres_querystore_resolve(query)

            # column names as a list
            column_names = rs.keys()

            # drop the trailing sys_period column used for versioning
            # (it is not needed by the user; it has a purely technical meaning)
            del column_names[-1]

            search_result = refine_results(rs, column_names)
            result_dictionary = {
                'column_names': sorted(column_names),
                'result_set': search_result,
                'query': query,
                'resource_id': query.resource_id
            }
            context = {'connection': connection}
            fields_types = _get_fields_types(context['connection'],
                                             query.resource_id)
            result_dictionary['fields'] = sorted(
                _result_fields(
                    fields_types,
                    _get_field_info(context['connection'], query.resource_id),
                    get_list(result_dictionary.get('fields'))))

            # do not show the sys_period column because it has only
            # a technical meaning
            result_dictionary['fields'] = exclude_sys_period(
                result_dictionary['fields'])

            if records_format == 'objects':
                result_dictionary['result_set'] = list(
                    result_dictionary['result_set'])
            elif records_format == 'csv':
                result_dictionary['result_set'] = convert_to_csv(
                    result_dictionary['result_set'])

            return result_dictionary
        else:
            return {}
Example #16
File: db.py Project: hasadna/ckan
def validate(context, data_dict):
    fields_types = _get_fields_types(context, data_dict)
    data_dict_copy = copy.deepcopy(data_dict)

    # TODO: Convert all attributes that can be a comma-separated string to
    # lists
    if "fields" in data_dict_copy:
        fields = datastore_helpers.get_list(data_dict_copy["fields"])
        data_dict_copy["fields"] = fields
    if "sort" in data_dict_copy:
        fields = datastore_helpers.get_list(data_dict_copy["sort"], False)
        data_dict_copy["sort"] = fields

    for plugin in p.PluginImplementations(interfaces.IDatastore):
        data_dict_copy = plugin.datastore_validate(context, data_dict_copy, fields_types)

    # Remove default elements in data_dict
    del data_dict_copy["connection_url"]
    del data_dict_copy["resource_id"]
    data_dict_copy.pop("id", None)

    for key, values in data_dict_copy.iteritems():
        if not values:
            continue
        if isinstance(values, basestring):
            value = values
        elif isinstance(values, (list, tuple)):
            value = values[0]
        elif isinstance(values, dict):
            value = values.keys()[0]
        else:
            value = values

        raise ValidationError({key: [u'invalid value "{0}"'.format(value)]})

    return True
Example #17
    def datastore_search(self, context, data_dict, fields_types, query_dict):
        fields = data_dict.get('fields')

        if fields:
            field_ids = datastore_helpers.get_list(fields)
        else:
            field_ids = fields_types.keys()

        ts_query, rank_column = self._textsearch_query(data_dict)
        limit = data_dict.get('limit', 100)
        offset = data_dict.get('offset', 0)

        sort = self._sort(data_dict, fields_types)
        where = self._where(data_dict, fields_types)

        select_cols = []
        records_format = data_dict.get(u'records_format')
        json_values = records_format in (u'objects', u'lists')
        for field_id in field_ids:
            fmt = u'to_json({0})' if records_format == u'lists' else u'{0}'
            typ = fields_types.get(field_id)
            if typ == u'nested':
                fmt = u'({0}).json'
            elif typ == u'timestamp':
                fmt = u"to_char({0}, 'YYYY-MM-DD\"T\"HH24:MI:SS')"
                if json_values:
                    fmt = u"to_json({0})".format(fmt)
            elif typ.startswith(u'_') or typ.endswith(u'[]'):
                fmt = u'array_to_json({0})'
            if records_format == u'objects':
                fmt += u' as {0}'
            select_cols.append(fmt.format(
                datastore_helpers.identifier(field_id)))
        if rank_column:
            select_cols.append(rank_column)

        query_dict['distinct'] = data_dict.get('distinct', False)
        query_dict['select'] += select_cols
        query_dict['ts_query'] = ts_query
        query_dict['sort'] += sort
        query_dict['where'] += where
        query_dict['limit'] = limit
        query_dict['offset'] = offset

        return query_dict
Example #18
    def _sort(self, data_dict, fields_types):
        sort = data_dict.get("sort")
        if not sort:
            q = data_dict.get("q")
            if q:
                if isinstance(q, basestring):
                    return [self._ts_rank_alias()]
                elif isinstance(q, dict):
                    return [self._ts_rank_alias(field) for field in q if field not in fields_types]
            else:
                return []

        clauses = datastore_helpers.get_list(sort, False)

        clause_parsed = []

        for clause in clauses:
            field, sort = self._parse_sort_clause(clause, fields_types)
            clause_parsed.append(u'"{0}" {1}'.format(field, sort))

        return clause_parsed
Example #19
def create_indexes(context, data_dict):
    connection = context['connection']
    indexes = datastore_helpers.get_list(data_dict.get('indexes'))
    # primary key is not a real primary key
    # it's just a unique key
    primary_key = datastore_helpers.get_list(data_dict.get('primary_key'))

    sql_index_tmpl = u'CREATE {unique} INDEX "{name}" ON "{res_id}"'
    sql_index_string_method = sql_index_tmpl + u' USING {method}({fields})'
    sql_index_string = sql_index_tmpl + u' ({fields})'
    sql_index_strings = []

    fields = _get_fields(context, data_dict)
    field_ids = _pluck('id', fields)
    json_fields = [x['id'] for x in fields if x['type'] == 'nested']

    fts_indexes = _build_fts_indexes(connection, data_dict,
                                     sql_index_string_method, fields)
    sql_index_strings = sql_index_strings + fts_indexes

    if indexes is not None:
        _drop_indexes(context, data_dict, False)
    else:
        indexes = []

    if primary_key is not None:
        _drop_indexes(context, data_dict, True)
        indexes.append(primary_key)

    for index in indexes:
        if not index:
            continue

        index_fields = datastore_helpers.get_list(index)
        for field in index_fields:
            if field not in field_ids:
                raise ValidationError({
                    'index': [('The field "{0}" is not a valid column name.'
                               ).format(field)]
                })
        fields_string = u', '.join([
            '(("{0}").json::text)'.format(field)
            if field in json_fields else '"%s"' % field
            for field in index_fields
        ])
        sql_index_strings.append(
            sql_index_string.format(
                res_id=data_dict['resource_id'],
                unique='unique' if index == primary_key else '',
                name=_generate_index_name(data_dict['resource_id'],
                                          fields_string),
                fields=fields_string))

    sql_index_strings = map(lambda x: x.replace('%', '%%'), sql_index_strings)
    current_indexes = _get_index_names(context['connection'],
                                       data_dict['resource_id'])
    for sql_index_string in sql_index_strings:
        has_index = [
            c for c in current_indexes if sql_index_string.find(c) != -1
        ]
        if not has_index:
            connection.execute(sql_index_string)
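
The closing replace('%', '%%') exists because psycopg2-style drivers treat a bare % in the SQL text as the start of a parameter placeholder, so a literal percent sign, say in a generated index name, must be doubled before connection.execute. A tiny demonstration (hypothetical index name):

sql = u'CREATE INDEX "idx_100%_done" ON "r" ("pct")'   # hypothetical name
print sql.replace('%', '%%')
# -> CREATE INDEX "idx_100%%_done" ON "r" ("pct")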
Example #20
def datastore_create(context, data_dict):
    '''Adds a new table to the DataStore.

    The datastore_create action allows you to post JSON data to be
    stored against a resource. This endpoint also supports altering tables,
    aliases and indexes and bulk insertion. This endpoint can be called multiple
    times to initially insert more data, add fields, change the aliases or indexes
    as well as the primary keys.

    To create an empty datastore resource and a CKAN resource at the same time,
    provide ``resource`` with a valid ``package_id`` and omit the ``resource_id``.

    If you want to create a datastore resource from the content of a file,
    provide ``resource`` with a valid ``url``.

    See :ref:`fields` and :ref:`records` for details on how to lay out records.

    :param resource_id: resource id that the data is going to be stored against.
    :type resource_id: string
    :param force: set to True to edit a read-only resource
    :type force: bool (optional, default: False)
    :param resource: resource dictionary that is passed to
        :meth:`~ckan.logic.action.create.resource_create`.
        Use instead of ``resource_id`` (optional)
    :type resource: dictionary
    :param aliases: names for read only aliases of the resource. (optional)
    :type aliases: list or comma separated string
    :param fields: fields/columns and their extra metadata. (optional)
    :type fields: list of dictionaries
    :param records: the data, eg: [{"dob": "2005", "some_stuff": ["a", "b"]}]  (optional)
    :type records: list of dictionaries
    :param primary_key: fields that represent a unique key (optional)
    :type primary_key: list or comma separated string
    :param indexes: indexes on table (optional)
    :type indexes: list or comma separated string

    Please note that setting the ``aliases``, ``indexes`` or ``primary_key`` replaces the existing
    aliases or constraints. Setting ``records`` appends the provided records to the resource.

    **Results:**

    :returns: The newly created data object.
    :rtype: dictionary

    See :ref:`fields` and :ref:`records` for details on how to lay out records.

    '''
    schema = context.get('schema', dsschema.datastore_create_schema())
    records = data_dict.pop('records', None)
    resource = data_dict.pop('resource', None)
    data_dict, errors = _validate(data_dict, schema, context)
    resource_dict = None
    if records:
        data_dict['records'] = records
    if resource:
        data_dict['resource'] = resource
    if errors:
        raise p.toolkit.ValidationError(errors)

    p.toolkit.check_access('datastore_create', context, data_dict)

    if 'resource' in data_dict and 'resource_id' in data_dict:
        raise p.toolkit.ValidationError(
            {'resource': ['resource cannot be used with resource_id']})

    if 'resource' not in data_dict and 'resource_id' not in data_dict:
        raise p.toolkit.ValidationError(
            {'resource_id': ['resource_id or resource required']})

    if 'resource' in data_dict:
        has_url = 'url' in data_dict['resource']
        # A datastore only resource does not have a url in the db
        data_dict['resource'].setdefault('url', '_datastore_only_resource')
        resource_dict = p.toolkit.get_action('resource_create')(
            context, data_dict['resource'])
        data_dict['resource_id'] = resource_dict['id']

        # create resource from file
        if has_url:
            if not p.plugin_loaded('datapusher'):
                raise p.toolkit.ValidationError(
                    {'resource': ['The datapusher has to be enabled.']})
            p.toolkit.get_action('datapusher_submit')(
                context, {
                    'resource_id': resource_dict['id'],
                    'set_url_type': True
                })
            # since we'll overwrite the datastore resource anyway, we
            # don't need to create it here
            return

        # create empty resource
        else:
            # no need to set the full url because it will be set in before_show
            resource_dict['url_type'] = 'datastore'
            p.toolkit.get_action('resource_update')(context, resource_dict)
    else:
        if not data_dict.pop('force', False):
            resource_id = data_dict['resource_id']
            _check_read_only(context, resource_id)

    data_dict['connection_url'] = config['ckan.datastore.write_url']

    # validate aliases
    aliases = datastore_helpers.get_list(data_dict.get('aliases', []))
    for alias in aliases:
        if not db._is_valid_table_name(alias):
            raise p.toolkit.ValidationError(
                {'alias': [u'"{0}" is not a valid alias name'.format(alias)]})

    # create a private datastore resource, if necessary
    model = _get_or_bust(context, 'model')
    resource = model.Resource.get(data_dict['resource_id'])
    legacy_mode = 'ckan.datastore.read_url' not in config
    if not legacy_mode and resource.package.private:
        data_dict['private'] = True

    try:
        result = db.create(context, data_dict)
    except db.InvalidDataError as err:
        raise p.toolkit.ValidationError(unicode(err))

    # Set the datastore_active flag on the resource if necessary
    if resource.extras.get('datastore_active') is not True:
        log.debug('Setting datastore_active=True on resource {0}'.format(
            resource.id))
        # issue #3245: race condition
        update_dict = {'datastore_active': True}

        # get extras (for entity update) and package_id (for search index update)
        res_query = model.Session.query(
            model.resource_table.c.extras,
            model.resource_table.c.package_id).filter(
                model.Resource.id == data_dict['resource_id'])
        extras, package_id = res_query.one()

        # update extras in database for record and its revision
        extras.update(update_dict)
        res_query.update({'extras': extras}, synchronize_session=False)

        model.Session.query(model.resource_revision_table).filter(
            model.ResourceRevision.id == data_dict['resource_id'],
            model.ResourceRevision.current == True).update(  # noqa: '==' builds SQL, 'is' does not
                {'extras': extras}, synchronize_session=False)

        model.Session.commit()

        # get package with updated resource from solr
        # find changed resource, patch it and reindex package
        psi = search.PackageSearchIndex()
        solr_query = search.PackageSearchQuery()
        q = {
            'q': 'id:"{0}"'.format(package_id),
            'fl': 'data_dict',
            'wt': 'json',
            'fq': 'site_id:"%s"' % config.get('ckan.site_id'),
            'rows': 1
        }
        for record in solr_query.run(q)['results']:
            solr_data_dict = json.loads(record['data_dict'])
            for resource in solr_data_dict['resources']:
                if resource['id'] == data_dict['resource_id']:
                    resource.update(update_dict)
                    psi.index_package(solr_data_dict)
                    break

    result.pop('id', None)
    result.pop('private', None)
    result.pop('connection_url')
    return result
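
datastore_create is normally reached through CKAN's action API rather than called directly. A hedged Python 2 sketch of such a call (URL, API key and resource id are placeholders; the record payload mirrors the docstring's example):

import json
import urllib2

payload = {
    'resource_id': 'RESOURCE-ID-HERE',                       # placeholder
    'force': True,
    'records': [{'dob': '2005', 'some_stuff': ['a', 'b']}],
    'indexes': 'dob',                # comma-separated string also accepted
}
request = urllib2.Request('https://demo.ckan.org/api/3/action/datastore_create')
request.add_header('Authorization', 'API-KEY-HERE')          # placeholder
request.add_header('Content-Type', 'application/json')
response = urllib2.urlopen(request, json.dumps(payload))
print json.loads(response.read())['success']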
Example #21
def datastore_create(context, data_dict):
    '''Adds a new table to the DataStore.

    The datastore_create action allows you to post JSON data to be
    stored against a resource. This endpoint also supports altering tables,
    aliases and indexes and bulk insertion. This endpoint can be called
    multiple times to initially insert more data, add fields, change the
    aliases or indexes as well as the primary keys.

    To create an empty datastore resource and a CKAN resource at the same time,
    provide ``resource`` with a valid ``package_id`` and omit the
    ``resource_id``.

    If you want to create a datastore resource from the content of a file,
    provide ``resource`` with a valid ``url``.

    See :ref:`fields` and :ref:`records` for details on how to lay out records.

    :param resource_id: resource id that the data is going to be stored
                        against.
    :type resource_id: string
    :param force: set to True to edit a read-only resource
    :type force: bool (optional, default: False)
    :param resource: resource dictionary that is passed to
        :meth:`~ckan.logic.action.create.resource_create`.
        Use instead of ``resource_id`` (optional)
    :type resource: dictionary
    :param aliases: names for read only aliases of the resource. (optional)
    :type aliases: list or comma separated string
    :param fields: fields/columns and their extra metadata. (optional)
    :type fields: list of dictionaries
    :param records: the data, eg: [{"dob": "2005", "some_stuff": ["a", "b"]}]
                    (optional)
    :type records: list of dictionaries
    :param primary_key: fields that represent a unique key (optional)
    :type primary_key: list or comma separated string
    :param indexes: indexes on table (optional)
    :type indexes: list or comma separated string
    :param triggers: trigger functions to apply to this table on update/insert.
        Functions may be created with
        :meth:`~ckanext.datastore.logic.action.datastore_function_create`.
        eg: [
        {"function": "trigger_clean_reference"},
        {"function": "trigger_check_codes"}]
    :type triggers: list of dictionaries
    :param calculate_record_count: updates the stored count of records, used to
        optimize datastore_search in combination with the
        `total_estimation_threshold` parameter. If doing a series of requests
        to change a resource, you only need to set this to True on the last
        request.
    :type calculate_record_count: bool (optional, default: False)

    Please note that setting the ``aliases``, ``indexes`` or ``primary_key``
    replaces the existing aliases or constraints. Setting ``records`` appends
    the provided records to the resource.

    **Results:**

    :returns: The newly created data object, excluding ``records`` passed.
    :rtype: dictionary

    See :ref:`fields` and :ref:`records` for details on how to lay out records.

    '''
    backend = DatastoreBackend.get_active_backend()
    schema = context.get('schema', dsschema.datastore_create_schema())
    records = data_dict.pop('records', None)
    resource = data_dict.pop('resource', None)
    data_dict, errors = _validate(data_dict, schema, context)
    resource_dict = None
    if records:
        data_dict['records'] = records
    if resource:
        data_dict['resource'] = resource
    if errors:
        raise p.toolkit.ValidationError(errors)

    p.toolkit.check_access('datastore_create', context, data_dict)

    if 'resource' in data_dict and 'resource_id' in data_dict:
        raise p.toolkit.ValidationError(
            {'resource': ['resource cannot be used with resource_id']})

    if 'resource' not in data_dict and 'resource_id' not in data_dict:
        raise p.toolkit.ValidationError(
            {'resource_id': ['resource_id or resource required']})

    if 'resource' in data_dict:
        has_url = 'url' in data_dict['resource']
        # A datastore only resource does not have a url in the db
        data_dict['resource'].setdefault('url', '_datastore_only_resource')
        resource_dict = p.toolkit.get_action('resource_create')(
            context, data_dict['resource'])
        data_dict['resource_id'] = resource_dict['id']

        # create resource from file
        if has_url:
            if not p.plugin_loaded('datapusher'):
                raise p.toolkit.ValidationError(
                    {'resource': ['The datapusher has to be enabled.']})
            p.toolkit.get_action('datapusher_submit')(
                context, {
                    'resource_id': resource_dict['id'],
                    'set_url_type': True
                })
            # since we'll overwrite the datastore resource anyway, we
            # don't need to create it here
            return

        # create empty resource
        else:
            # no need to set the full url because it will be set in before_show
            resource_dict['url_type'] = 'datastore'
            p.toolkit.get_action('resource_update')(context, resource_dict)
    else:
        if not data_dict.pop('force', False):
            resource_id = data_dict['resource_id']
            _check_read_only(context, resource_id)

    # validate aliases
    aliases = datastore_helpers.get_list(data_dict.get('aliases', []))
    for alias in aliases:
        if not datastore_helpers.is_valid_table_name(alias):
            raise p.toolkit.ValidationError(
                {'alias': [u'"{0}" is not a valid alias name'.format(alias)]})

    try:
        result = backend.create(context, data_dict)
    except InvalidDataError as err:
        raise p.toolkit.ValidationError(text_type(err))

    if data_dict.get('calculate_record_count', False):
        backend.calculate_record_count(data_dict['resource_id'])

    # Set the datastore_active flag on the resource if necessary
    model = _get_or_bust(context, 'model')
    resobj = model.Resource.get(data_dict['resource_id'])
    if resobj.extras.get('datastore_active') is not True:
        log.debug('Setting datastore_active=True on resource {0}'.format(
            resobj.id))
        set_datastore_active_flag(model, data_dict, True)

    result.pop('id', None)
    result.pop('connection_url', None)
    result.pop('records', None)
    return result
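
Relative to Example #20, this version adds the ``triggers`` and ``calculate_record_count`` parameters. A hedged data_dict sketch matching the docstring (trigger function names taken from its example, resource id a placeholder):

data_dict = {
    'resource_id': 'RESOURCE-ID-HERE',          # placeholder
    'force': True,
    'records': [{'code': 'A1'}],
    'triggers': [{'function': 'trigger_clean_reference'},
                 {'function': 'trigger_check_codes'}],
    'calculate_record_count': True,   # set only on the last of many requests
}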
Example #22
def datastore_create(context, data_dict):
    '''Adds a new table to the DataStore.

    The datastore_create action allows you to post JSON data to be
    stored against a resource. This endpoint also supports altering tables,
    aliases and indexes and bulk insertion. This endpoint can be called multiple
    times to initially insert more data, add fields, change the aliases or indexes
    as well as the primary keys.

    To create an empty datastore resource and a CKAN resource at the same time,
    provide ``resource`` with a valid ``package_id`` and omit the ``resource_id``.

    If you want to create a datastore resource from the content of a file,
    provide ``resource`` with a valid ``url``.

    See :ref:`fields` and :ref:`records` for details on how to lay out records.

    :param resource_id: resource id that the data is going to be stored against.
    :type resource_id: string
    :param force: set to True to edit a read-only resource
    :type force: bool (optional, default: False)
    :param resource: resource dictionary that is passed to
        :meth:`~ckan.logic.action.create.resource_create`.
        Use instead of ``resource_id`` (optional)
    :type resource: dictionary
    :param aliases: names for read only aliases of the resource. (optional)
    :type aliases: list or comma separated string
    :param fields: fields/columns and their extra metadata. (optional)
    :type fields: list of dictionaries
    :param records: the data, eg: [{"dob": "2005", "some_stuff": ["a", "b"]}]  (optional)
    :type records: list of dictionaries
    :param primary_key: fields that represent a unique key (optional)
    :type primary_key: list or comma separated string
    :param indexes: indexes on table (optional)
    :type indexes: list or comma separated string

    Please note that setting the ``aliases``, ``indexes`` or ``primary_key`` replaces the existing
    aliases or constraints. Setting ``records`` appends the provided records to the resource.

    **Results:**

    :returns: The newly created data object.
    :rtype: dictionary

    See :ref:`fields` and :ref:`records` for details on how to lay out records.

    '''
    schema = context.get('schema', dsschema.datastore_create_schema())
    records = data_dict.pop('records', None)
    resource = data_dict.pop('resource', None)
    data_dict, errors = _validate(data_dict, schema, context)
    if records:
        data_dict['records'] = records
    if resource:
        data_dict['resource'] = resource
    if errors:
        raise p.toolkit.ValidationError(errors)

    p.toolkit.check_access('datastore_create', context, data_dict)

    if 'resource' in data_dict and 'resource_id' in data_dict:
        raise p.toolkit.ValidationError({
            'resource': ['resource cannot be used with resource_id']
        })

    if 'resource' not in data_dict and 'resource_id' not in data_dict:
        raise p.toolkit.ValidationError({
            'resource_id': ['resource_id or resource required']
        })

    if 'resource' in data_dict:
        has_url = 'url' in data_dict['resource']
        # A datastore only resource does not have a url in the db
        data_dict['resource'].setdefault('url', '_datastore_only_resource')
        res = p.toolkit.get_action('resource_create')(context,
                                                      data_dict['resource'])
        data_dict['resource_id'] = res['id']

        # create resource from file
        if has_url:
            if not p.plugin_loaded('datapusher'):
                raise p.toolkit.ValidationError({'resource': [
                    'The datapusher has to be enabled.']})
            p.toolkit.get_action('datapusher_submit')(context, {
                'resource_id': res['id'],
                'set_url_type': True
            })
            # since we'll overwrite the datastore resource anyway, we
            # don't need to create it here
            return

        # create empty resource
        else:
            # no need to set the full url because it will be set in before_show
            res['url_type'] = 'datastore'
            p.toolkit.get_action('resource_update')(context, res)
    else:
        if not data_dict.pop('force', False):
            resource_id = data_dict['resource_id']
            _check_read_only(context, resource_id)

    data_dict['connection_url'] = pylons.config['ckan.datastore.write_url']

    # validate aliases
    aliases = datastore_helpers.get_list(data_dict.get('aliases', []))
    for alias in aliases:
        if not db._is_valid_table_name(alias):
            raise p.toolkit.ValidationError({
                'alias': [u'"{0}" is not a valid alias name'.format(alias)]
            })

    # create a private datastore resource, if necessary
    model = _get_or_bust(context, 'model')
    resource = model.Resource.get(data_dict['resource_id'])
    legacy_mode = 'ckan.datastore.read_url' not in pylons.config
    if not legacy_mode and resource.resource_group.package.private:
        data_dict['private'] = True

    result = db.create(context, data_dict)
    result.pop('id', None)
    result.pop('private', None)
    result.pop('connection_url')
    return result
Example #23
File: db.py Project: 6779660/ckan
def create_indexes(context, data_dict):
    connection = context['connection']
    indexes = datastore_helpers.get_list(data_dict.get('indexes'))
    # primary key is not a real primary key
    # it's just a unique key
    primary_key = datastore_helpers.get_list(data_dict.get('primary_key'))

    sql_index_tmpl = u'CREATE {unique} INDEX "{name}" ON "{res_id}"'
    sql_index_string_method = sql_index_tmpl + u' USING {method}({fields})'
    sql_index_string = sql_index_tmpl + u' ({fields})'
    sql_index_strings = []

    fields = _get_fields(context, data_dict)
    field_ids = _pluck('id', fields)
    json_fields = [x['id'] for x in fields if x['type'] == 'nested']

    fts_indexes = _build_fts_indexes(connection,
                                     data_dict,
                                     sql_index_string_method,
                                     fields)
    sql_index_strings = sql_index_strings + fts_indexes

    if indexes is not None:
        _drop_indexes(context, data_dict, False)
    else:
        indexes = []

    if primary_key is not None:
        _drop_indexes(context, data_dict, True)
        indexes.append(primary_key)

    for index in indexes:
        if not index:
            continue

        index_fields = datastore_helpers.get_list(index)
        for field in index_fields:
            if field not in field_ids:
                raise ValidationError({
                    'index': [
                        u'The field "{0}" is not a valid column name.'.format(
                            field)]
                })
        fields_string = u', '.join(
            ['(("{0}").json::text)'.format(field)
                if field in json_fields else
                '"%s"' % field
                for field in index_fields])
        sql_index_strings.append(sql_index_string.format(
            res_id=data_dict['resource_id'],
            unique='unique' if index == primary_key else '',
            name=_generate_index_name(data_dict['resource_id'], fields_string),
            fields=fields_string))

    sql_index_strings = map(lambda x: x.replace('%', '%%'), sql_index_strings)
    current_indexes = _get_index_names(context['connection'],
                                       data_dict['resource_id'])
    for sql_index_string in sql_index_strings:
        has_index = [c for c in current_indexes
                     if sql_index_string.find(c) != -1]
        if not has_index:
            connection.execute(sql_index_string)
Example #24
def create_indexes(context, data_dict):
    indexes = datastore_helpers.get_list(data_dict.get('indexes'))
    # primary key is not a real primary key
    # it's just a unique key
    primary_key = datastore_helpers.get_list(data_dict.get('primary_key'))

    # index and primary key could be [],
    # which means that indexes should be deleted
    if indexes is None and primary_key is None:
        return

    sql_index_tmpl = u'CREATE {unique} INDEX {name} ON "{res_id}"'
    sql_index_string_method = sql_index_tmpl + u' USING {method}({fields})'
    sql_index_string = sql_index_tmpl + u' ({fields})'
    sql_index_strings = []

    fields = _get_fields(context, data_dict)
    field_ids = _pluck('id', fields)
    json_fields = [x['id'] for x in fields if x['type'] == 'nested']

    def generate_index_name():
        # pg 9.0+ do not require an index name
        if _pg_version_is_at_least(context['connection'], '9.0'):
            return ''
        else:
            src = string.ascii_letters + string.digits
            random_string = ''.join([random.choice(src) for n in xrange(10)])
            return 'idx_' + random_string

    if indexes is not None:
        _drop_indexes(context, data_dict, False)

        # create index for faster full text search (indexes: gin or gist)
        sql_index_strings.append(sql_index_string_method.format(
            res_id=data_dict['resource_id'],
            unique='',
            name=generate_index_name(),
            method='gist', fields='_full_text'))
    else:
        indexes = []

    if primary_key is not None:
        _drop_indexes(context, data_dict, True)
        indexes.append(primary_key)

    for index in indexes:
        if not index:
            continue

        index_fields = datastore_helpers.get_list(index)
        for field in index_fields:
            if field not in field_ids:
                raise ValidationError({
                    'index': [
                        ('The field "{0}" is not a valid column name.').format(
                            field)]
                })
        fields_string = u', '.join(
            ['(("{0}").json::text)'.format(field)
                if field in json_fields else
                '"%s"' % field
                for field in index_fields])
        sql_index_strings.append(sql_index_string.format(
            res_id=data_dict['resource_id'],
            unique='unique' if index == primary_key else '',
            name=generate_index_name(),
            fields=fields_string))

    sql_index_strings = map(lambda x: x.replace('%', '%%'), sql_index_strings)
    map(context['connection'].execute, sql_index_strings)
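
The string templates make the generated DDL easy to inspect; rendering the full-text branch by hand shows what this older variant sends to connection.execute (resource id a placeholder, the empty name relying on the pg 9.0+ behaviour noted in the code):

sql_index_tmpl = u'CREATE {unique} INDEX {name} ON "{res_id}"'
sql_index_string_method = sql_index_tmpl + u' USING {method}({fields})'
print sql_index_string_method.format(
    res_id='RESOURCE-ID-HERE', unique='', name='',
    method='gist', fields='_full_text')
# -> CREATE  INDEX  ON "RESOURCE-ID-HERE" USING gist(_full_text)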
Example #25
def datastore_create(context, data_dict):
    '''Adds a new table to the DataStore.

    The datastore_create action allows you to post JSON data to be
    stored against a resource. This endpoint also supports altering tables,
    aliases and indexes and bulk insertion. This endpoint can be called multiple
    times to initially insert more data, add fields, change the aliases or indexes
    as well as the primary keys.

    To create an empty datastore resource and a CKAN resource at the same time,
    provide ``resource`` with a valid ``package_id`` and omit the ``resource_id``.

    If you want to create a datastore resource from the content of a file,
    provide ``resource`` with a valid ``url``.

    See :ref:`fields` and :ref:`records` for details on how to lay out records.

    :param resource_id: resource id that the data is going to be stored against.
    :type resource_id: string
    :param force: set to True to edit a read-only resource
    :type force: bool (optional, default: False)
    :param resource: resource dictionary that is passed to
        :meth:`~ckan.logic.action.create.resource_create`.
        Use instead of ``resource_id`` (optional)
    :type resource: dictionary
    :param aliases: names for read only aliases of the resource. (optional)
    :type aliases: list or comma separated string
    :param fields: fields/columns and their extra metadata. (optional)
    :type fields: list of dictionaries
    :param records: the data, eg: [{"dob": "2005", "some_stuff": ["a", "b"]}]  (optional)
    :type records: list of dictionaries
    :param primary_key: fields that represent a unique key (optional)
    :type primary_key: list or comma separated string
    :param indexes: indexes on table (optional)
    :type indexes: list or comma separated string

    Please note that setting the ``aliases``, ``indexes`` or ``primary_key`` replaces the existing
    aliases or constraints. Setting ``records`` appends the provided records to the resource.

    **Results:**

    :returns: The newly created data object.
    :rtype: dictionary

    See :ref:`fields` and :ref:`records` for details on how to lay out records.

    '''
    schema = context.get('schema', dsschema.datastore_create_schema())
    records = data_dict.pop('records', None)
    resource = data_dict.pop('resource', None)
    data_dict, errors = _validate(data_dict, schema, context)
    resource_dict = None
    if records:
        data_dict['records'] = records
    if resource:
        data_dict['resource'] = resource
    if errors:
        raise p.toolkit.ValidationError(errors)

    p.toolkit.check_access('datastore_create', context, data_dict)

    if 'resource' in data_dict and 'resource_id' in data_dict:
        raise p.toolkit.ValidationError({
            'resource': ['resource cannot be used with resource_id']
        })

    if 'resource' not in data_dict and 'resource_id' not in data_dict:
        raise p.toolkit.ValidationError({
            'resource_id': ['resource_id or resource required']
        })

    if 'resource' in data_dict:
        has_url = 'url' in data_dict['resource']
        # A datastore only resource does not have a url in the db
        data_dict['resource'].setdefault('url', '_datastore_only_resource')
        resource_dict = p.toolkit.get_action('resource_create')(
            context, data_dict['resource'])
        data_dict['resource_id'] = resource_dict['id']

        # create resource from file
        if has_url:
            if not p.plugin_loaded('datapusher'):
                raise p.toolkit.ValidationError({'resource': [
                    'The datapusher has to be enabled.']})
            p.toolkit.get_action('datapusher_submit')(context, {
                'resource_id': resource_dict['id'],
                'set_url_type': True
            })
            # since we'll overwrite the datastore resource anyway, we
            # don't need to create it here
            return

        # create empty resource
        else:
            # no need to set the full url because it will be set in before_show
            resource_dict['url_type'] = 'datastore'
            p.toolkit.get_action('resource_update')(context, resource_dict)
    else:
        if not data_dict.pop('force', False):
            resource_id = data_dict['resource_id']
            _check_read_only(context, resource_id)

    data_dict['connection_url'] = config['ckan.datastore.write_url']

    # validate aliases
    aliases = datastore_helpers.get_list(data_dict.get('aliases', []))
    for alias in aliases:
        if not db._is_valid_table_name(alias):
            raise p.toolkit.ValidationError({
                'alias': [u'"{0}" is not a valid alias name'.format(alias)]
            })

    # create a private datastore resource, if necessary
    model = _get_or_bust(context, 'model')
    resource = model.Resource.get(data_dict['resource_id'])
    legacy_mode = 'ckan.datastore.read_url' not in config
    if not legacy_mode and resource.package.private:
        data_dict['private'] = True

    try:
        result = db.create(context, data_dict)
    except db.InvalidDataError as err:
        raise p.toolkit.ValidationError(unicode(err))

    # Set the datastore_active flag on the resource if necessary
    if resource.extras.get('datastore_active') is not True:
        log.debug(
            'Setting datastore_active=True on resource {0}'.format(resource.id)
        )
        # issue #3245: race condition
        update_dict = {'datastore_active': True}

        # get extras (for entity update) and package_id (for search index update)
        res_query = model.Session.query(
            model.resource_table.c.extras,
            model.resource_table.c.package_id
        ).filter(
            model.Resource.id == data_dict['resource_id']
        )
        extras, package_id = res_query.one()

        # update extras in database for record and its revision
        extras.update(update_dict)
        res_query.update({'extras': extras}, synchronize_session=False)

        model.Session.query(model.resource_revision_table).filter(
            model.ResourceRevision.id == data_dict['resource_id'],
            model.ResourceRevision.current == True  # noqa: '==' builds SQL, 'is' does not
        ).update({'extras': extras}, synchronize_session=False)

        model.Session.commit()

        # get package with updated resource from solr
        # find changed resource, patch it and reindex package
        psi = search.PackageSearchIndex()
        solr_query = search.PackageSearchQuery()
        q = {
            'q': 'id:"{0}"'.format(package_id),
            'fl': 'data_dict',
            'wt': 'json',
            'fq': 'site_id:"%s"' % config.get('ckan.site_id'),
            'rows': 1
        }
        for record in solr_query.run(q)['results']:
            solr_data_dict = json.loads(record['data_dict'])
            for resource in solr_data_dict['resources']:
                if resource['id'] == data_dict['resource_id']:
                    resource.update(update_dict)
                    psi.index_package(solr_data_dict)
                    break

    result.pop('id', None)
    result.pop('private', None)
    result.pop('connection_url')
    return result
Example #26
def datastore_create(context, data_dict):
    '''Adds a new table to the DataStore.

    The datastore_create action allows you to post JSON data to be
    stored against a resource. This endpoint also supports altering tables,
    aliases and indexes and bulk insertion. This endpoint can be called
    multiple times to initially insert more data, add fields, change the
    aliases or indexes as well as the primary keys.

    To create an empty datastore resource and a CKAN resource at the same time,
    provide ``resource`` with a valid ``package_id`` and omit the
    ``resource_id``.

    If you want to create a datastore resource from the content of a file,
    provide ``resource`` with a valid ``url``.

    See :ref:`fields` and :ref:`records` for details on how to lay out records.

    :param resource_id: resource id that the data is going to be stored
                        against.
    :type resource_id: string
    :param force: set to True to edit a read-only resource
    :type force: bool (optional, default: False)
    :param resource: resource dictionary that is passed to
        :meth:`~ckan.logic.action.create.resource_create`.
        Use instead of ``resource_id`` (optional)
    :type resource: dictionary
    :param aliases: names for read-only aliases of the resource. (optional)
    :type aliases: list or comma separated string
    :param fields: fields/columns and their extra metadata. (optional)
    :type fields: list of dictionaries
    :param records: the data, eg: [{"dob": "2005", "some_stuff": ["a", "b"]}]
                    (optional)
    :type records: list of dictionaries
    :param primary_key: fields that represent a unique key (optional)
    :type primary_key: list or comma separated string
    :param indexes: indexes on table (optional)
    :type indexes: list or comma separated string
    :param triggers: trigger functions to apply to this table on update/insert.
        functions may be created with
        :meth:`~ckanext.datastore.logic.action.datastore_function_create`.
        eg: [
        {"function": "trigger_clean_reference"},
        {"function": "trigger_check_codes"}]
    :type triggers: list of dictionaries
    :param calculate_record_count: updates the stored count of records, used to
        optimize datastore_search in combination with the
        `total_estimation_threshold` parameter. If doing a series of requests
        to change a resource, you only need to set this to True on the last
        request.
    :type calculate_record_count: bool (optional, default: False)

    Please note that setting the ``aliases``, ``indexes`` or ``primary_key``
    replaces the existing aliases or constraints. Setting ``records`` appends
    the provided records to the resource.

    **Results:**

    :returns: The newly created data object, excluding the ``records`` passed.
    :rtype: dictionary

    See :ref:`fields` and :ref:`records` for details on how to lay out records.

    '''
    backend = DatastoreBackend.get_active_backend()
    schema = context.get('schema', dsschema.datastore_create_schema())
    records = data_dict.pop('records', None)
    resource = data_dict.pop('resource', None)
    data_dict, errors = _validate(data_dict, schema, context)
    resource_dict = None
    if records:
        data_dict['records'] = records
    if resource:
        data_dict['resource'] = resource
    if errors:
        raise p.toolkit.ValidationError(errors)

    p.toolkit.check_access('datastore_create', context, data_dict)

    if 'resource' in data_dict and 'resource_id' in data_dict:
        raise p.toolkit.ValidationError({
            'resource': ['resource cannot be used with resource_id']
        })

    if 'resource' not in data_dict and 'resource_id' not in data_dict:
        raise p.toolkit.ValidationError({
            'resource_id': ['resource_id or resource required']
        })

    if 'resource' in data_dict:
        has_url = 'url' in data_dict['resource']
        # A datastore-only resource does not have a url in the db
        data_dict['resource'].setdefault('url', '_datastore_only_resource')
        resource_dict = p.toolkit.get_action('resource_create')(
            context, data_dict['resource'])
        data_dict['resource_id'] = resource_dict['id']

        # create resource from file
        if has_url:
            if not p.plugin_loaded('datapusher'):
                raise p.toolkit.ValidationError({'resource': [
                    'The datapusher has to be enabled.']})
            p.toolkit.get_action('datapusher_submit')(context, {
                'resource_id': resource_dict['id'],
                'set_url_type': True
            })
            # since we'll overwrite the datastore resource anyway, we
            # don't need to create it here
            return

        # create empty resource
        else:
            # no need to set the full url because it will be set in before_show
            resource_dict['url_type'] = 'datastore'
            p.toolkit.get_action('resource_update')(context, resource_dict)
    else:
        if not data_dict.pop('force', False):
            resource_id = data_dict['resource_id']
            _check_read_only(context, resource_id)

    # validate aliases
    aliases = datastore_helpers.get_list(data_dict.get('aliases', []))
    for alias in aliases:
        if not datastore_helpers.is_valid_table_name(alias):
            raise p.toolkit.ValidationError({
                'alias': [u'"{0}" is not a valid alias name'.format(alias)]
            })

    try:
        result = backend.create(context, data_dict)
    except InvalidDataError as err:
        raise p.toolkit.ValidationError(text_type(err))

    if data_dict.get('calculate_record_count', False):
        backend.calculate_record_count(data_dict['resource_id'])

    # Set the datastore_active flag on the resource if necessary
    model = _get_or_bust(context, 'model')
    resobj = model.Resource.get(data_dict['resource_id'])
    if resobj.extras.get('datastore_active') is not True:
        log.debug(
            'Setting datastore_active=True on resource {0}'.format(resobj.id)
        )
        set_datastore_active_flag(model, data_dict, True)

    result.pop('id', None)
    result.pop('connection_url', None)
    result.pop('records', None)
    return result
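
As a usage illustration for the docstring above, a minimal sketch of invoking the action through the plugins toolkit; the resource id, field names and records are invented for the example, and the ``nested`` type mirrors the JSON fields handled elsewhere in this file:

import ckan.plugins.toolkit as toolkit

# hypothetical call shaped after the parameters documented above
result = toolkit.get_action('datastore_create')(None, {
    'resource_id': 'aaaa-bbbb-cccc',   # made-up existing resource id
    'force': True,                     # permit editing a read-only resource
    'fields': [
        {'id': 'dob', 'type': 'text'},
        {'id': 'some_stuff', 'type': 'nested'},
    ],
    'records': [{'dob': '2005', 'some_stuff': ['a', 'b']}],
    'primary_key': 'dob',
    'indexes': 'dob',
    'calculate_record_count': True,    # only needed on the last request
})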
Example #27
0
def datastore_create(context, data_dict):
    '''Adds a new table to the DataStore.

    The datastore_create action allows you to post JSON data to be
    stored against a resource. The endpoint also supports altering tables,
    aliases and indexes, as well as bulk insertion. It can be called multiple
    times to insert more data, add fields, or change the aliases, indexes
    and primary keys.

    To create an empty datastore resource and a CKAN resource at the same time,
    provide ``resource`` with a valid ``package_id`` and omit the ``resource_id``.

    If you want to create a datastore resource from the content of a file,
    provide ``resource`` with a valid ``url``.

    See :ref:`fields` and :ref:`records` for details on how to lay out records.

    :param resource_id: resource id that the data is going to be stored against.
    :type resource_id: string
    :param force: set to True to edit a read-only resource
    :type force: bool (optional, default: False)
    :param resource: resource dictionary that is passed to
        :meth:`~ckan.logic.action.create.resource_create`.
        Use instead of ``resource_id`` (optional)
    :type resource: dictionary
    :param aliases: names for read-only aliases of the resource. (optional)
    :type aliases: list or comma separated string
    :param fields: fields/columns and their extra metadata. (optional)
    :type fields: list of dictionaries
    :param records: the data, eg: [{"dob": "2005", "some_stuff": ["a", "b"]}] (optional)
    :type records: list of dictionaries
    :param primary_key: fields that represent a unique key (optional)
    :type primary_key: list or comma separated string
    :param indexes: indexes on table (optional)
    :type indexes: list or comma separated string

    Please note that setting the ``aliases``, ``indexes`` or ``primary_key`` replaces the existing
    aliases or constraints. Setting ``records`` appends the provided records to the resource.

    **Results:**

    :returns: The newly created data object.
    :rtype: dictionary

    See :ref:`fields` and :ref:`records` for details on how to lay out records.

    '''
    schema = context.get('schema', dsschema.datastore_create_schema())
    records = data_dict.pop('records', None)
    resource = data_dict.pop('resource', None)
    data_dict, errors = _validate(data_dict, schema, context)
    if records:
        data_dict['records'] = records
    if resource:
        data_dict['resource'] = resource
    if errors:
        raise p.toolkit.ValidationError(errors)

    p.toolkit.check_access('datastore_create', context, data_dict)

    if 'resource' in data_dict and 'resource_id' in data_dict:
        raise p.toolkit.ValidationError(
            {'resource': ['resource cannot be used with resource_id']})

    if 'resource' not in data_dict and 'resource_id' not in data_dict:
        raise p.toolkit.ValidationError(
            {'resource_id': ['resource_id or resource required']})

    if 'resource' in data_dict:
        has_url = 'url' in data_dict['resource']
        # A datastore-only resource does not have a url in the db
        data_dict['resource'].setdefault('url', '_datastore_only_resource')
        res = p.toolkit.get_action('resource_create')(context,
                                                      data_dict['resource'])
        data_dict['resource_id'] = res['id']

        # create resource from file
        if has_url:
            if not p.plugin_loaded('datapusher'):
                raise p.toolkit.ValidationError(
                    {'resource': ['The datapusher has to be enabled.']})
            p.toolkit.get_action('datapusher_submit')(context, {
                'resource_id': res['id'],
                'set_url_type': True
            })
            # since we'll overwrite the datastore resource anyway, we
            # don't need to create it here
            return

        # create empty resource
        else:
            # no need to set the full url because it will be set in before_show
            res['url_type'] = 'datastore'
            p.toolkit.get_action('resource_update')(context, res)
    else:
        if not data_dict.pop('force', False):
            resource_id = data_dict['resource_id']
            _check_read_only(context, resource_id)

    data_dict['connection_url'] = pylons.config['ckan.datastore.write_url']

    # validate aliases
    aliases = datastore_helpers.get_list(data_dict.get('aliases', []))
    for alias in aliases:
        if not db._is_valid_table_name(alias):
            raise p.toolkit.ValidationError(
                {'alias': [u'"{0}" is not a valid alias name'.format(alias)]})

    # create a private datastore resource, if necessary
    model = _get_or_bust(context, 'model')
    resource = model.Resource.get(data_dict['resource_id'])
    legacy_mode = 'ckan.datastore.read_url' not in pylons.config
    if not legacy_mode and resource.package.private:
        data_dict['private'] = True

    try:
        result = db.create(context, data_dict)
    except db.InvalidDataError as err:
        raise p.toolkit.ValidationError(str(err))

    result.pop('id', None)
    result.pop('private', None)
    result.pop('connection_url')
    return result
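
Since this variant validates ``aliases`` up front, a short sketch of how the resulting read-only views are typically used; the names are invented, and ``datastore_search`` accepting an alias in place of the resource id is the documented way to read through them:

import ckan.plugins.toolkit as toolkit

# create the table with a read-only alias (illustrative names)
toolkit.get_action('datastore_create')(None, {
    'resource_id': 'aaaa-bbbb-cccc',
    'force': True,
    'aliases': 'budget_2014',          # materialised as CREATE VIEW
    'records': [{'amount': 42}],
})

# the alias can then stand in for the resource id on reads
rows = toolkit.get_action('datastore_search')(None, {
    'resource_id': 'budget_2014',
})['records']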
Example #28
0
File: db.py Project: morty/ckan
def create_indexes(context, data_dict):
    indexes = datastore_helpers.get_list(data_dict.get('indexes'))
    # primary key is not a real primary key
    # it's just a unique key
    primary_key = datastore_helpers.get_list(data_dict.get('primary_key'))

    # index and primary key could be [],
    # which means that indexes should be deleted
    if indexes is None and primary_key is None:
        return

    sql_index_tmpl = u'CREATE {unique} INDEX {name} ON "{res_id}"'
    sql_index_string_method = sql_index_tmpl + u' USING {method}({fields})'
    sql_index_string = sql_index_tmpl + u' ({fields})'
    sql_index_strings = []

    fields = _get_fields(context, data_dict)
    field_ids = _pluck('id', fields)
    json_fields = [x['id'] for x in fields if x['type'] == 'nested']

    def generate_index_name():
        # pg 9.0+ do not require an index name
        if _pg_version_is_at_least(context['connection'], '9.0'):
            return ''
        else:
            src = string.ascii_letters + string.digits
            random_string = ''.join([random.choice(src) for n in xrange(10)])
            return 'idx_' + random_string

    if indexes is not None:
        _drop_indexes(context, data_dict, False)

        # create index for faster full text search (indexes: gin or gist)
        sql_index_strings.append(
            sql_index_string_method.format(res_id=data_dict['resource_id'],
                                           unique='',
                                           name=generate_index_name(),
                                           method='gist',
                                           fields='_full_text'))
    else:
        indexes = []

    if primary_key is not None:
        _drop_indexes(context, data_dict, True)
        indexes.append(primary_key)

    for index in indexes:
        if not index:
            continue

        index_fields = datastore_helpers.get_list(index)
        for field in index_fields:
            if field not in field_ids:
                raise ValidationError({
                    'index': [('The field "{0}" is not a valid column name.'
                               ).format(field)]
                })
        fields_string = u', '.join([
            '(("{0}").json::text)'.format(field)
            if field in json_fields else '"%s"' % field
            for field in index_fields
        ])
        sql_index_strings.append(
            sql_index_string.format(
                res_id=data_dict['resource_id'],
                unique='unique' if index == primary_key else '',
                name=generate_index_name(),
                fields=fields_string))

    # double any percent signs so the DB-API layer does not treat
    # them as parameter placeholders
    sql_index_strings = [x.replace('%', '%%') for x in sql_index_strings]
    for sql_index_string in sql_index_strings:
        context['connection'].execute(sql_index_string)
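
To make the string assembly above concrete, a standalone reconstruction of the statement the loop builds for one plain field and one ``nested`` (JSON) field; the resource id and field names are invented, and the empty ``unique``/``name`` slots reflect the pg 9.0+ branch of generate_index_name:

# standalone sketch of the fields_string / CREATE INDEX assembly
res_id = 'aaaa-bbbb-cccc'            # made-up resource id
json_fields = ['some_stuff']         # fields whose type is 'nested'
index_fields = ['dob', 'some_stuff']

fields_string = u', '.join([
    '(("{0}").json::text)'.format(f)
    if f in json_fields else '"%s"' % f
    for f in index_fields
])
sql_index_tmpl = u'CREATE {unique} INDEX {name} ON "{res_id}"'
print((sql_index_tmpl + u' ({fields})').format(
    unique='', name='', res_id=res_id, fields=fields_string))
# -> CREATE  INDEX  ON "aaaa-bbbb-cccc" ("dob", (("some_stuff").json::text))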