Example 1
    def list(self, request, *args, **kwargs):

        # get schema, table and column_names from the querystring
        schema_name = self.request.GET.get('schema')
        table_name = self.request.GET.get('table')
        column_names = self.request.GET.getlist('column')

        # get the columns which the user is allowed to access
        user_columns = get_user_columns(self.request.user, schema_name, table_name)

        if user_columns:
            # get the row query params from the request
            ordering, page, page_size, search, filters = self._get_query_params(user_columns)

            # filter the requested column names by the allowed columns
            if column_names:
                column_names = [column.name for column in user_columns if column.name in column_names]
            else:
                column_names = [column.name for column in user_columns]

            # get database adapter
            adapter = DatabaseAdapter()

            # query the database for the total number of rows
            count = adapter.count_rows(schema_name, table_name, column_names, search, filters)

            # query the paginated rowset
            results = adapter.fetch_rows(schema_name, table_name, column_names, ordering, page, page_size, search, filters)

            # return an ordered dict to be sent as JSON
            return Response(OrderedDict((
                ('count', count),
                ('results', fix_for_json(results)),
                ('next', self._get_next_url(page, page_size, count)),
                ('previous', self._get_previous_url(page))
            )))

        # the user has no access to this table, raise a 404
        raise NotFound()
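
All of the list views in these examples call a `_get_query_params` helper that is not included in the snippets. Below is a minimal sketch of what such a helper could look like, assuming DRF-style request access and one query parameter per allowed column; the defaults, the parameter names, and the `.name` attribute on the column objects are assumptions for illustration, not the project's actual implementation:

    def _get_query_params(self, columns):
        # hypothetical sketch: extract row-level query parameters from the request
        ordering = self.request.GET.get('ordering')
        page = int(self.request.GET.get('page', 1))
        page_size = int(self.request.GET.get('page_size', 25))
        search = self.request.GET.get('search')

        # treat any query parameter matching an allowed column as a filter
        filters = {}
        for column in columns:
            value = self.request.GET.get(column.name)
            if value is not None:
                filters[column.name] = value

        return ordering, page, page_size, search, filters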
Example 2
    def rows(self, column_names, ordering, page, page_size, search, filters):
        if self.phase == self.PHASE_COMPLETED:
            # check that the requested columns actually exist in the job's table
            errors = {}

            for column_name in column_names:
                if column_name not in self.column_names:
                    errors[column_name] = _('Column not found.')

            if errors:
                raise ValidationError(errors)

            # get database adapter
            adapter = DatabaseAdapter()

            try:
                # query the database for the total number of rows
                count = adapter.count_rows(self.schema_name, self.table_name,
                                           column_names, search, filters)

                # query the paginated rowset
                rows = adapter.fetch_rows(self.schema_name, self.table_name,
                                          column_names, ordering, page,
                                          page_size, search, filters)

                # flatten the list if only one column is retrieved
                if len(column_names) == 1:
                    return count, [element for row in rows for element in row]
                else:
                    return count, rows

            except ProgrammingError:
                return 0, []

        else:
            raise ValidationError({'phase': ['Job is not COMPLETED.']})
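
The flattening at the end is worth spelling out: even for a single requested column, `fetch_rows` returns one tuple per row, and the comprehension unwraps those tuples into a plain list. A standalone illustration:

    # a single-column rowset comes back as one tuple per row
    rows = [(42,), (43,), (44,)]
    flat = [element for row in rows for element in row]
    assert flat == [42, 43, 44]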
Example 3
    def list(self, request, *args, **kwargs):
        # get the row query params from the request
        ordering, page, page_size, search, filters = self._get_query_params(
            settings.ARCHIVE_COLUMNS)

        # get database adapter
        adapter = DatabaseAdapter()

        # get the schema_name and the table_name from the settings
        schema_name = settings.ARCHIVE_SCHEMA
        table_name = settings.ARCHIVE_TABLE

        # get collections for this user and add them to the filters
        collections = [
            collection.name for collection in
            Collection.objects.filter_by_access_level(request.user)
        ]
        filters['collection'] = collections

        # get the name of the columns
        column_names = [column['name'] for column in settings.ARCHIVE_COLUMNS]

        # query the database for the total number of rows
        count = adapter.count_rows(schema_name, table_name, column_names,
                                   search, filters)

        # query the paginated rowset
        results = adapter.fetch_rows(schema_name, table_name, column_names,
                                     ordering, page, page_size, search,
                                     filters)

        # return an ordered dict to be sent as JSON
        return Response(OrderedDict((
            ('count', count),
            ('results', results),
            ('next', self._get_next_url(page, page_size, count)),
            ('previous', self._get_previous_url(page))
        )))
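
The pagination helpers `_get_next_url` and `_get_previous_url` are also not part of the snippets. A plausible sketch using DRF's `replace_query_param` utility and assuming one-based page numbers; the project's real helpers may construct these URLs differently:

    from rest_framework.utils.urls import replace_query_param

    def _get_next_url(self, page, page_size, count):
        # return the URL for the next page, or None if this is the last page
        if page * page_size < count:
            url = self.request.build_absolute_uri()
            return replace_query_param(url, 'page', page + 1)
        return None

    def _get_previous_url(self, page):
        # return the URL for the previous page, or None if this is the first page
        if page > 1:
            url = self.request.build_absolute_uri()
            return replace_query_param(url, 'page', page - 1)
        return None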
Example 4
    def process(self):
        # get collections for the owner of this download job
        collections = [
            collection.name for collection in
            Collection.objects.filter_by_access_level(self.owner)
        ]

        # get database adapter
        adapter = DatabaseAdapter()

        # get the schema_name and the table_name from the settings
        schema_name = settings.ARCHIVE_SCHEMA
        table_name = settings.ARCHIVE_TABLE

        # prepare list of files for this archive job
        files = []

        if 'file_ids' in self.data:
            if isinstance(self.data, QueryDict):
                file_ids = self.data.getlist('file_ids')
            else:
                file_ids = self.data.get('file_ids')

            for file_id in file_ids:
                # validate that the file_id is a valid UUID4
                try:
                    uuid.UUID(file_id, version=4)
                except ValueError:
                    raise ValidationError({
                        'files': [_('One or more of the identifiers are not valid UUIDs.')]
                    })

                # fetch the path for this file from the database
                row = adapter.fetch_row(schema_name, table_name, ['path'], filters={
                    'id': file_id,
                    'collection': collections
                })

                # append the file to the list of files only if it exists in the database and on the filesystem
                if row and os.path.isfile(os.path.join(settings.ARCHIVE_BASE_PATH, row[0])):
                    files.append(row[0])
                else:
                    raise ValidationError({
                        'files': [_('One or more of the files cannot be found.')]
                    })

        elif 'search' in self.data:
            # retrieve the paths of all files matching the search criteria
            rows = adapter.fetch_rows(schema_name,
                                      table_name,
                                      page_size=0,
                                      search=self.data['search'],
                                      filters={'collection': collections})

            # get the index of the path column in the row
            path_index = next(
                (i for i, column in enumerate(settings.ARCHIVE_COLUMNS)
                 if column['name'] == 'path'))

            for row in rows:
                # append the file to the list of files only if it exists on the filesystem
                if os.path.isfile(os.path.join(settings.ARCHIVE_BASE_PATH, row[path_index])):
                    files.append(row[path_index])
                else:
                    raise ValidationError({
                        'files': [_('One or more of the files cannot be found.')]
                    })

        else:
            raise ValidationError([_('No data received.')])

        # set the list of files for this archive job
        self.files = files

        # set clean flag
        self.is_clean = True
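
The per-file UUID check relies on `uuid.UUID` raising `ValueError` for malformed input. Below is a standalone sketch of the same validation, with `is_valid_uuid4` as a hypothetical helper name; note that passing `version=4` does not reject well-formed UUIDs of other versions, it only overrides the version bits, so this check validates format rather than version:

    import uuid

    def is_valid_uuid4(value):
        # ValueError is raised for strings that are not well-formed UUIDs;
        # TypeError/AttributeError cover non-string input
        try:
            uuid.UUID(value, version=4)
        except (ValueError, TypeError, AttributeError):
            return False
        return True

    assert is_valid_uuid4('2b8f0968-9e68-4e4f-9e43-1d0d1b6bfa7a') is True
    assert is_valid_uuid4('not-a-uuid') is False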