def csv_iterator(self,
                     attributes,
                     batch_size=None,
                     filtering=None,
                     headers=True):
        """Iterate over records in the schema, rendering each as a CSV row.

        Newlines within fields will be escaped as '\\n' to ensure that each
        line contains an entire record.

        If a requested attribute is an object or plural it will be converted
        to a JSON string.

        Args:
            attributes: list of attribute paths to include in each row
            batch_size: maximum results to return per batch
            filtering: filter to apply
            headers:
                if falsey, don't include headers
                if True, headers will be the attribute path as specified
                if a dict, then the headers will be looked up from it using the
                    attribute path - if lookup fails, fallback to using the
                    attribute path

        Yields:
            a CSV row as a string
        """
        if headers:
            # A bare ``True`` means "no custom names": an empty mapping makes
            # every lookup fall back to the attribute path itself.
            name_map = {} if headers is True else headers
            yield to_csv([name_map.get(attr, attr) for attr in attributes])

        # Only pass batch_size/filtering through when the caller supplied
        # them, so self.iterator keeps its own defaults otherwise.
        iterator_kwargs = {'attributes': attributes}
        if batch_size is not None:
            iterator_kwargs['batch_size'] = batch_size
        if filtering is not None:
            iterator_kwargs['filtering'] = filtering

        for record in self.iterator(**iterator_kwargs):
            yield to_csv([dot_lookup(record, attr) for attr in attributes])
# Example #2
# 0
 def test_to_csv_with_no_delimiter(self):
     """Default delimiter is a comma; fields containing commas get quoted."""
     # setup (renamed from `input`, which shadows the builtin)
     fields = ['first', '2nd,with,commas', 'lasty']
     expected = 'first,"2nd,with,commas",lasty\r\n'
     # call
     result = to_csv(fields)
     # test
     self.assertEqual(result, expected)
    def csv_iterator(self, attributes, batch_size=None, filtering=None, headers=True):
        """Iterate over records in the schema and format as CSV.

        Newlines within fields will be escaped as '\\n' to ensure that each
        line contains an entire record.

        If a requested attribute is an object or plural it will be converted
        to a JSON string.

        Args:
            attributes: list of attribute paths to include in each row
            batch_size: maximum results to return per batch
            filtering: filter to apply
            headers:
                if falsey, don't include headers
                if True, headers will be the attribute path as specified
                if a dict, then the headers will be looked up from it using the
                    attribute path - if lookup fails, fallback to using the
                    attribute path

        Yields:
            a CSV row as a string
        """
        if headers:
            # ``True`` means "use the raw attribute paths as column names".
            header_map = {} if headers is True else headers
            first_row = []
            for path in attributes:
                first_row.append(header_map[path] if path in header_map else path)
            yield to_csv(first_row)

        # Forward batch_size/filtering only when explicitly provided, so the
        # underlying iterator's defaults still apply otherwise.
        query = {'attributes': attributes}
        for key, value in (('batch_size', batch_size), ('filtering', filtering)):
            if value is not None:
                query[key] = value

        for record in self.iterator(**query):
            yield to_csv([dot_lookup(record, path) for path in attributes])
# Example #4
# 0
 def test_to_csv_with_semicolon_delimiter(self):
     """With a ';' delimiter, comma-bearing fields need no quoting."""
     # setup (renamed from `input`, which shadows the builtin)
     fields = ['first', '2nd,with,commas', 'lasty']
     expected = 'first;2nd,with,commas;lasty\r\n'
     delimiter = ';'
     # call
     result = to_csv(fields, delimiter=delimiter)
     # test
     self.assertEqual(result, expected)