Code example #1
    def test_big_attachments(self):
        """
        Ensure big fields (e.g. b64-encoded image data) can be imported
        without hitting the limits of the default CSV parser config
        """
        from PIL import Image

        im = Image.new('RGB', (1920, 1080))
        fout = io.BytesIO()

        writer = pycompat.csv_writer(fout, dialect=None)
        writer.writerows([
            [u'name', u'db_datas'],
            [u'foo', base64.b64encode(im.tobytes()).decode('ascii')]
        ])

        import_wizard = self.env['base_import.import'].create({
            'res_model': 'ir.attachment',
            'file': fout.getvalue(),
            'file_type': 'text/csv'
        })
        results = import_wizard.do(
            ['name', 'db_datas'],
            [],
            {'headers': True, 'separator': ',', 'quoting': '"'})
        self.assertFalse(results['messages'], "results should be empty on successful import")
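A minimal standalone sketch (stdlib only, not part of the test) of the limit the
docstring refers to: Python's csv module caps the size of a single field, and a
b64-encoded 1920x1080 RGB image is far beyond that default cap.

    import base64
    import csv

    # Default per-field limit of the stdlib csv parser (131072 in CPython).
    print(csv.field_size_limit())

    # Size of a b64-encoded raw 1920x1080 RGB image: 1920 * 1080 * 3 bytes -> 8294400 chars.
    print(len(base64.b64encode(b'\x00' * (1920 * 1080 * 3))))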
Code example #2
    def _csv_write_rows(self, rows, lineterminator=u'\r\n'):
        """
        Write FEC rows into a file
        It seems that Bercy's bureaucracy is not too happy about the
        empty new line at the End Of File.

        @param {list(list)} rows: the list of rows. Each row is a list of strings
        @param {unicode string} [optional] lineterminator: effective line terminator.
            Not the csv writer's lineterminator parameter;
            the last line written won't be terminated with it.

        @return the value of the file
        """
        fecfile = io.BytesIO()
        writer = pycompat.csv_writer(fecfile, delimiter='|', lineterminator='')

        rows_length = len(rows)
        for i, row in enumerate(rows):
            if not i == rows_length - 1:
                row[-1] += lineterminator
            writer.writerow(row)

        fecvalue = fecfile.getvalue()
        fecfile.close()
        return fecvalue
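A minimal standalone sketch (stdlib csv rather than pycompat.csv_writer, which
writes bytes) of the same trick: the terminator is appended to the last cell of
every row except the final one, so the returned value has no trailing newline.

    import csv
    import io

    rows = [['a', 'b'], ['c', 'd'], ['e', 'f']]
    buf = io.StringIO()
    writer = csv.writer(buf, delimiter='|', lineterminator='')
    for i, row in enumerate(rows):
        if i != len(rows) - 1:
            # terminate the row manually, as _csv_write_rows does
            row = row[:-1] + [row[-1] + '\r\n']
        writer.writerow(row)
    print(repr(buf.getvalue()))  # 'a|b\r\nc|d\r\ne|f' -- no empty line at EOF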
Code example #3
 def _makefile(self, rows):
     f = io.BytesIO()
     writer = pycompat.csv_writer(f, quoting=1)
     writer.writerow(['name', 'counter'])
     for i in range(rows):
         writer.writerow(['n_%d' % i, str(i)])
     return f.getvalue()
Code example #4
    def test_newline_import(self):
        """
        Ensure importing keeps newlines
        """
        output = io.BytesIO()
        writer = pycompat.csv_writer(output, quoting=1)

        data_row = [u"\tfoo\n\tbar", u" \"hello\" \n\n 'world' "]

        writer.writerow([u"name", u"Some Value"])
        writer.writerow(data_row)

        import_wizard = self.env['base_import.import'].create({
            'res_model': 'base_import.tests.models.preview',
            'file': output.getvalue(),
            'file_type': 'text/csv',
        })
        data, _ = import_wizard._convert_import_data(['name', 'somevalue'], {
            'quoting': '"',
            'separator': ',',
            'headers': True
        })

        self.assertItemsEqual(data, [data_row])
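A minimal standalone sketch (stdlib csv; quoting=1 corresponds to csv.QUOTE_ALL)
showing that fully quoted cells round-trip embedded newlines, which is what this
test relies on.

    import csv
    import io

    buf = io.StringIO(newline='')
    csv.writer(buf, quoting=csv.QUOTE_ALL).writerow(["\tfoo\n\tbar"])
    buf.seek(0)
    print(next(csv.reader(buf)))  # ['\tfoo\n\tbar'] -- the newline survives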
Code example #5
    def test_limit_on_lines(self):
        """ The limit option should be a limit on the number of *lines*
        imported at a time, not the number of *records*. This is relevant
        when it comes to embedded o2m.

        A big question is whether we want to round up or down (if the limit
        brings us inside a record). Rounding up (aka finishing up the record
        we're currently parsing) seems like a better idea:

        * if the first record has so many sub-lines it hits the limit we still
          want to import it (it's probably extremely rare but it can happen)
        * if we have one line per record, we probably want to import <limit>
          records not <limit-1>, but if we stop in the middle of the "current
          record" we'd always ignore the last record (I think)
        """
        f = io.BytesIO()
        writer = pycompat.csv_writer(f, quoting=1)
        writer.writerow(['name', 'value/value'])
        for record in range(10):
            writer.writerow(['record_%d' % record, '0'])
            for row in range(1, 10):
                writer.writerow(['', str(row)])

        import_wizard = self.env['base_import.import'].create({
            'res_model': 'base_import.tests.models.o2m',
            'file_type': 'text/csv',
            'file_name': 'things.csv',
            'file': f.getvalue(),
        })
        opts = {'quoting': '"', 'separator': ',', 'headers': True}
        preview = import_wizard.parse_preview({**opts, 'limit': 15})
        self.assertIs(preview['batch'], True)

        results = import_wizard.do(['name', 'value/value'], [], {
            **opts, 'limit': 5
        })
        self.assertFalse(results['messages'])
        self.assertEqual(
            len(results['ids']), 1,
            "should have imported the first record in full, got %s" %
            results['ids'])
        self.assertEqual(results['nextrow'], 10)

        results = import_wizard.do(['name', 'value/value'], [], {
            **opts, 'limit': 15
        })
        self.assertFalse(results['messages'])
        self.assertEqual(
            len(results['ids']), 2,
            "should have importe the first two records, got %s" %
            results['ids'])
        self.assertEqual(results['nextrow'], 20)
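A minimal sketch (stdlib csv, not the wizard itself) of the layout the test
builds: each record spans ten CSV lines, one parent line plus nine o2m lines,
which is why the limit rounds up to whole records.

    import csv
    import io

    buf = io.StringIO()
    writer = csv.writer(buf, quoting=csv.QUOTE_ALL)
    writer.writerow(['name', 'value/value'])
    for record in range(2):
        writer.writerow(['record_%d' % record, '0'])
        for row in range(1, 10):
            writer.writerow(['', str(row)])
    print(buf.getvalue().splitlines()[:3])
    # ['"name","value/value"', '"record_0","0"', '"","1"']
    # With limit=5 only record_0 fits (nextrow 10); with limit=15 the import
    # finishes record_1 as well (nextrow 20), matching the assertions above.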
Code example #6
File: tax2csv.py  Project: babarlhr/eagle-12.1
 def fiscal_pos_map_to_csv(self):
     writer = pycompat.csv_writer(
         open(
             'account.fiscal.'
             'position.tax.template-%s.csv' % self.suffix, 'wb'))
     fiscal_pos_map_iterator = self.iter_fiscal_pos_map()
     keys = next(fiscal_pos_map_iterator)
     writer.writerow(keys)
     for row in fiscal_pos_map_iterator:
         writer.writerow([pycompat.to_text(s) for s in row.values()])
Code example #7
File: tax2csv.py  Project: babarlhr/eagle-12.1
 def taxes_to_csv(self):
     writer = pycompat.csv_writer(
         open('account.tax.template-%s.csv' % self.suffix, 'wb'))
     taxes_iterator = self.iter_taxes()
     keys = next(taxes_iterator)
     writer.writerow(keys[3:] + ['sequence'])
     seq = 100
     for row in sorted(taxes_iterator, key=lambda r: r['description']):
         if not _is_true(row['active']):
             continue
         seq += 1
         if row['parent_id:id']:
             cur_seq = seq + 1000
         else:
             cur_seq = seq
         writer.writerow(
             [pycompat.to_text(v)
              for v in list(row.values())[3:]] + [cur_seq])
Code example #8
    def from_data(self, fields, rows):
        fp = io.BytesIO()
        writer = pycompat.csv_writer(fp, quoting=1)

        writer.writerow(fields)

        for data in rows:
            row = []
            for d in data:
                # Spreadsheet apps tend to detect formulas on leading =, + and -
                if isinstance(d, pycompat.string_types) and d.startswith(
                    ('=', '-', '+')):
                    d = "'" + d

                row.append(pycompat.to_text(d))
            writer.writerow(row)

        return fp.getvalue()
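A minimal standalone sketch (stdlib only; escape_formula is a hypothetical
helper, not part of the source) of the escaping applied above: cells starting
with '=', '-' or '+' get a leading apostrophe so spreadsheet applications treat
them as text instead of formulas.

    def escape_formula(value):
        # hypothetical helper mirroring the leading-character check above
        if isinstance(value, str) and value.startswith(('=', '-', '+')):
            return "'" + value
        return value

    print(escape_formula('=SUM(A1:A3)'))  # '=SUM(A1:A3)
    print(escape_formula('plain text'))   # plain text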
Code example #9
File: tax2csv.py  Project: babarlhr/eagle-12.1
    def tax_codes_to_csv(self):
        writer = pycompat.csv_writer(
            open('account.tax.code.template-%s.csv' % self.suffix, 'wb'))
        tax_codes_iterator = self.iter_tax_codes()
        keys = next(tax_codes_iterator)
        writer.writerow(keys)

        # write structure tax codes
        tax_codes = {}  # code: id
        for row in tax_codes_iterator:
            tax_code = row['code']
            if tax_code in tax_codes:
                raise RuntimeError('duplicate tax code %s' % tax_code)
            tax_codes[tax_code] = row['id']
            writer.writerow([pycompat.to_text(v) for v in row.values()])

        # read taxes and add leaf tax codes
        new_tax_codes = {}  # id: parent_code

        def add_new_tax_code(tax_code_id, new_name, new_parent_code):
            if not tax_code_id:
                return
            name, parent_code = new_tax_codes.get(tax_code_id, (None, None))
            if parent_code and parent_code != new_parent_code:
                raise RuntimeError('tax code "%s" already exists with '
                                   'parent %s while trying to add it with '
                                   'parent %s' %
                                   (tax_code_id, parent_code, new_parent_code))
            else:
                new_tax_codes[tax_code_id] = (new_name, new_parent_code)

        taxes_iterator = self.iter_taxes()
        next(taxes_iterator)
        for row in taxes_iterator:
            if not _is_true(row['active']):
                continue
            if row['child_depend'] and row['amount'] != 1:
                raise RuntimeError('amount must be one if child_depend '
                                   'for %s' % row['id'])
            # base parent
            base_code = row['BASE_CODE']
            if not base_code or base_code == '/':
                base_code = 'NA'
            if base_code not in tax_codes:
                raise RuntimeError('undefined tax code %s' % base_code)
            if base_code != 'NA':
                if row['child_depend']:
                    raise RuntimeError('base code specified '
                                       'with child_depend for %s' % row['id'])
            if not row['child_depend']:
                # ... in lux, we have the same code for invoice and refund
                if base_code != 'NA':
                    assert row['base_code_id:id'], \
                        'missing base_code_id for %s' % row['id']
                assert row['ref_base_code_id:id'] == row['base_code_id:id']
                add_new_tax_code(row['base_code_id:id'],
                                 'Base - ' + row['name'], base_code)
            # tax parent
            tax_code = row['TAX_CODE']
            if not tax_code or tax_code == '/':
                tax_code = 'NA'
            if tax_code not in tax_codes:
                raise RuntimeError('undefined tax code %s' % tax_code)
            if tax_code == 'NA':
                if row['amount'] and not row['child_depend']:
                    raise RuntimeError('TAX_CODE not specified '
                                       'for non-zero tax %s' % row['id'])
                if row['tax_code_id:id']:
                    raise RuntimeError('tax_code_id specified '
                                       'for tax %s' % row['id'])
            else:
                if row['child_depend']:
                    raise RuntimeError('TAX_CODE specified '
                                       'with child_depend for %s' % row['id'])
                if not row['amount']:
                    raise RuntimeError('TAX_CODE specified '
                                       'for zero tax %s' % row['id'])
                if not row['tax_code_id:id']:
                    raise RuntimeError('tax_code_id not specified '
                                       'for tax %s' % row['id'])
            if not row['child_depend'] and row['amount']:
                # ... in lux, we have the same code for invoice and refund
                assert row['tax_code_id:id'], \
                    'missing tax_code_id for %s' % row['id']
                assert row['ref_tax_code_id:id'] == row['tax_code_id:id']
                add_new_tax_code(row['tax_code_id:id'],
                                 'Taxe - ' + row['name'], tax_code)

        for tax_code_id in sorted(new_tax_codes):
            name, parent_code = new_tax_codes[tax_code_id]
            writer.writerow([
                tax_code_id, u'lu_tct_m' + parent_code,
                tax_code_id.replace('lu_tax_code_template_', u''), u'1', u'',
                pycompat.to_text(name), u''
            ])