Example #1
    def test_big_attachments(self):
        """
        Ensure big fields (e.g. b64-encoded image data) can be imported and
        we're not hitting limits of the default CSV parser config
        """
        from PIL import Image

        im = Image.new('RGB', (1920, 1080))
        fout = io.BytesIO()

        writer = pycompat.csv_writer(fout, dialect=None)
        writer.writerows([
            [u'name', u'db_datas'],
            [u'foo', base64.b64encode(im.tobytes()).decode('ascii')]
        ])

        import_wizard = self.env['base_import.import'].create({
            'res_model': 'ir.attachment',
            'file': fout.getvalue(),
            'file_type': 'text/csv'
        })
        results = import_wizard.do(
            ['name', 'db_datas'],
            {'headers': True, 'separator': ',', 'quoting': '"'})
        self.assertFalse(results, "results should be empty on successful import")
Example #2
    def test_newline_import(self):
        """
        Ensure importing keeps newlines
        """
        output = io.BytesIO()
        writer = pycompat.csv_writer(output, quoting=1)

        data_row = [u"\tfoo\n\tbar", u" \"hello\" \n\n 'world' "]

        writer.writerow([u"name", u"Some Value"])
        writer.writerow(data_row)

        import_wizard = self.env['base_import.import'].create({
            'res_model': 'base_import.tests.models.preview',
            'file': output.getvalue(),
            'file_type': 'text/csv',
        })
        data, _ = import_wizard._convert_import_data(['name', 'somevalue'], {
            'quoting': '"',
            'separator': ',',
            'headers': True
        })

        self.assertItemsEqual(data, [data_row])
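A side note on the magic number in these tests: quoting=1 is the stdlib constant csv.QUOTE_ALL, and quoting every field is what lets embedded newlines and tabs survive a write/read round trip. A minimal standalone sketch of that behaviour using the stdlib csv module and a StringIO (no Odoo, no pycompat wrapper):

import csv
import io

# quoting=1 in the snippets above is the stdlib constant csv.QUOTE_ALL
assert csv.QUOTE_ALL == 1

buf = io.StringIO()
writer = csv.writer(buf, quoting=csv.QUOTE_ALL)
data_row = ["\tfoo\n\tbar", " \"hello\" \n\n 'world' "]
writer.writerow(["name", "Some Value"])
writer.writerow(data_row)

buf.seek(0)
rows = list(csv.reader(buf))
assert rows[1] == data_row  # embedded newlines, tabs and quotes are preserved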
Example #3
 def _makefile(self, rows):
     f = io.BytesIO()
     writer = pycompat.csv_writer(f, quoting=1)
     writer.writerow(['name', 'counter'])
     for i in range(rows):
         writer.writerow(['n_%d' % i, str(i)])
     return f.getvalue()
Example #4
    def _csv_write_rows(self, rows, lineterminator=u'\r\n'):
        """
        Write FEC rows into a file
        It seems that Bercy's bureaucracy is not too happy about the
        empty new line at the End Of File.

        @param {list(list)} rows: the list of rows. Each row is a list of strings
        @param {unicode string} [optional] lineterminator: effective line terminator
            Has nothing to do with the csv writer parameter
            The last line written won't be terminated with it

        @return the value of the file
        """
        fecfile = io.BytesIO()
        writer = pycompat.csv_writer(fecfile, delimiter='|', lineterminator='')

        rows_length = len(rows)
        for i, row in enumerate(rows):
            if not i == rows_length - 1:
                row[-1] += lineterminator
            writer.writerow(row)

        fecvalue = fecfile.getvalue()
        fecfile.close()
        return fecvalue
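The docstring above describes the trick rather than showing its effect: the writer's own line terminator is disabled and the terminator is appended to the last field of every row except the final one, so the file never ends with an empty line. A minimal standalone sketch of the same idea with the stdlib csv module and a StringIO (instead of Odoo's pycompat wrapper and BytesIO):

import csv
import io

def csv_write_rows(rows, lineterminator='\r\n'):
    # The writer itself never terminates lines; the terminator is glued onto the
    # last field of every row except the final one, so there is no trailing newline.
    buf = io.StringIO()
    writer = csv.writer(buf, delimiter='|', lineterminator='')
    last = len(rows) - 1
    for i, row in enumerate(rows):
        if i != last:
            row = row[:-1] + [row[-1] + lineterminator]  # copy, don't mutate the caller's row
        writer.writerow(row)
    return buf.getvalue()

print(repr(csv_write_rows([['a', 'b'], ['c', 'd']])))  # 'a|b\r\nc|d' -- no newline at EOF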
Example #5
    def test_big_attachments(self):
        """
        Ensure big fields (e.g. b64-encoded image data) can be imported and
        we're not hitting limits of the default CSV parser config
        """
        from PIL import Image

        im = Image.new('RGB', (1920, 1080))
        fout = io.BytesIO()

        writer = pycompat.csv_writer(fout, dialect=None)
        writer.writerows(
            [[u'name', u'db_datas'],
             [u'foo', base64.b64encode(im.tobytes()).decode('ascii')]])

        import_wizard = self.env['base_import.import'].create({
            'res_model': 'ir.attachment',
            'file': fout.getvalue(),
            'file_type': 'text/csv'
        })
        results = import_wizard.do(['name', 'db_datas'], [], {
            'headers': True,
            'separator': ',',
            'quoting': '"'
        })
        self.assertFalse(results['messages'],
                         "results should be empty on successful import")
Example #6
    def _csv_write_rows(self, rows, lineterminator=u'\r\n'):
        """
        Write FEC rows into a file
        It seems that Bercy's bureaucracy is not too happy about the
        empty new line at the End Of File.

        @param {list(list)} rows: the list of rows. Each row is a list of strings
        @param {unicode string} [optional] lineterminator: effective line terminator
            Has nothing to do with the csv writer parameter
            The last line written won't be terminated with it

        @return the value of the file
        """
        fecfile = io.BytesIO()
        writer = pycompat.csv_writer(fecfile, delimiter='|', lineterminator='')

        rows_length = len(rows)
        for i, row in enumerate(rows):
            if not i == rows_length - 1:
                # append the terminator to the last field (not as an extra column),
                # otherwise every line but the last would end with a stray delimiter
                row[-1] += lineterminator
            writer.writerow(row)

        fecvalue = fecfile.getvalue()
        fecfile.close()
        return fecvalue
Example #7
    def action_csv(self):
        output = io.BytesIO()
        writer = pycompat.csv_writer(output, quoting=1)

        writer.writerow(['Company', 'FirstName', 'LastName', 'Email', 'Phone', 'Quantity', 'CardValue', 'ToName', 'CarrierMessage', 'FromName'])
        for rec in self:
            writer.writerow([
                rec.partner_id.parent_id.name or '',
                rec.partner_id.name.split(' ')[0],
                rec.partner_id.name.split(' ', 1)[1] if len(rec.partner_id.name.split(' ')) > 1 else '',
                rec.partner_id.email or '',
                rec.partner_id.phone or '',
                '1',
                '100',
                rec.partner_id.name,
                'Thank you for your referral and thank you for being part of the SolarSamritan movement.\n\nBest Wishes from the Team at Beyond Solar.',
                'BeyondSolar'
            ])

        attachment = self.env['ir.attachment'].create({
            'name': "DigitalCorporateOrderUpload.csv",
            'datas': base64.b64encode(output.getvalue()),
            'type': 'binary',
            'mimetype': 'text/csv',
        })

        return {
            'name': "Referral Upload",
            'type': 'ir.actions.act_url',
            'url': f'/web/content/{attachment.id}/DigitalCorporateOrderUpload.csv',
            'target': 'new'
        }
Example #8
 def fiscal_pos_map_to_csv(self):
     writer = pycompat.csv_writer(open('account.fiscal.'
                                  'position.tax.template-%s.csv' %
                                  self.suffix, 'wb'))
     fiscal_pos_map_iterator = self.iter_fiscal_pos_map()
     keys = next(fiscal_pos_map_iterator)
     writer.writerow(keys)
     for row in fiscal_pos_map_iterator:
         writer.writerow([pycompat.to_text(s) for s in row.values()])
Example #9
    def test_limit_on_lines(self):
        """ The limit option should be a limit on the number of *lines*
        imported at a time, not the number of *records*. This is relevant
        when it comes to embedded o2m.

        A big question is whether we want to round up or down (if the limit
        brings us inside a record). Rounding up (aka finishing up the record
        we're currently parsing) seems like a better idea:

        * if the first record has so many sub-lines it hits the limit we still
          want to import it (it's probably extremely rare but it can happen)
        * if we have one line per record, we probably want to import <limit>
          records not <limit-1>, but if we stop in the middle of the "current
          record" we'd always ignore the last record (I think)
        """
        f = io.BytesIO()
        writer = pycompat.csv_writer(f, quoting=1)
        writer.writerow(['name', 'value/value'])
        for record in range(10):
            writer.writerow(['record_%d' % record, '0'])
            for row in range(1, 10):
                writer.writerow(['', str(row)])

        import_wizard = self.env['base_import.import'].create({
            'res_model': 'base_import.tests.models.o2m',
            'file_type': 'text/csv',
            'file_name': 'things.csv',
            'file': f.getvalue(),
        })
        opts = {'quoting': '"', 'separator': ',', 'headers': True}
        preview = import_wizard.parse_preview({**opts, 'limit': 15})
        self.assertIs(preview['batch'], True)

        results = import_wizard.do(['name', 'value/value'], [], {
            **opts, 'limit': 5
        })
        self.assertFalse(results['messages'])
        self.assertEqual(
            len(results['ids']), 1,
            "should have imported the first record in full, got %s" %
            results['ids'])
        self.assertEqual(results['nextrow'], 10)

        results = import_wizard.do(['name', 'value/value'], [], {
            **opts, 'limit': 15
        })
        self.assertFalse(results['messages'])
        self.assertEqual(
            len(results['ids']), 2,
            "should have importe the first two records, got %s" %
            results['ids'])
        self.assertEqual(results['nextrow'], 20)
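The "round up to the end of the current record" rule from the docstring can be illustrated without the wizard at all. The helper below is a hypothetical sketch, not the wizard's implementation: given parsed rows where a non-empty first cell starts a new record, it returns how many rows one batch with a given line limit actually consumes, matching the nextrow values asserted above.

def batch_size(rows, limit):
    """Rows consumed by one batch: at least `limit` lines, rounded up to a record boundary."""
    taken = 0
    for row in rows:
        starts_record = bool(row[0])  # a non-empty first cell starts a new record
        if taken >= limit and starts_record:
            break
        taken += 1
    return taken

rows = []
for record in range(10):
    rows.append(['record_%d' % record, '0'])
    rows.extend([['', str(n)] for n in range(1, 10)])

assert batch_size(rows, 5) == 10    # the first record is finished in full
assert batch_size(rows, 15) == 20   # two full records, matching nextrow == 20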
Example #10
 def voicent_start_campaign(self, call_line):
     for rec in self:
         if call_line.helpdesk_ticket_stage_id.id == rec.stage_id.id:
             # Generate the CSV file
             fp = io.BytesIO()
             writer = pycompat.csv_writer(fp, quoting=1)
             headers = []
             vals = []
             MailTemplate = self.env['mail.template']
             for field in call_line.contact_ids:
                 head = field.name
                 if head == 'Other':
                     head = field.other
                 lang = (rec.id and rec.partner_id.lang or "en_US")
                 value = \
                     MailTemplate.with_context(lang=lang)._render_template(
                         '${object.' + field.field_domain + '}',
                         'helpdesk.ticket',
                         rec.id)
                 if not value:
                     value = field.default_value
                 headers.append(head)
                 vals.append(value)
             writer.writerow(headers)
             writer.writerow(vals)
             directory = tempfile.mkdtemp(suffix='-helpdesk.ticket')
             file_name = \
                 directory + "/" + str(rec.id) + '-' + \
                 str(rec.stage_id.id) + '-' + \
                 fields.Datetime.now().strftime('%Y-%m-%d-%H-%M-%S') + \
                 '.csv'
             write_file = open(file_name, 'wb')
             write_file.write(fp.getvalue())
             write_file.close()
             # Connect to Voicent
             voicent_obj = voicent.Voicent(call_line.backend_id.host,
                                           str(call_line.backend_id.port),
                                           call_line.backend_id.callerid,
                                           str(call_line.backend_id.line))
             res = voicent_obj.importAndRunCampaign(file_name,
                                                    call_line.msgtype,
                                                    call_line.msginfo)
             # Delete the file on the filesystem
             shutil.rmtree(directory)
             if res.get('camp_id') and res.get('status') == 'OK':
                 rec.voicent_check_status(res.get('camp_id'), call_line)
             else:
                 message = _("""Call has been sent to <b>%s</b> but failed
                 with the following message: <b>%s</b>""" %
                             (call_line.backend_id.name, res))
                 rec.message_post(body=message)
                 raise RetryableJobError(res)
         else:
             message = _("Call has been cancelled because the stage has "
                         "changed.")
             rec.message_post(body=message)
Example #11
 def fiscal_pos_map_to_csv(self):
     writer = pycompat.csv_writer(
         open(
             'account.fiscal.'
             'position.tax.template-%s.csv' % self.suffix, 'wb'))
     fiscal_pos_map_iterator = self.iter_fiscal_pos_map()
     keys = next(fiscal_pos_map_iterator)
     writer.writerow(keys)
     for row in fiscal_pos_map_iterator:
         writer.writerow([pycompat.to_text(s) for s in row.values()])
Example #12
def _csv_write_rows(rows):
    f = io.BytesIO()
    writer = pycompat.csv_writer(f, delimiter=',', quotechar='"', quoting=2, dialect='excel')
    for row in rows:
        writer.writerow(row)

    fvalue = f.getvalue()
    f.close()
    return fvalue
Example #13
    def from_data(self, fields, rows):
        fp = io.BytesIO()
        writer = pycompat.csv_writer(fp, quoting=1)

        writer.writerow(fields)

        for data in rows:
            row = []
            for d in data:
                # Spreadsheet apps tend to detect formulas on leading =, + and -
                if isinstance(d, str) and d.startswith(('=', '-', '+')):
                    d = "'" + d

                row.append(pycompat.to_text(d))
            writer.writerow(row)

        return fp.getvalue()
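The leading-character check above is a guard against spreadsheet formula (CSV) injection: values starting with =, + or - get an apostrophe prefix so spreadsheet apps display them as text instead of evaluating them. A standalone sketch of the same guard with the stdlib csv module (the apostrophe prefix mirrors the snippet above, not any particular library API):

import csv
import io

def to_safe_csv(fields, rows):
    buf = io.StringIO()
    writer = csv.writer(buf, quoting=csv.QUOTE_ALL)
    writer.writerow(fields)
    for data in rows:
        # only string cells are escaped, mirroring the isinstance(d, str) check above
        writer.writerow([
            "'" + d if isinstance(d, str) and d.startswith(('=', '-', '+')) else str(d)
            for d in data
        ])
    return buf.getvalue()

print(to_safe_csv(['name', 'total'], [['=HYPERLINK("http://example.com")', -3]]))
# the formula cell is written with a leading apostrophe and stays inert in a spreadsheet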
Example #14
 def taxes_to_csv(self):
     writer = pycompat.csv_writer(
         open('account.tax.template-%s.csv' % self.suffix, 'wb'))
     taxes_iterator = self.iter_taxes()
     keys = next(taxes_iterator)
     writer.writerow(keys[3:] + ['sequence'])
     seq = 100
     for row in sorted(taxes_iterator, key=lambda r: r['description']):
         if not _is_true(row['active']):
             continue
         seq += 1
         if row['parent_id:id']:
             cur_seq = seq + 1000
         else:
             cur_seq = seq
         writer.writerow(
             [pycompat.to_text(v)
              for v in list(row.values())[3:]] + [cur_seq])
Example #15
    def prepareCsv(self, mainData, invoice_type, gstToolName, gstType):
        attachment = False
        if mainData:
            fp = BytesIO()

            # writer = csv.writer(fp, quoting=csv.QUOTE_NONE,escapechar='\\')
            writer = pycompat.csv_writer(fp,
                                         quoting=csv.QUOTE_NONE,
                                         escapechar='\\')

            if invoice_type == 'b2b':
                columns = self.env['csv.column'].getB2BColumn(gstType)
                writer.writerow(columns)
            elif invoice_type == 'b2bur':
                columns = self.env['csv.column'].getB2BURColumn()
                writer.writerow(columns)
            elif invoice_type == 'b2cl':
                columns = self.env['csv.column'].getB2CLColumn()
                writer.writerow(columns)
            elif invoice_type == 'b2cs':
                columns = self.env['csv.column'].getB2CSColumn()
                writer.writerow(columns)
            elif invoice_type == 'imps':
                columns = self.env['csv.column'].getImpsColumn()
                writer.writerow(columns)
            elif invoice_type == 'impg':
                columns = self.env['csv.column'].getImpgColumn()
                writer.writerow(columns)
            elif invoice_type == 'export':
                columns = self.env['csv.column'].getExportColumn()
                writer.writerow(columns)
            elif invoice_type == 'hsn':
                columns = self.env['csv.column'].getHSNColumn()
                writer.writerow(columns)
            for lineData in mainData:
                _logger.info('Line Data (%s)', lineData)
                writer.writerow([_unescape(name) for name in lineData])
            fp.seek(0)
            data = fp.read()
            fp.close()
            attachment = self.generateAttachment(data, invoice_type,
                                                 gstToolName)
        return attachment
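A note on the quoting=csv.QUOTE_NONE, escapechar='\\' combination used above: with quoting disabled the csv module cannot fall back to quoting a field that contains the delimiter, so an escapechar is required or the writer raises csv.Error. A standalone stdlib sketch (sample data only, unrelated to the GST file format):

import csv
import io

row = ['ACME, Inc.', '18.00']  # first field contains the delimiter

buf = io.StringIO()
try:
    csv.writer(buf, quoting=csv.QUOTE_NONE).writerow(row)
except csv.Error as exc:
    print('without escapechar:', exc)  # the delimiter cannot be written unescaped

buf = io.StringIO()
csv.writer(buf, quoting=csv.QUOTE_NONE, escapechar='\\').writerow(row)
print(buf.getvalue())  # ACME\, Inc.,18.00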
Example #16
 def taxes_to_csv(self):
     writer = pycompat.csv_writer(open('account.tax.template-%s.csv' %
                                  self.suffix, 'wb'))
     taxes_iterator = self.iter_taxes()
     keys = next(taxes_iterator)
     writer.writerow(keys[3:] + ['sequence'])
     seq = 100
     for row in sorted(taxes_iterator, key=lambda r: r['description']):
         if not _is_true(row['active']):
             continue
         seq += 1
         if row['parent_id:id']:
             cur_seq = seq + 1000
         else:
             cur_seq = seq
         writer.writerow([
             pycompat.to_text(v)
             for v in list(row.values())[3:]
         ] + [cur_seq])
Example #17
    def test_newline_import(self):
        """
        Ensure importing keeps newlines
        """
        output = io.BytesIO()
        writer = pycompat.csv_writer(output, quoting=1)

        data_row = [u"\tfoo\n\tbar", u" \"hello\" \n\n 'world' "]

        writer.writerow([u"name", u"Some Value"])
        writer.writerow(data_row)

        import_wizard = self.env['base_import.import'].create({
            'res_model': 'base_import.tests.models.preview',
            'file': output.getvalue(),
            'file_type': 'text/csv',
        })
        data, _ = import_wizard._convert_import_data(
            ['name', 'somevalue'],
            {'quoting': '"', 'separator': ',', 'headers': True}
        )

        self.assertItemsEqual(data, [data_row])
Example #18
    def tax_codes_to_csv(self):
        writer = pycompat.csv_writer(
            open('account.tax.code.template-%s.csv' % self.suffix, 'wb'))
        tax_codes_iterator = self.iter_tax_codes()
        keys = next(tax_codes_iterator)
        writer.writerow(keys)

        # write structure tax codes
        tax_codes = {}  # code: id
        for row in tax_codes_iterator:
            tax_code = row['code']
            if tax_code in tax_codes:
                raise RuntimeError('duplicate tax code %s' % tax_code)
            tax_codes[tax_code] = row['id']
            writer.writerow([pycompat.to_text(v) for v in row.values()])

        # read taxes and add leaf tax codes
        new_tax_codes = {}  # id: parent_code

        def add_new_tax_code(tax_code_id, new_name, new_parent_code):
            if not tax_code_id:
                return
            name, parent_code = new_tax_codes.get(tax_code_id, (None, None))
            if parent_code and parent_code != new_parent_code:
                raise RuntimeError('tax code "%s" already exists with '
                                   'parent %s while trying to add it with '
                                   'parent %s' %
                                   (tax_code_id, parent_code, new_parent_code))
            else:
                new_tax_codes[tax_code_id] = (new_name, new_parent_code)

        taxes_iterator = self.iter_taxes()
        next(taxes_iterator)
        for row in taxes_iterator:
            if not _is_true(row['active']):
                continue
            if row['child_depend'] and row['amount'] != 1:
                raise RuntimeError('amount must be one if child_depend '
                                   'for %s' % row['id'])
            # base parent
            base_code = row['BASE_CODE']
            if not base_code or base_code == '/':
                base_code = 'NA'
            if base_code not in tax_codes:
                raise RuntimeError('undefined tax code %s' % base_code)
            if base_code != 'NA':
                if row['child_depend']:
                    raise RuntimeError('base code specified '
                                       'with child_depend for %s' % row['id'])
            if not row['child_depend']:
                # ... in lux, we have the same code for invoice and refund
                if base_code != 'NA':
                    assert row['base_code_id:id'], \
                        'missing base_code_id for %s' % row['id']
                assert row['ref_base_code_id:id'] == row['base_code_id:id']
                add_new_tax_code(row['base_code_id:id'],
                                 'Base - ' + row['name'], base_code)
            # tax parent
            tax_code = row['TAX_CODE']
            if not tax_code or tax_code == '/':
                tax_code = 'NA'
            if tax_code not in tax_codes:
                raise RuntimeError('undefined tax code %s' % tax_code)
            if tax_code == 'NA':
                if row['amount'] and not row['child_depend']:
                    raise RuntimeError('TAX_CODE not specified '
                                       'for non-zero tax %s' % row['id'])
                if row['tax_code_id:id']:
                    raise RuntimeError('tax_code_id specified '
                                       'for tax %s' % row['id'])
            else:
                if row['child_depend']:
                    raise RuntimeError('TAX_CODE specified '
                                       'with child_depend for %s' % row['id'])
                if not row['amount']:
                    raise RuntimeError('TAX_CODE specified '
                                       'for zero tax %s' % row['id'])
                if not row['tax_code_id:id']:
                    raise RuntimeError('tax_code_id not specified '
                                       'for tax %s' % row['id'])
            if not row['child_depend'] and row['amount']:
                # ... in lux, we have the same code for invoice and refund
                assert row['tax_code_id:id'], \
                    'missing tax_code_id for %s' % row['id']
                assert row['ref_tax_code_id:id'] == row['tax_code_id:id']
                add_new_tax_code(row['tax_code_id:id'],
                                 'Taxe - ' + row['name'], tax_code)

        for tax_code_id in sorted(new_tax_codes):
            name, parent_code = new_tax_codes[tax_code_id]
            writer.writerow([
                tax_code_id, u'lu_tct_m' + parent_code,
                tax_code_id.replace('lu_tax_code_template_', u''), u'1', u'',
                pycompat.to_text(name), u''
            ])
Example #19
    def _get_partner_list(self, options):
        date_from = fields.Date.from_string(
            options.get('date').get('date_from'))
        date_to = fields.Date.from_string(options.get('date').get('date_to'))
        fy = self.env.user.company_id.compute_fiscalyear_dates(date_to)

        date_from = datetime.strftime(date_from, '%Y%m%d')
        date_to = datetime.strftime(date_to, '%Y%m%d')
        fy = datetime.strftime(fy.get('date_from'), '%Y%m%d')
        datev_info = self._get_datev_client_number()

        output = io.BytesIO()
        writer = pycompat.csv_writer(output,
                                     delimiter=';',
                                     quotechar='"',
                                     quoting=2)

        preheader = [
            'EXTF', 510, 16, 'Debitoren/Kreditoren', 4, None, None, '', '', '',
            datev_info[0], datev_info[1], fy, 8, '', '', '', '', '', '', '',
            '', '', '', '', '', '', '', '', '', ''
        ]
        header = [
            'Konto', 'Name (AdressatentypUnternehmen)',
            'Name (Adressatentypnatürl. Person)', '', '', '', 'Adressatentyp'
        ]
        move_line_ids = self.with_context(self._set_context(options),
                                          print_mode=True,
                                          aml_only=True)._get_lines(options)
        lines = [preheader, header]

        if len(move_line_ids):
            self.env.cr.execute(
                """
                SELECT distinct(aml.partner_id) 
                FROM account_move_line aml 
                LEFT JOIN account_move m
                    ON aml.move_id = m.id
                WHERE aml.id IN %s 
                    AND aml.tax_line_id IS NULL
                    AND aml.debit != aml.credit
                    AND aml.account_id != m.l10n_de_datev_main_account_id""",
                (tuple(move_line_ids), ))
        partners = self.env['res.partner'].browse(
            [p.get('partner_id') for p in self.env.cr.dictfetchall()])
        for partner in partners:
            code = self._find_partner_account(
                partner.property_account_receivable_id, partner)
            line_value = {
                'code': code,
                'company_name': partner.name if partner.is_company else '',
                'person_name': '' if partner.is_company else partner.name,
                'natural': partner.is_company and '2' or '1'
            }
            # Idiotic program needs to have a line with 243 elements ordered in a given fashion as it
            # does not take into account the header and non mandatory fields
            array = ['' for x in range(243)]
            array[0] = line_value.get('code')
            array[1] = line_value.get('company_name')
            array[2] = line_value.get('person_name')
            array[6] = line_value.get('natural')
            lines.append(array)
            code_payable = self._find_partner_account(
                partner.property_account_payable_id, partner)
            if code_payable != code:
                line_value['code'] = code_payable
                # copy the row first, otherwise the receivable line appended above
                # would be mutated too (both list entries would share the same object)
                array = list(array)
                array[0] = line_value.get('code')
                lines.append(array)
        writer.writerows(lines)
        return output.getvalue()
Example #20
    def transfert_compta(self, **kw):
        #s = s[ beginning : beginning + LENGTH]
        date_d = self.date_start[0:4] + self.date_start[5:7] + self.date_start[
            8:10]
        date_f = self.date_end[0:4] + self.date_end[5:7] + self.date_end[8:10]
        query_args = {'date_start': date_d, 'date_end': date_f}

        #dirpath2 = os.path.dirname(os.path.realpath(__file__))   # c:\odoo\odoo11\addons_adinfo\hubi\models

        # General Settings
        company_id = self.env['res.company']._company_default_get(
            'hubi.general_settings')
        val_company_id = company_id.id
        val_name = 'General Settings'

        settings = self.env['hubi.general_settings'].search([
            ('name', '=', val_name), ('company_id', '=', val_company_id)
        ])
        if settings:
            auxiliary_account = settings.auxiliary_accounting
            length_account_gen = settings.length_account_general
            length_account_aux = settings.length_account_auxiliary
            complete_0_gen = settings.complete_0_account_general or False
            complete_0_aux = settings.complete_0_account_general or False
        else:
            auxiliary_account = False
            length_account_gen = 0
            length_account_aux = 0
            complete_0_gen = False
            complete_0_aux = False

        csv_path = self.path_account_transfer
        account_file = self.account_file_transfer
        writing_file = self.writing_file_transfer

        if csv_path is None:
            csv_path = os.environ.get(
                'LOCALAPPDATA') or os.getcwd()  # c:\odoo\odoo11
        if account_file is None:
            account_file = 'comptes.txt'
        if writing_file is None:
            writing_file = 'ecritures.txt'
        """    
        csv_path = os.path.normpath(csv_path)    
        if not os.path.exists(csv_path): 
            os.makedirs(csv_path)
        os.chdir(csv_path)
        """

        # Account File
        partner_file = io.BytesIO()
        w_p = pycompat.csv_writer(partner_file, delimiter=',')

        #fpc = io.open(account_file, 'w', encoding='utf-8')

        csv_p = ""
        ligne_p = ""

        sql_p = """SELECT distinct am.partner_id, res_partner.name,
                res_partner.street, res_partner.city, res_partner.zip,
                res_country.code as code_pays, res_country.name as country,
                res_partner.phone, res_partner.mobile, am.company_id
                from account_move as am
                INNER JOIN res_partner on res_partner.id = am.partner_id 
                INNER JOIN res_country on res_country.id = res_partner.country_id 
                WHERE am.state = 'posted' 
                AND to_char(am.date,'YYYYMMDD') BETWEEN %s AND %s
                AND am.company_id = %s
                AND am.journal_id IN %s
                ORDER BY am.partner_id"""

        self.env.cr.execute(sql_p, (
            date_d,
            date_f,
            val_company_id,
            tuple(self.journal_ids.ids),
        ))

        ids_p = [(r[0], r[1], r[2], r[3], r[4], r[5], r[6], r[7], r[8])
                 for r in self.env.cr.fetchall()]
        for partner_id, name, street, city, zip, code_pays, country, phone, mobile in ids_p:
            ret_p = self.ecrire_ligne_comptes_ebp(
                auxiliary_account, length_account_gen, length_account_aux,
                complete_0_gen, complete_0_aux, partner_id, name, street, city,
                zip, code_pays, country, phone, mobile)

            ligne_p = ret_p['ligne_p']
            listrow_p = ret_p['listrow_p']
            listrow_ps = ret_p['listrow_ps']

            csv_p += ligne_p
            w_p.writerow(listrow_p)
            if len(listrow_ps) != 0:
                w_p.writerow(listrow_ps)

        #fpc.write(csv_p)
        #fpc.close()

        # Transfert Invoices
        compta_file = io.BytesIO()
        w = pycompat.csv_writer(compta_file, delimiter=',')

        #fpi = io.open(writing_file, 'w', encoding='utf-8')

        sql = """SELECT aml.id, am.name as move_name, account_journal.code as journal,account_account.code as compte,
                res_partner.name as partner, aml.name as move_line_name,
                to_char(am.date,'DDMMYYYY') as date_ecr,
                to_char(aml.date_maturity,'DDMMYYYY') as date_ech,
                aml.debit, aml.credit, res_currency.name as currency, 
                aml.ref as ref, aaa.code as compte_anal, am.partner_id, aml.company_id
                from account_move_line as aml
                INNER JOIN account_move as am on am.id = aml.move_id
                INNER JOIN account_journal on account_journal.id = am.journal_id
                INNER JOIN res_currency on res_currency.id = am.currency_id
                INNER JOIN res_partner on res_partner.id = am.partner_id 
                INNER JOIN account_account on account_account.id = aml.account_id 
                LEFT JOIN account_analytic_account as aaa on aaa.id = aml.analytic_account_id 
                WHERE am.state = 'posted' 
                AND to_char(am.date,'YYYYMMDD') BETWEEN %s AND %s
                AND  aml.company_id = %s 
                AND am.journal_id IN %s 
                AND aml.transfer_accounting is %s
                ORDER BY account_journal.code, am.id, account_account.code"""
        #AND aml.transfer_accounting is not true

        self.env.cr.execute(sql, (
            date_d,
            date_f,
            val_company_id,
            tuple(self.journal_ids.ids),
            self.re_transfer,
        ))

        nb_lig = 0
        csv = ""
        ligne = ""
        ids = [(r[0], r[1], r[2], r[3], r[4], r[5], r[6], r[7], r[8], r[9],
                r[10], r[11], r[12], r[13]) for r in self.env.cr.fetchall()]
        for line_id, move_name, journal, compte, partner_name, move_line_name, date_ecr, date_ech, debit, credit, currency, ref, compte_anal, partner_id in ids:
            nb_lig += 1
            listrow = list()
            #ligne = self.ecrire_ligne_ebp(auxiliary_account,length_account_gen,length_account_aux,complete_0_gen,complete_0_aux, move_name, journal, compte, partner_name, move_line_name, date_ecr, date_ech, debit, credit, currency, ref, compte_anal, partner_id, nb_lig)
            ret = self.ecrire_ligne_ebp(
                auxiliary_account, length_account_gen, length_account_aux,
                complete_0_gen, complete_0_aux, move_name, journal, compte,
                partner_name, move_line_name, date_ecr, date_ech, debit,
                credit, currency, ref, compte_anal, partner_id, nb_lig)
            ligne = ret['ligne']
            listrow = ret['listrow']

            csv += ligne
            w.writerow(listrow)

            line = self.env['account.move.line'].browse(line_id)
            line.update({'transfer_accounting': True})

        #fpi.write(csv)
        #fpi.close()

        #controller_export_csv_order_line.SaleOrderController.transfert_compta_csv_download(self,csv,writing_file)
        if self.mail_accounting:
            # send EMail
            #return self.send_mail_template()
            attachments_ids = []
            comptavalue = compta_file.getvalue()
            partnervalue = partner_file.getvalue()
            if comptavalue is not None:
                attachment_w = {
                    'name': ("%s" % writing_file),
                    'datas_fname': writing_file,
                    'datas': base64.encodestring(comptavalue),
                    'type': 'binary'
                }
                id_w = self.env['ir.attachment'].create(attachment_w)
                attachments_ids.append(id_w.id)

            if partnervalue is not None:
                attachment_a = {
                    'name': ("%s" % account_file),
                    'datas_fname': account_file,
                    'datas': base64.encodestring(partnervalue),
                    'type': 'binary'
                }
                id_a = self.env['ir.attachment'].create(attachment_a)
                attachments_ids.append(id_a.id)

                email_template = self.env.ref(
                    'hubi.email_template_accounting_transfer')
                email_template.attachment_ids = False

            email_template.attachment_ids = attachments_ids

            ir_model_data = self.env['ir.model.data']
            try:
                template_id = ir_model_data.get_object_reference(
                    'hubi', 'email_template_accounting_transfer')[1]
            except ValueError:
                template_id = False
            try:
                compose_form_id = ir_model_data.get_object_reference(
                    'mail', 'email_compose_message_wizard_form')[1]
            except ValueError:
                compose_form_id = False

            ctx = {
                'default_model': 'wiz.transfertcompta',
                'default_res_id': self.ids[0],
                'default_use_template': bool(template_id),
                'default_template_id': template_id,
                'default_composition_mode': 'comment',
                'attachment_ids': attachments_ids,
                'force_email': True
            }
            return {
                'type': 'ir.actions.act_window',
                'view_type': 'form',
                'view_mode': 'form',
                'res_model': 'mail.compose.message',
                'views': [(compose_form_id, 'form')],
                'view_id': compose_form_id,
                'target': 'new',
                'context': ctx,
            }

        else:
            #return {'type': 'ir.actions.act_window_close'}
            """
            view_id = self.env["ir.model.data"].get_object_reference("hubi", "wiz_transfert_compta_step2")
            self.message = ("%s %s %s %s") % ("Create Accounting transfer for ",nb_lig, " lines. Sur ", csv_path )
            return {"type":"ir.actions.act_window",
                "view_mode":"form",
                "view_type":"form",
                "views":[(view_id[1], "form")],
                "res_id":self.id,
                "target":"new",
                "res_model":"wiz.transfertcompta"                
                } 
                   
            """
            comptavalue = compta_file.getvalue()
            partnervalue = partner_file.getvalue()

            self.write({
                'compta_data': base64.encodestring(comptavalue),
                'filename': writing_file,
                'partner_data': base64.encodestring(partnervalue),
                'partner_filename': account_file,
            })
            compta_file.close()

            action_writing = {
                'name': 'hubi_transfert_compta',
                'type': 'ir.actions.act_url',
                'url': "web/content/?model=wiz.transfertcompta&id=" + str(self.id) +
                       "&filename_field=filename&field=compta_data&download=true&filename=" +
                       self.filename,
                'target': 'self',
            }
            action_partner = {
                'name': 'hubi_transfert_compta_partner',
                'type': 'ir.actions.act_url',
                'url': "web/content/?model=wiz.transfertcompta&id=" + str(self.id) +
                       "&filename_field=partner_filename&field=partner_data&download=true&filename=" +
                       self.partner_filename,
                'target': 'self',
            }
            action = {
                'name': 'hubi_transfert_compta',
                'type': 'ir.actions.act_url',
                'url': "web/content/?model=wiz.transfertcompta&id=" + str(self.id) +
                       "&filename_field=filename&field=compta_data&download=true&filename=" +
                       self.filename +
                       "&filename_field=partner_filename&field=partner_data&download=true&filename=" +
                       self.partner_filename,
                'target': 'self',
            }

            return action_writing
Example #21
    def tax_codes_to_csv(self):
        writer = pycompat.csv_writer(open('account.tax.code.template-%s.csv' %
                                 self.suffix, 'wb'))
        tax_codes_iterator = self.iter_tax_codes()
        keys = next(tax_codes_iterator)
        writer.writerow(keys)

        # write structure tax codes
        tax_codes = {}  # code: id
        for row in tax_codes_iterator:
            tax_code = row['code']
            if tax_code in tax_codes:
                raise RuntimeError('duplicate tax code %s' % tax_code)
            tax_codes[tax_code] = row['id']
            writer.writerow([pycompat.to_text(v) for v in row.values()])

        # read taxes and add leaf tax codes
        new_tax_codes = {}  # id: parent_code

        def add_new_tax_code(tax_code_id, new_name, new_parent_code):
            if not tax_code_id:
                return
            name, parent_code = new_tax_codes.get(tax_code_id, (None, None))
            if parent_code and parent_code != new_parent_code:
                raise RuntimeError('tax code "%s" already exists with '
                                   'parent %s while trying to add it with '
                                   'parent %s' %
                                   (tax_code_id, parent_code, new_parent_code))
            else:
                new_tax_codes[tax_code_id] = (new_name, new_parent_code)

        taxes_iterator = self.iter_taxes()
        next(taxes_iterator)
        for row in taxes_iterator:
            if not _is_true(row['active']):
                continue
            if row['child_depend'] and row['amount'] != 1:
                raise RuntimeError('amount must be one if child_depend '
                                   'for %s' % row['id'])
            # base parent
            base_code = row['BASE_CODE']
            if not base_code or base_code == '/':
                base_code = 'NA'
            if base_code not in tax_codes:
                raise RuntimeError('undefined tax code %s' % base_code)
            if base_code != 'NA':
                if row['child_depend']:
                    raise RuntimeError('base code specified '
                                       'with child_depend for %s' % row['id'])
            if not row['child_depend']:
                # ... in lux, we have the same code for invoice and refund
                if base_code != 'NA':
                    assert row['base_code_id:id'], 'missing base_code_id for %s' % row['id']
                assert row['ref_base_code_id:id'] == row['base_code_id:id']
                add_new_tax_code(row['base_code_id:id'],
                                 'Base - ' + row['name'],
                                 base_code)
            # tax parent
            tax_code = row['TAX_CODE']
            if not tax_code or tax_code == '/':
                tax_code = 'NA'
            if tax_code not in tax_codes:
                raise RuntimeError('undefined tax code %s' % tax_code)
            if tax_code == 'NA':
                if row['amount'] and not row['child_depend']:
                    raise RuntimeError('TAX_CODE not specified '
                                       'for non-zero tax %s' % row['id'])
                if row['tax_code_id:id']:
                    raise RuntimeError('tax_code_id specified '
                                       'for tax %s' % row['id'])
            else:
                if row['child_depend']:
                    raise RuntimeError('TAX_CODE specified '
                                       'with child_depend for %s' % row['id'])
                if not row['amount']:
                    raise RuntimeError('TAX_CODE specified '
                                       'for zero tax %s' % row['id'])
                if not row['tax_code_id:id']:
                    raise RuntimeError('tax_code_id not specified '
                                       'for tax %s' % row['id'])
            if not row['child_depend'] and row['amount']:
                # ... in lux, we have the same code for invoice and refund
                assert row['tax_code_id:id'], 'missing tax_code_id for %s' % row['id']
                assert row['ref_tax_code_id:id'] == row['tax_code_id:id']
                add_new_tax_code(row['tax_code_id:id'],
                                 'Taxe - ' + row['name'],
                                 tax_code)

        for tax_code_id in sorted(new_tax_codes):
            name, parent_code = new_tax_codes[tax_code_id]
            writer.writerow([
                tax_code_id,
                u'lu_tct_m' + parent_code,
                tax_code_id.replace('lu_tax_code_template_', u''),
                u'1',
                u'',
                pycompat.to_text(name),
                u''
            ])
Example #22
    def get_csv(self, options):
        # last 2 element of preheader should be filled by "consultant number" and "client number"
        date_from = fields.Date.from_string(
            options.get('date').get('date_from'))
        date_to = fields.Date.from_string(options.get('date').get('date_to'))
        fy = self.env.user.company_id.compute_fiscalyear_dates(date_to)

        date_from = datetime.strftime(date_from, '%Y%m%d')
        date_to = datetime.strftime(date_to, '%Y%m%d')
        fy = datetime.strftime(fy.get('date_from'), '%Y%m%d')
        datev_info = self._get_datev_client_number()

        output = io.BytesIO()
        writer = pycompat.csv_writer(output,
                                     delimiter=';',
                                     quotechar='"',
                                     quoting=2)
        preheader = [
            'EXTF', 510, 21, 'Buchungsstapel', 7, '', '', '', '', '',
            datev_info[0], datev_info[1], fy, 8, date_from, date_to, '', '',
            '', '', 0, 'EUR', '', '', '', '', '', '', '', '', ''
        ]
        header = [
            'Umsatz (ohne Soll/Haben-Kz)', 'Soll/Haben-Kennzeichen',
            'WKZ Umsatz', 'Kurs', 'Basis-Umsatz', 'WKZ Basis-Umsatz', 'Konto',
            'Gegenkonto (ohne BU-Schlüssel)', 'BU-Schlüssel', 'Belegdatum',
            'Belegfeld 1', 'Belegfeld 2', 'Skonto', 'Buchungstext'
        ]

        move_line_ids = self.with_context(self._set_context(options),
                                          print_mode=True,
                                          aml_only=True)._get_lines(options)
        lines = [preheader, header]

        moves = move_line_ids
        # find all account_move
        if len(move_line_ids):
            self.env.cr.execute(
                """SELECT distinct(move_id) FROM account_move_line WHERE id IN %s""",
                (tuple(move_line_ids), ))
            move_ids = [l.get('move_id') for l in self.env.cr.dictfetchall()]
            moves = self.env['account.move'].browse(move_ids)
        for m in moves:
            # Account-counterAccount-amount-partner
            move_line_set = {}
            for aml in m.line_ids:
                if aml.debit == aml.credit:
                    # Ignore debit = credit = 0
                    continue
                # If both account and counteraccount are the same, ignore the line
                if aml.account_id == aml.move_id.l10n_de_datev_main_account_id:
                    continue
                # If line is a tax ignore it as datev requires single line with gross amount and deduct tax itself based
                # on account or on the control key code
                if aml.tax_line_id:
                    continue

                amount = abs(aml.balance)
                # Finding back the amount with tax included from the move can be tricky
                # The line with the tax contains the tax_base_amount which is the gross value
                # However if we have 2 product lines with the same tax, we will have a move like this
                # Debit   Credit   Tax_line_id   Tax_ids   Tax_base_amount   Account_id
                #  10        0        false       false            0          Receivable
                #  0         3         1          false            7          Tax
                #  0        3,5       false        [1]             0          Income Account
                #  0        3,5       false        [1]             0          Income Account
                #
                # What we want to export for this move in datev is something like this:
                # Account             CounterAccount   Amount
                # Income Account      Receivable        10
                #
                # So when we are on an "Income Account" line linked to a tax, we try to find back
                # the line representing the tax and we use as amount the tax line balance + base_amount
                # This means that in the case we happen to have another line with the same tax and income account
                # (as described above), we have to skip it.
                # A problem that might happen since taxes are grouped is: if we have a line
                # with tax 1 and 2 specified on the line, and another line with only tax 1
                # specified on the line. This will result in a case where we can't easily get
                # back the gross amount for both lines. This case is not supported by this export
                # function and will result in incorrect exported lines for datev.
                code_correction = ''
                if aml.tax_ids:
                    amount = 0
                    for tax in aml.tax_ids:
                        # Find the tax line in the move and get its tax_base_amount
                        tax_line = m.line_ids.filtered(
                            lambda l: l.tax_line_id == tax and l.partner_id ==
                            aml.partner_id)
                        amount += abs(tax_line.balance) + abs(
                            tax_line.tax_base_amount)

                # account and counterpart account
                to_account_code = self._find_partner_account(
                    aml.move_id.l10n_de_datev_main_account_id, aml.partner_id)
                account_code = u'{code}'.format(
                    code=self._find_partner_account(aml.account_id,
                                                    aml.partner_id))
                if not aml.account_id.tax_ids and aml.tax_ids:
                    # Take the correction code from the tax
                    code_correction = aml.tax_ids[0].l10n_de_datev_code

                # If the line is already to be added from this move, skip it
                match_key = '%s-%s-%s-%s' % (account_code, to_account_code,
                                             amount, aml.partner_id)
                if (move_line_set.get(match_key)):
                    continue
                else:
                    move_line_set[match_key] = True
                # reference
                receipt1 = aml.move_id.name
                if aml.move_id.journal_id.type == 'purchase' and aml.move_id.ref:
                    receipt1 = aml.move_id.ref

                # on receivable/payable aml of sales/purchases
                receipt2 = ''
                if to_account_code == account_code and aml.date_maturity:
                    receipt2 = aml.date

                currency = aml.company_id.currency_id
                partner_vat = aml.tax_ids and aml.move_id.partner_id.vat or ''
                line_value = {
                    'waehrung': currency.name,
                    'sollhaben': 's' if aml.balance >= 0 else 'h',
                    'amount': str(amount).replace('.', ','),
                    'buschluessel': code_correction,
                    'gegenkonto': to_account_code,
                    'belegfeld1': receipt1[-12:],
                    'belegfeld2': receipt2,
                    'datum': aml.move_id.date,
                    'konto': account_code or '',
                    'kurs': str(currency.rate).replace('.', ','),
                    'buchungstext': receipt1,
                }
                # Idiotic program needs to have a line with 116 elements ordered in a given fashion as it
                # does not take into account the header and non mandatory fields
                array = ['' for x in range(116)]
                array[0] = line_value.get('amount')
                array[1] = line_value.get('sollhaben')
                array[2] = line_value.get('waehrung')
                array[6] = line_value.get('konto')
                array[7] = line_value.get('gegenkonto')
                array[8] = line_value.get('buschluessel')
                array[9] = line_value.get('datum')
                array[10] = line_value.get('belegfeld1')
                array[11] = line_value.get('belegfeld2')
                array[13] = line_value.get('buchungstext')
                lines.append(array)
        writer.writerows(lines)
        return output.getvalue()
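The long comment inside the loop describes how the gross amount is rebuilt from the tax line; with the numbers from its sample move the arithmetic is simply (pure Python, no Odoo objects involved):

# Sample move from the comment above: receivable 10, a tax line with credit 3
# and tax_base_amount 7, and two income lines of 3.5 sharing the same tax.
tax_line_balance = -3.0
tax_line_tax_base_amount = 7.0

# An income line linked to that tax is exported with the reconstructed gross amount:
amount = abs(tax_line_balance) + abs(tax_line_tax_base_amount)
assert amount == 10.0  # the 10 booked on the receivable line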
Example #23
    def generate_fec(self):
        self.ensure_one()
        # We choose to implement the flat file instead of the XML
        # file for 2 reasons :
        # 1) the XSD file impose to have the label on the account.move
        # but Odoo has the label on the account.move.line, so that's a
        # problem !
        # 2) CSV files are easier to read/use for a regular accountant.
        # So it will be easier for the accountant to check the file before
        # sending it to the fiscal administration
        header = [
            u'JournalCode',  # 0
            u'JournalLib',  # 1
            u'EcritureNum',  # 2
            u'EcritureDate',  # 3
            u'CompteNum',  # 4
            u'CompteLib',  # 5
            u'CompAuxNum',  # 6  We use partner.id
            u'CompAuxLib',  # 7
            u'PieceRef',  # 8
            u'PieceDate',  # 9
            u'EcritureLib',  # 10
            u'Debit',  # 11
            u'Credit',  # 12
            u'EcritureLet',  # 13
            u'DateLet',  # 14
            u'ValidDate',  # 15
            u'Montantdevise',  # 16
            u'Idevise',  # 17
        ]

        company = self.env.user.company_id
        if not company.vat:
            raise Warning(
                _("Missing VAT number for company %s") % company.name)
        if company.vat[0:2] != 'FR':
            raise Warning(_("FEC is for French companies only !"))

        fecfile = io.BytesIO()
        w = pycompat.csv_writer(fecfile, delimiter='|')
        w.writerow(header)

        # INITIAL BALANCE
        unaffected_earnings_xml_ref = self.env.ref(
            'account.data_unaffected_earnings')
        unaffected_earnings_line = True  # used to make sure that we add the unaffected earning initial balance only once
        if unaffected_earnings_xml_ref:
            #compute the benefit/loss of last year to add in the initial balance of the current year earnings account
            unaffected_earnings_results = self.do_query_unaffected_earnings()
            unaffected_earnings_line = False

        sql_query = '''
        SELECT
            'OUV' AS JournalCode,
            'Balance initiale' AS JournalLib,
            'Balance initiale ' || MIN(aa.name) AS EcritureNum,
            %s AS EcritureDate,
            MIN(aa.code) AS CompteNum,
            replace(replace(MIN(aa.name), '|', '/'), '\t', '') AS CompteLib,
            '' AS CompAuxNum,
            '' AS CompAuxLib,
            '-' AS PieceRef,
            %s AS PieceDate,
            '/' AS EcritureLib,
            replace(CASE WHEN sum(aml.balance) <= 0 THEN '0,00' ELSE to_char(SUM(aml.balance), '999999999999999D99') END, '.', ',') AS Debit,
            replace(CASE WHEN sum(aml.balance) >= 0 THEN '0,00' ELSE to_char(-SUM(aml.balance), '999999999999999D99') END, '.', ',') AS Credit,
            '' AS EcritureLet,
            '' AS DateLet,
            %s AS ValidDate,
            '' AS Montantdevise,
            '' AS Idevise,
            MIN(aa.id) AS CompteID
        FROM
            account_move_line aml
            LEFT JOIN account_move am ON am.id=aml.move_id
            JOIN account_account aa ON aa.id = aml.account_id
            LEFT JOIN account_account_type aat ON aa.user_type_id = aat.id
        WHERE
            am.date < %s
            AND am.company_id = %s
            AND aat.include_initial_balance = 't'
            AND (aml.debit != 0 OR aml.credit != 0)
        '''

        # For official report: only use posted entries
        if self.export_type == "official":
            sql_query += '''
            AND am.state = 'posted'
            '''

        sql_query += '''
        GROUP BY aml.account_id
        HAVING sum(aml.balance) != 0
        '''
        formatted_date_from = self.date_from.replace('-', '')
        self._cr.execute(sql_query,
                         (formatted_date_from, formatted_date_from,
                          formatted_date_from, self.date_from, company.id))

        for row in self._cr.fetchall():
            listrow = list(row)
            account_id = listrow.pop()
            if not unaffected_earnings_line:
                account = self.env['account.account'].browse(account_id)
                if account.user_type_id.id == self.env.ref(
                        'account.data_unaffected_earnings').id:
                    #add the benefit/loss of previous fiscal year to the first unaffected earnings account found.
                    unaffected_earnings_line = True
                    current_amount = float(listrow[11].replace(
                        ',', '.')) - float(listrow[12].replace(',', '.'))
                    unaffected_earnings_amount = float(
                        unaffected_earnings_results[11].replace(
                            ',', '.')) - float(
                                unaffected_earnings_results[12].replace(
                                    ',', '.'))
                    listrow_amount = current_amount + unaffected_earnings_amount
                    if listrow_amount > 0:
                        listrow[11] = str(listrow_amount).replace('.', ',')
                        listrow[12] = '0,00'
                    else:
                        listrow[11] = '0,00'
                        listrow[12] = str(-listrow_amount).replace('.', ',')
            w.writerow(listrow)
        #if the unaffected earnings account wasn't in the selection yet: add it manually
        if (not unaffected_earnings_line and unaffected_earnings_results
                and (unaffected_earnings_results[11] != '0,00'
                     or unaffected_earnings_results[12] != '0,00')):
            #search an unaffected earnings account
            unaffected_earnings_account = self.env['account.account'].search(
                [('user_type_id', '=',
                  self.env.ref('account.data_unaffected_earnings').id)],
                limit=1)
            if unaffected_earnings_account:
                unaffected_earnings_results[4] = unaffected_earnings_account.code
                unaffected_earnings_results[5] = unaffected_earnings_account.name
            w.writerow(unaffected_earnings_results)

        # LINES
        sql_query = '''
        SELECT
            replace(replace(aj.code, '|', '/'), '\t', '') AS JournalCode,
            replace(replace(aj.name, '|', '/'), '\t', '') AS JournalLib,
            replace(replace(am.name, '|', '/'), '\t', '') AS EcritureNum,
            TO_CHAR(am.date, 'YYYYMMDD') AS EcritureDate,
            aa.code AS CompteNum,
            replace(replace(aa.name, '|', '/'), '\t', '') AS CompteLib,
            CASE WHEN rp.ref IS null OR rp.ref = ''
            THEN COALESCE('ID ' || rp.id, '')
            ELSE rp.ref
            END
            AS CompAuxNum,
            COALESCE(replace(replace(rp.name, '|', '/'), '\t', ''), '') AS CompAuxLib,
            CASE WHEN am.ref IS null OR am.ref = ''
            THEN '-'
            ELSE replace(replace(am.ref, '|', '/'), '\t', '')
            END
            AS PieceRef,
            TO_CHAR(am.date, 'YYYYMMDD') AS PieceDate,
            CASE WHEN aml.name IS NULL THEN '/' ELSE replace(replace(aml.name, '|', '/'), '\t', '') END AS EcritureLib,
            replace(CASE WHEN aml.debit = 0 THEN '0,00' ELSE to_char(aml.debit, '999999999999999D99') END, '.', ',') AS Debit,
            replace(CASE WHEN aml.credit = 0 THEN '0,00' ELSE to_char(aml.credit, '999999999999999D99') END, '.', ',') AS Credit,
            CASE WHEN rec.name IS NULL THEN '' ELSE rec.name END AS EcritureLet,
            CASE WHEN aml.full_reconcile_id IS NULL THEN '' ELSE TO_CHAR(rec.create_date, 'YYYYMMDD') END AS DateLet,
            TO_CHAR(am.date, 'YYYYMMDD') AS ValidDate,
            CASE
                WHEN aml.amount_currency IS NULL OR aml.amount_currency = 0 THEN ''
                ELSE replace(to_char(aml.amount_currency, '999999999999999D99'), '.', ',')
            END AS Montantdevise,
            CASE WHEN aml.currency_id IS NULL THEN '' ELSE rc.name END AS Idevise
        FROM
            account_move_line aml
            LEFT JOIN account_move am ON am.id=aml.move_id
            LEFT JOIN res_partner rp ON rp.id=aml.partner_id
            JOIN account_journal aj ON aj.id = am.journal_id
            JOIN account_account aa ON aa.id = aml.account_id
            LEFT JOIN res_currency rc ON rc.id = aml.currency_id
            LEFT JOIN account_full_reconcile rec ON rec.id = aml.full_reconcile_id
        WHERE
            am.date >= %s
            AND am.date <= %s
            AND am.company_id = %s
            AND (aml.debit != 0 OR aml.credit != 0)
        '''

        # For official report: only use posted entries
        if self.export_type == "official":
            sql_query += '''
            AND am.state = 'posted'
            '''

        sql_query += '''
        ORDER BY
            am.date,
            am.name,
            aml.id
        '''
        self._cr.execute(sql_query, (self.date_from, self.date_to, company.id))

        for row in self._cr.fetchall():
            w.writerow(list(row))

        siren = company.vat[4:13]
        end_date = self.date_to.replace('-', '')
        suffix = ''
        if self.export_type == "nonofficial":
            suffix = '-NONOFFICIAL'
        fecvalue = fecfile.getvalue()
        self.write({
            'fec_data': base64.encodebytes(fecvalue),
            # Filename = <siren>FECYYYYMMDD where YYYYMMDD is the closing date
            'filename': '%sFEC%s%s.csv' % (siren, end_date, suffix),
        })
        fecfile.close()

        action = {
            'name': 'FEC',
            'type': 'ir.actions.act_url',
            'url': "web/content/?model=account.fr.fec&id=" + str(self.id) +
                   "&filename_field=filename&field=fec_data&download=true&filename=" +
                   self.filename,
            'target': 'self',
        }
        return action
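
The Debit/Credit cells handled above are strings with a comma as decimal separator, so merging the unaffected-earnings balance means parsing with ',' -> '.' and rendering back with '.' -> ','. Below is a minimal standalone sketch of that round-trip; the helper names and the fixed two-decimal rendering are illustrative, not taken from the module:

# standalone sketch, not part of the Odoo module above
def _parse_fec_amount(value):
    """'1234,56' -> 1234.56"""
    return float(value.replace(',', '.'))

def _render_fec_amount(value):
    """1234.56 -> '1234,56' (two decimals, for illustration)"""
    return ('%.2f' % value).replace('.', ',')

def merge_balance(debit, credit, extra_debit, extra_credit):
    """Return (Debit, Credit) strings after adding an extra debit/credit pair."""
    balance = (_parse_fec_amount(debit) - _parse_fec_amount(credit)
               + _parse_fec_amount(extra_debit) - _parse_fec_amount(extra_credit))
    if balance > 0:
        return _render_fec_amount(balance), '0,00'
    return '0,00', _render_fec_amount(-balance)

# merge_balance('100,00', '0,00', '0,00', '25,50') -> ('74,50', '0,00')
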
Beispiel #24
0
        def _process(format, rows, buffer):
            this = self[0]
            if format == 'txt':
                writer = pycompat.csv_writer(buffer,
                                             delimiter='|',
                                             quotechar='|',
                                             quoting=csv.QUOTE_MINIMAL,
                                             dialect='UNIX')
                if this.type == 'ventas':

                    number_file = 1
                    for inv in rows:
                        fecha = inv.date_invoice.strftime('%d/%m/%Y')
                        writer.writerow(
                            (str('3'), str(number_file), str(fecha),
                             str(inv.n_factura), str(inv.n_autorizacion),
                             str(inv.state_sin), str(inv.nit_ci or ''),
                             str(inv.razon_social or ''), str(inv.amount_open),
                             str(inv.amount_ice_iehd), str(inv.amount_exe),
                             '0',
                             str(inv.amount_open - inv.amount_ice_iehd -
                                 inv.amount_exe), str(inv.amount_des),
                             str(inv.amount_open - inv.amount_ice_iehd -
                                 inv.amount_exe - inv.amount_des),
                             str(inv.amount_iva), str(inv.codigo_control)))

                        number_file = number_file + 1
                elif this.type == 'compras':
                    number_file = 1
                    for inv in rows:
                        fecha = inv.date_invoice.strftime('%d/%m/%Y')
                        writer.writerow(
                            (str('1'), str(number_file), str(fecha),
                             str(inv.nit_ci), str(inv.razon_social
                                                  or ''), str(inv.n_factura),
                             str(inv.n_dui
                                 or '0'), str(inv.n_autorizacion or '0'),
                             str(inv.amount_open), str(inv.amount_exe),
                             str(inv.amount_open - inv.amount_exe),
                             str(inv.amount_des),
                             str(inv.amount_open - inv.amount_exe -
                                 inv.amount_des), str(inv.amount_iva),
                             str(inv.codigo_control
                                 or '0'), str(inv.tipo_com or '')))

                        number_file = number_file + 1
            elif format == 'csv':
                writer = pycompat.csv_writer(buffer,
                                             delimiter=',',
                                             quotechar='|',
                                             quoting=csv.QUOTE_MINIMAL,
                                             dialect='UNIX')

                if this.type == 'ventas':

                    writer.writerow(
                        ("ESPECIFICACION", "NRO", "FECHA DE LA FACTURA",
                         "N DE LA FACTURA", "N DE AUTORIZACION", "ESTADO",
                         "NIT/CI CLIENTE", "NOMBRE O RAZON SOCIAL",
                         "IMPORTE TOTAL DE LA VENTA",
                         "IMPORTE ICE/IEHD/IPJ/TASAS/OTROS NO SUJETOS AL IVA",
                         "EXPORTACIONES Y OPERACIONES EXENTAS",
                         "VENTAS GRAVADAS A TASA CERO", "SUBTOTAL",
                         "DESCUENTOS BONIFICACIONES Y REBAJAS SUJETAS AL IVA",
                         "IMPORTE BASE PARA DEBITO FISCAL", "DEBITO FISCAL",
                         "CODIGO DE CONTROL"))
                    number_file = 1
                    for inv in rows:
                        fecha = inv.date_invoice.strftime('%d/%m/%Y')
                        writer.writerow(
                            (str('3'), str(number_file), str(fecha),
                             str(inv.n_factura), str(inv.n_autorizacion),
                             str(inv.state_sin), str(inv.nit_ci or ''),
                             str(inv.razon_social or ''), str(inv.amount_open),
                             str(inv.amount_ice_iehd), str(inv.amount_exe),
                             '0',
                             str(inv.amount_open - inv.amount_ice_iehd -
                                 inv.amount_exe), str(inv.amount_des),
                             str(inv.amount_open - inv.amount_ice_iehd -
                                 inv.amount_exe - inv.amount_des),
                             str(inv.amount_iva), str(inv.codigo_control)))
                        number_file = number_file + 1
                elif this.type == 'compras':
                    writer.writerow(
                        ("ESPECIFICACION", "NRO", "FECHA DE LA FACTURA",
                         "NIT PROVEEDOR", "NOMBRE Y APELLIDO/RAZON SOCIAL",
                         "N° DE LA FACTURA", "N° DE DUI", "N° DE AUTORIZACIÓN",
                         "IMPORTE TOTAL DE LA COMPRA",
                         "IMPORTE NO SUJETO A CREDITO FISCAL", "SUBTOTAL",
                         "DESCUENTOS BONIFICACIONES Y REBAJAS OBTENIDAS",
                         "IMPORTE BASE PARA CRÉDITO FISCAL", "CRÉDITO FISCAL",
                         "CODIGO DE CONTROL", "TIPO DE COMPRA"))
                    number_file = 1
                    for inv in rows:
                        fecha = inv.date_invoice.strftime('%d/%m/%Y')
                        writer.writerow(
                            (str('1'), str(number_file), str(fecha),
                             str(inv.nit_ci), str(inv.razon_social
                                                  or ''), str(inv.n_factura),
                             str(inv.n_dui
                                 or '0'), str(inv.n_autorizacion or '0'),
                             str(inv.amount_open), str(inv.amount_exe),
                             str(inv.amount_open - inv.amount_exe),
                             str(inv.amount_des),
                             str(inv.amount_open - inv.amount_exe -
                                 inv.amount_des), str(inv.amount_iva),
                             str(inv.codigo_control
                                 or '0'), str(inv.tipo_com or '')))
                        number_file = number_file + 1

            else:
                raise Exception(
                    _('Unrecognized format: must be one of '
                      'txt or csv (received %s).') % format)
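
As a usage illustration of the pipe-delimited 'txt' export above, here is a hedged standalone sketch built on the standard csv module and a StringIO buffer instead of pycompat.csv_writer; every value in the sample row is invented:

import csv
import io

buf = io.StringIO()
writer = csv.writer(buf, delimiter='|', quotechar='|',
                    quoting=csv.QUOTE_MINIMAL, dialect='unix')

# spec code 3, running number, date, invoice no., authorization no., state,
# customer NIT/CI, customer name, amounts (total, ICE, exempt, zero-rated,
# subtotal, discounts, tax base, IVA) and control code -- sample values only
writer.writerow(('3', '1', '01/03/2020', '1458', '29040011007', 'V',
                 '1023456022', 'CLIENTE SRL', '1160.00', '0.00', '0.00', '0',
                 '1160.00', '0.00', '1160.00', '150.80', '7A-9B-C3-D1'))

print(buf.getvalue(), end='')
# 3|1|01/03/2020|1458|29040011007|V|1023456022|CLIENTE SRL|1160.00|0.00|0.00|0|1160.00|0.00|1160.00|150.80|7A-9B-C3-D1
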
Beispiel #25
0
    def generate_fec(self):
        self.ensure_one()
        # We choose to implement the flat file instead of the XML
        # file for 2 reasons:
        # 1) the XSD file requires the label to be on the account.move,
        # but Odoo has the label on the account.move.line, so that's a
        # problem!
        # 2) CSV files are easier to read/use for a regular accountant.
        # So it will be easier for the accountant to check the file before
        # sending it to the fiscal administration
        header = [
            u'JournalCode',    # 0
            u'JournalLib',     # 1
            u'EcritureNum',    # 2
            u'EcritureDate',   # 3
            u'CompteNum',      # 4
            u'CompteLib',      # 5
            u'CompAuxNum',     # 6  We use partner.id
            u'CompAuxLib',     # 7
            u'PieceRef',       # 8
            u'PieceDate',      # 9
            u'EcritureLib',    # 10
            u'Debit',          # 11
            u'Credit',         # 12
            u'EcritureLet',    # 13
            u'DateLet',        # 14
            u'ValidDate',      # 15
            u'Montantdevise',  # 16
            u'Idevise',        # 17
            ]

        company = self.env.user.company_id
        if not company.vat:
            raise Warning(
                _("Missing VAT number for company %s") % company.name)
        if company.vat[0:2] != 'FR':
            raise Warning(
                _("FEC is for French companies only !"))

        fecfile = io.BytesIO()
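        # pycompat.csv_writer encodes each row and writes it into this BytesIO buffer (FEC files are pipe-delimited)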
        w = pycompat.csv_writer(fecfile, delimiter='|')
        w.writerow(header)

        # INITIAL BALANCE
        unaffected_earnings_xml_ref = self.env.ref('account.data_unaffected_earnings')
        unaffected_earnings_line = True  # used to make sure that we add the unaffected earnings initial balance only once
        if unaffected_earnings_xml_ref:
            # compute the benefit/loss of last year to add to the initial balance of the current year's earnings account
            unaffected_earnings_results = self.do_query_unaffected_earnings()
            unaffected_earnings_line = False

        sql_query = '''
        SELECT
            'OUV' AS JournalCode,
            'Balance initiale' AS JournalLib,
            'OUVERTURE/' || %s AS EcritureNum,
            %s AS EcritureDate,
            MIN(aa.code) AS CompteNum,
            replace(replace(MIN(aa.name), '|', '/'), '\t', '') AS CompteLib,
            '' AS CompAuxNum,
            '' AS CompAuxLib,
            '-' AS PieceRef,
            %s AS PieceDate,
            '/' AS EcritureLib,
            replace(CASE WHEN sum(aml.balance) <= 0 THEN '0,00' ELSE to_char(SUM(aml.balance), '000000000000000D99') END, '.', ',') AS Debit,
            replace(CASE WHEN sum(aml.balance) >= 0 THEN '0,00' ELSE to_char(-SUM(aml.balance), '000000000000000D99') END, '.', ',') AS Credit,
            '' AS EcritureLet,
            '' AS DateLet,
            %s AS ValidDate,
            '' AS Montantdevise,
            '' AS Idevise,
            MIN(aa.id) AS CompteID
        FROM
            account_move_line aml
            LEFT JOIN account_move am ON am.id=aml.move_id
            JOIN account_account aa ON aa.id = aml.account_id
            LEFT JOIN account_account_type aat ON aa.user_type_id = aat.id
        WHERE
            am.date < %s
            AND am.company_id = %s
            AND aat.include_initial_balance = 't'
            AND (aml.debit != 0 OR aml.credit != 0)
        '''

        # For official report: only use posted entries
        if self.export_type == "official":
            sql_query += '''
            AND am.state = 'posted'
            '''

        sql_query += '''
        GROUP BY aml.account_id, aat.type
        HAVING sum(aml.balance) != 0
        AND aat.type not in ('receivable', 'payable')
        '''
        formatted_date_from = fields.Date.to_string(self.date_from).replace('-', '')
        date_from = self.date_from
        formatted_date_year = date_from.year
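        # query parameters, in order: EcritureNum year, EcritureDate, PieceDate, ValidDate, opening-date cutoff, company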
        self._cr.execute(
            sql_query, (formatted_date_year, formatted_date_from, formatted_date_from, formatted_date_from, self.date_from, company.id))

        for row in self._cr.fetchall():
            listrow = list(row)
            account_id = listrow.pop()
            if not unaffected_earnings_line:
                account = self.env['account.account'].browse(account_id)
                if account.user_type_id.id == self.env.ref('account.data_unaffected_earnings').id:
                    # add the benefit/loss of previous fiscal year to the first unaffected earnings account found.
                    unaffected_earnings_line = True
                    current_amount = float(listrow[11].replace(',', '.')) - float(listrow[12].replace(',', '.'))
                    unaffected_earnings_amount = float(unaffected_earnings_results[11].replace(',', '.')) - float(unaffected_earnings_results[12].replace(',', '.'))
                    listrow_amount = current_amount + unaffected_earnings_amount
                    if listrow_amount > 0:
                        listrow[11] = str(listrow_amount).replace('.', ',')
                        listrow[12] = '0,00'
                    else:
                        listrow[11] = '0,00'
                        listrow[12] = str(-listrow_amount).replace('.', ',')
            w.writerow(listrow)
        # if the unaffected earnings account wasn't in the selection yet: add it manually
        if (not unaffected_earnings_line
            and unaffected_earnings_results
            and (unaffected_earnings_results[11] != '0,00'
                 or unaffected_earnings_results[12] != '0,00')):
            # search an unaffected earnings account
            unaffected_earnings_account = self.env['account.account'].search([('user_type_id', '=', self.env.ref('account.data_unaffected_earnings').id)], limit=1)
            if unaffected_earnings_account:
                unaffected_earnings_results[4] = unaffected_earnings_account.code
                unaffected_earnings_results[5] = unaffected_earnings_account.name
            w.writerow(unaffected_earnings_results)

        # INITIAL BALANCE - receivable/payable
        sql_query = '''
        SELECT
            'OUV' AS JournalCode,
            'Balance initiale' AS JournalLib,
            'OUVERTURE/' || %s AS EcritureNum,
            %s AS EcritureDate,
            MIN(aa.code) AS CompteNum,
            replace(MIN(aa.name), '|', '/') AS CompteLib,
            CASE WHEN rp.ref IS null OR rp.ref = ''
            THEN COALESCE('ID ' || rp.id, '')
            ELSE replace(rp.ref, '|', '/')
            END
            AS CompAuxNum,
            COALESCE(replace(rp.name, '|', '/'), '') AS CompAuxLib,
            '-' AS PieceRef,
            %s AS PieceDate,
            '/' AS EcritureLib,
            replace(CASE WHEN sum(aml.balance) <= 0 THEN '0,00' ELSE to_char(SUM(aml.balance), '000000000000000D99') END, '.', ',') AS Debit,
            replace(CASE WHEN sum(aml.balance) >= 0 THEN '0,00' ELSE to_char(-SUM(aml.balance), '000000000000000D99') END, '.', ',') AS Credit,
            '' AS EcritureLet,
            '' AS DateLet,
            %s AS ValidDate,
            '' AS Montantdevise,
            '' AS Idevise,
            MIN(aa.id) AS CompteID
        FROM
            account_move_line aml
            LEFT JOIN account_move am ON am.id=aml.move_id
            LEFT JOIN res_partner rp ON rp.id=aml.partner_id
            JOIN account_account aa ON aa.id = aml.account_id
            LEFT JOIN account_account_type aat ON aa.user_type_id = aat.id
        WHERE
            am.date < %s
            AND am.company_id = %s
            AND aat.include_initial_balance = 't'
            AND (aml.debit != 0 OR aml.credit != 0)
        '''

        # For official report: only use posted entries
        if self.export_type == "official":
            sql_query += '''
            AND am.state = 'posted'
            '''

        sql_query += '''
        GROUP BY aml.account_id, aat.type, rp.ref, rp.id
        HAVING sum(aml.balance) != 0
        AND aat.type in ('receivable', 'payable')
        '''
        self._cr.execute(
            sql_query, (formatted_date_year, formatted_date_from, formatted_date_from, formatted_date_from, self.date_from, company.id))

        for row in self._cr.fetchall():
            listrow = list(row)
            account_id = listrow.pop()
            w.writerow(listrow)  # pycompat.csv_writer already encodes the strings for the bytes buffer

        # LINES
        sql_query = '''
        SELECT
            replace(replace(aj.code, '|', '/'), '\t', '') AS JournalCode,
            replace(replace(aj.name, '|', '/'), '\t', '') AS JournalLib,
            replace(replace(am.name, '|', '/'), '\t', '') AS EcritureNum,
            TO_CHAR(am.date, 'YYYYMMDD') AS EcritureDate,
            aa.code AS CompteNum,
            replace(replace(aa.name, '|', '/'), '\t', '') AS CompteLib,
            CASE WHEN rp.ref IS null OR rp.ref = ''
            THEN COALESCE('ID ' || rp.id, '')
            ELSE replace(rp.ref, '|', '/')
            END
            AS CompAuxNum,
            COALESCE(replace(replace(rp.name, '|', '/'), '\t', ''), '') AS CompAuxLib,
            CASE WHEN am.ref IS null OR am.ref = ''
            THEN '-'
            ELSE replace(replace(am.ref, '|', '/'), '\t', '')
            END
            AS PieceRef,
            TO_CHAR(am.date, 'YYYYMMDD') AS PieceDate,
            CASE WHEN aml.name IS NULL THEN '/' ELSE replace(replace(aml.name, '|', '/'), '\t', '') END AS EcritureLib,
            replace(CASE WHEN aml.debit = 0 THEN '0,00' ELSE to_char(aml.debit, '000000000000000D99') END, '.', ',') AS Debit,
            replace(CASE WHEN aml.credit = 0 THEN '0,00' ELSE to_char(aml.credit, '000000000000000D99') END, '.', ',') AS Credit,
            CASE WHEN rec.name IS NULL THEN '' ELSE rec.name END AS EcritureLet,
            CASE WHEN aml.full_reconcile_id IS NULL THEN '' ELSE TO_CHAR(rec.create_date, 'YYYYMMDD') END AS DateLet,
            TO_CHAR(am.date, 'YYYYMMDD') AS ValidDate,
            CASE
                WHEN aml.amount_currency IS NULL OR aml.amount_currency = 0 THEN ''
                ELSE replace(to_char(aml.amount_currency, '000000000000000D99'), '.', ',')
            END AS Montantdevise,
            CASE WHEN aml.currency_id IS NULL THEN '' ELSE rc.name END AS Idevise
        FROM
            account_move_line aml
            LEFT JOIN account_move am ON am.id=aml.move_id
            LEFT JOIN res_partner rp ON rp.id=aml.partner_id
            JOIN account_journal aj ON aj.id = am.journal_id
            JOIN account_account aa ON aa.id = aml.account_id
            LEFT JOIN res_currency rc ON rc.id = aml.currency_id
            LEFT JOIN account_full_reconcile rec ON rec.id = aml.full_reconcile_id
        WHERE
            am.date >= %s
            AND am.date <= %s
            AND am.company_id = %s
            AND (aml.debit != 0 OR aml.credit != 0)
        '''

        # For official report: only use posted entries
        if self.export_type == "official":
            sql_query += '''
            AND am.state = 'posted'
            '''

        sql_query += '''
        ORDER BY
            am.date,
            am.name,
            aml.id
        '''
        self._cr.execute(
            sql_query, (self.date_from, self.date_to, company.id))

        for row in self._cr.fetchall():
            w.writerow(list(row))

        siren = company.vat[4:13]
        end_date = fields.Date.to_string(self.date_to).replace('-', '')
        suffix = ''
        if self.export_type == "nonofficial":
            suffix = '-NONOFFICIAL'
        fecvalue = fecfile.getvalue()
        self.write({
            'fec_data': base64.encodebytes(fecvalue),
            # Filename = <siren>FECYYYYMMDD where YYYYMMDD is the closing date
            'filename': '%sFEC%s%s.csv' % (siren, end_date, suffix),
            })
        fecfile.close()

        action = {
            'name': 'FEC',
            'type': 'ir.actions.act_url',
            'url': "web/content/?model=account.fr.fec&id=" + str(self.id) + "&filename_field=filename&field=fec_data&download=true&filename=" + self.filename,
            'target': 'self',
            }
        return action
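
A hedged sketch of the filename rule used at the end of generate_fec(): the SIREN is sliced out of the French VAT number (FR + 2-character key + 9-digit SIREN) and the closing date is appended as YYYYMMDD. The helper and the sample VAT number below are illustrative only:

import datetime

def fec_filename(vat, date_to, official=True):
    """Build '<SIREN>FEC<YYYYMMDD>[-NONOFFICIAL].csv' the way generate_fec() does."""
    if not vat or not vat.startswith('FR'):
        raise ValueError("FEC is for French companies only")
    siren = vat[4:13]                      # skip 'FR' and the 2-character key
    end_date = date_to.strftime('%Y%m%d')  # closing date, e.g. 20231231
    suffix = '' if official else '-NONOFFICIAL'
    return '%sFEC%s%s.csv' % (siren, end_date, suffix)

print(fec_filename('FR40123456824', datetime.date(2023, 12, 31)))
# -> 123456824FEC20231231.csv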