    def test_import_from_csv_file(self):
        """Test that the import from a single CSV file works"""
        with file_open('test_translation_import/i18n/dot.csv', 'rb') as f:
            po_file = base64.encodebytes(f.read())
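        # dot.csv is a standard Flectra translation export holding a single
        # 'code'-type term that maps "Accounting" to "samva" (columns assumed:
        # module, type, name, res_id, src, value, comments).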

        import_tlh = self.env["base.language.import"].create({
            'name': 'Dothraki',
            'code': 'dot',
            'data': po_file,
            'filename': 'dot.csv',
        })
        with mute_logger('flectra.addons.base.models.res_lang'):
            import_tlh.import_lang()

        dot_lang = self.env['res.lang']._lang_get('dot')
        self.assertTrue(dot_lang, "The imported language was not created")

        trans_count = self.env['ir.translation'].search_count([('lang', '=', 'dot')])
        self.assertEqual(trans_count, 1,
                         "The imported translations were not created")

        self.env.context = dict(self.env.context, lang="dot")
        self.assertEqual(_("Accounting"), "samva",
                         "The code translation was not applied")

    def test_import_from_po_file(self):
        """Test that the import from a single PO file works"""
        with file_open('test_translation_import/i18n/tlh.po', 'rb') as f:
            po_file = base64.encodebytes(f.read())

        import_tlh = self.env["base.language.import"].create({
            'name': 'Klingon',
            'code': 'tlh',
            'data': po_file,
            'filename': 'tlh.po',
        })
        with mute_logger('flectra.addons.base.models.res_lang'):
            import_tlh.import_lang()

        tlh_lang = self.env['res.lang']._lang_get('tlh')
        self.assertTrue(tlh_lang, "The imported language was not created")

        trans_count = self.env['ir.translation'].search_count([('lang', '=', 'tlh')])
        self.assertEqual(trans_count, 1,
                         "The imported translations were not created")

        self.env.context = dict(self.env.context, lang="tlh")
        self.assertEqual(_("Klingon"), "tlhIngan",
                         "The code translation was not applied")
Example #3
    def setUpClass(cls,
                   chart_template_ref='l10n_es.account_chart_template_full',
                   edi_format_ref='l10n_es_edi_sii.edi_es_sii'):
        super().setUpClass(chart_template_ref=chart_template_ref,
                           edi_format_ref=edi_format_ref)

        cls.frozen_today = datetime(year=2019, month=1, day=1,
                                    hour=0, minute=0, second=0,
                                    tzinfo=timezone('utc'))

        # Allow to see the full result of AssertionError.
        cls.maxDiff = None

        # ==== Config ====

        cls.certificate = cls.env['l10n_es_edi.certificate'].create({
            'content': base64.encodebytes(misc.file_open(
                "l10n_es_edi_sii/demo/certificates/sello_entidad_act.p12",
                'rb').read()),
            'password': '******',
        })
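        # Binary fields such as 'content' store base64-encoded data, hence the
        # encodebytes() call around the raw .p12 bytes read via file_open().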

        cls.company_data['company'].write({
            'country_id': cls.env.ref('base.es').id,
            'state_id': cls.env.ref('base.state_es_z').id,
            'l10n_es_edi_certificate_id': cls.certificate.id,
            'vat': 'ES59962470K',
            'l10n_es_edi_test_env': True,
        })

        # ==== Business ====

        cls.partner_a.write({
            'vat': 'BE0477472701',
            'country_id': cls.env.ref('base.be').id,
        })

        cls.partner_b.write({
            'vat': 'ESF35999705',
        })

        cls.product_t = cls.env["product.product"].create(
            {"name": "Test product"})
        cls.partner_t = cls.env["res.partner"].create({
            "name": "Test partner",
            "vat": "ESF35999705"
        })

    def test_export_pollution(self):
        """ Test that exporting the translation only exports the translations of the module """
        with file_open('test_translation_import/i18n/dot.csv', 'rb') as f:
            csv_file = base64.b64encode(f.read())

        # dot.csv only contains one term
        import_tlh = self.env["base.language.import"].create({
            'name': 'Dothraki',
            'code': 'dot',
            'data': csv_file,
            'filename': 'dot.csv',
        })
        with mute_logger('flectra.addons.base.models.res_lang'):
            import_tlh.import_lang()

        # create a translation that has the same src as an existing field but no
        # module information and a different res_id than the real field;
        # this translation should not be included in the export
        self.env['ir.translation'].create({
            'src': '1XBUO5PUYH2RYZSA1FTLRYS8SPCNU1UYXMEYMM25ASV7JC2KTJZQESZYRV9L8CGB',
            'value': '1XBUO5PUYH2RYZSA1FTLRYS8SPCNU1UYXMEYMM25ASV7JC2KTJZQESZYRV9L8CGB in Dothraki',
            'type': 'model',
            'name': 'ir.model.fields,field_description',
            'res_id': -1,
            'lang': 'dot',
        })
        module = self.env.ref('base.module_test_translation_import')
        export = self.env["base.language.export"].create({
            'lang': 'dot',
            'format': 'po',
            'modules': [(6, 0, [module.id])],
        })
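        # (6, 0, ids) is the x2many "replace" command, so the export is limited
        # to exactly the modules whose ids are listed.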
        export.act_getfile()
        po_file = export.data
        reader = TranslationFileReader(base64.b64decode(po_file).decode(),
                                       fileformat='po')
        for row in reader:
            if row['value']:
                # should contain only one row from the csv, not the manual one
                self.assertEqual(row['src'], "Accounting")
                self.assertEqual(row['value'], "samva")
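
The loop above should therefore see a single non-empty row, corresponding to an exported PO entry along these lines (illustrative, not the exact generated file):

msgid "Accounting"
msgstr "samva"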
Example #5
    def _fetch_content(self):
        """ Fetch content from file or database"""
        try:
            self.stat()
            if self._filename:
                # asset backed by a file on disk
                with closing(file_open(self._filename, 'rb',
                                       filter_ext=EXTENSIONS)) as fp:
                    return fp.read().decode('utf-8')
            else:
                # asset backed by an ir.attachment record (datas is base64)
                return base64.b64decode(self._ir_attach['datas']).decode('utf-8')
        except UnicodeDecodeError:
            raise AssetError('%s is not utf-8 encoded.' % self.name)
        except IOError:
            raise AssetNotFound('File %s does not exist.' % self.name)
        except:
            raise AssetError('Could not get content for %s.' % self.name)

    def test_lazy_translation(self):
        """Test that lazy code translations (_lt) are only resolved when evaluated"""
        with file_open('test_translation_import/i18n/tlh.po', 'rb') as f:
            po_file = base64.encodebytes(f.read())

        import_tlh = self.env["base.language.import"].create({
            'name': 'Klingon',
            'code': 'tlh',
            'data': po_file,
            'filename': 'tlh.po',
        })
        with mute_logger('flectra.addons.base.models.res_lang'):
            import_tlh.import_lang()

        context = {'lang': "tlh"}
        self.assertEqual(_("Klingon"), "tlhIngan",
                         "The direct code translation was not applied")
        context = None

        # Lazy strings must be explicitly cast to str before being compared
        with self.assertRaises(NotImplementedError):
            TRANSLATED_TERM == "Klingon"
        self.assertEqual(str(TRANSLATED_TERM), "Klingon",
                         "The translation should not be applied yet")

        context = {'lang': "tlh"}
        self.assertEqual(str(TRANSLATED_TERM), "tlhIngan",
                         "The lazy code translation was not applied")

        self.assertEqual("Do you speak " + TRANSLATED_TERM,
                         "Do you speak tlhIngan",
                         "str + _lt concatenation failed")
        self.assertEqual(TRANSLATED_TERM + ", I speak it",
                         "tlhIngan, I speak it",
                         "_lt + str concatenation failed")
        self.assertEqual(TRANSLATED_TERM + TRANSLATED_TERM, "tlhIngantlhIngan",
                         "_lt + _lt concatenation failed")
Example #7
    def get_asset_content(self, url, url_info=None, custom_attachments=None):
        """
        Fetch the content of an asset (scss / js) file. The content is either
        read from the related file on disk or taken from the corresponding
        custom ir.attachment record.

        Params:
            url (str): the URL of the asset (scss / js) file/ir.attachment

            url_info (dict, optional):
                the related url info (see get_asset_info); passing it lets
                callers that already have this info avoid recomputing it here

            custom_attachments (ir.attachment(), optional):
                the custom ir.attachment records the function might need to
                search in; passing them lets callers that already have these
                records avoid re-fetching them here

        Returns:
            bytes: the utf-8 encoded content of the asset (scss / js), or False
            if the asset is customized but no matching attachment is found
        """
        if url_info is None:
            url_info = self.get_asset_info(url)

        if url_info["customized"]:
            # If the file is already customized, the content is found in the
            # corresponding attachment
            attachment = None
            if custom_attachments is None:
                attachment = self._get_custom_attachment(url)
            else:
                attachment = custom_attachments.filtered(
                    lambda r: r.url == url)
            return attachment and base64.b64decode(attachment.datas) or False

        # If the file is not yet customized, the content is found by reading
        # the local file
        with misc.file_open(url.strip('/'), 'rb', filter_ext=EXTENSIONS) as f:
            return f.read()
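
A minimal usage sketch, assuming self is the assets-utils mixin providing this method and that the URL points at a bundled SCSS file (both the URL and the surrounding model are assumptions):

url = '/web/static/src/scss/primary_variables.scss'   # hypothetical asset URL
info = self.get_asset_info(url)                        # compute the url info once...
content = self.get_asset_content(url, url_info=info)   # ...so it is not recomputed here
if content:
    text = content.decode('utf-8')                     # raw bytes from disk or attachment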
Example #8
    def convert_to_pdfa(self):
        """
        Transform the opened PDF file into a PDF/A compliant file
        """
        # Set the PDF version to 1.7 (as PDF/A-3 is based on version 1.7) and make it PDF/A compliant.
        # See https://github.com/veraPDF/veraPDF-validation-profiles/wiki/PDFA-Parts-2-and-3-rules#rule-612-1

        # " The file header shall begin at byte zero and shall consist of "%PDF-1.n" followed by a single EOL marker,
        # where 'n' is a single digit number between 0 (30h) and 7 (37h) "
        # " The aforementioned EOL marker shall be immediately followed by a % (25h) character followed by at least four
        # bytes, each of whose encoded byte values shall have a decimal value greater than 127 "
        self._header = b"%PDF-1.7\n%\xFF\xFF\xFF\xFF"

        # Add a document ID to the trailer. This is only needed when using encryption with regular PDF, but is required
        # when using PDF/A
        pdf_id = ByteStringObject(md5(self._reader.stream.getvalue()).digest())
        # The first string is based on the content at the time of creating the file, while the second is based on the
        # content of the file when it was last updated. When creating a PDF, both are set to the same value.
        self._ID = ArrayObject((pdf_id, pdf_id))

        with file_open('data/files/sRGB2014.icc', subdir='tools',
                       mode='rb') as icc_profile:
            icc_profile_file_data = compress(icc_profile.read())

        icc_profile_stream_obj = DecodedStreamObject()
        icc_profile_stream_obj.setData(icc_profile_file_data)
        icc_profile_stream_obj.update({
            NameObject("/Filter"): NameObject("/FlateDecode"),
            NameObject("/N"): NumberObject(3),
            NameObject("/Length"): NameObject(str(len(icc_profile_file_data))),
        })

        icc_profile_obj = self._addObject(icc_profile_stream_obj)

        output_intent_dict_obj = DictionaryObject()
        output_intent_dict_obj.update({
            NameObject("/S"): NameObject("/GTS_PDFA1"),
            NameObject("/OutputConditionIdentifier"): createStringObject("sRGB"),
            NameObject("/DestOutputProfile"): icc_profile_obj,
            NameObject("/Type"): NameObject("/OutputIntent"),
        })

        output_intent_obj = self._addObject(output_intent_dict_obj)
        self._root_object.update({
            NameObject("/OutputIntents"): ArrayObject([output_intent_obj]),
        })

        pages = self._root_object['/Pages']['/Kids']

        # PDF/A requires the glyph width arrays embedded in the pdf to be consistent with those from the font file,
        # but that does not seem to be the case for PDFs produced by wkhtmltopdf.
        if TTFont:
            fonts = {}
            # First browse through all the pages of the pdf file, to get a reference to all the fonts used in the PDF.
            for page in pages:
                for font in page.getObject()['/Resources']['/Font'].values():
                    for descendant in font.getObject()['/DescendantFonts']:
                        fonts[descendant.idnum] = descendant.getObject()

            # Then for each font, rewrite the width array with the information taken directly from the font file.
            # The new widths are computed as width = round(1000 * font_glyph_width / font_units_per_em)
            # See: http://martin.hoppenheit.info/blog/2018/pdfa-validation-and-inconsistent-glyph-width-information/
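            # e.g. a glyph that is 1298 font units wide in a 2048 units-per-em
            # font gets round(1000 * 1298 / 2048) = 634 in the /W array.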
            for font in fonts.values():
                font_file = font['/FontDescriptor']['/FontFile2']
                stream = io.BytesIO(decompress(font_file._data))
                ttfont = TTFont(stream)
                font_upm = ttfont['head'].unitsPerEm
                glyphs = ttfont.getGlyphSet()._hmtx.metrics
                glyph_widths = []
                for key, values in glyphs.items():
                    if key[:5] == 'glyph':
                        glyph_widths.append(
                            NumberObject(round(1000.0 * values[0] / font_upm)))

                font[NameObject('/W')] = ArrayObject(
                    [NumberObject(1), ArrayObject(glyph_widths)])
                stream.close()
        else:
            _logger.warning(
                'The fonttools package is not installed. Generated PDF may not be PDF/A compliant.'
            )

        outlines = self._root_object['/Outlines'].getObject()
        outlines[NameObject('/Count')] = NumberObject(1)

        # Set Flectra as the creator and producer of the document
        self.addMetadata({
            '/Creator': "Flectra",
            '/Producer': "Flectra",
        })
        self.is_pdfa = True
Example #9
def extend_trans_generate(lang, modules, cr):
    dbname = cr.dbname

    registry = flectra.registry(dbname)
    trans_obj = registry['ir.translation']
    model_data_obj = registry['ir.model.data']
    uid = 1

    query = 'SELECT name, model, res_id, module' \
            '  FROM ir_model_data'

    query_models = """SELECT m.id, m.model, imd.module
            FROM ir_model AS m, ir_model_data AS imd
            WHERE m.id = imd.res_id AND imd.model = 'ir.model' """

    if 'all_installed' in modules:
        query += ' WHERE module IN ( SELECT name FROM ir_module_module WHERE state = \'installed\') '
        query_models += " AND imd.module in ( SELECT name FROM ir_module_module WHERE state = 'installed') "
    query_param = None
    if 'all' not in modules:
        query += ' WHERE module IN %s'
        query_models += ' AND imd.module in %s'
        query_param = (tuple(modules), )
    query += ' ORDER BY module, model, name'
    query_models += ' ORDER BY module, model'

    cr.execute(query, query_param)

    _to_translate = set()

    def push_translation(module, type, name, id, source, comments=None):
        # empty and one-letter terms are ignored, they probably are not meant to be
        # translated, and would be very hard to translate anyway.
        if not source or len(source.strip()) <= 1:
            return

        tnx = (module, source, name, id, type, tuple(comments or ()))
        _to_translate.add(tnx)

    def encode(s):
        if isinstance(s, unicode):
            return s.encode('utf8')
        return s

    def push(mod, type, name, res_id, term):
        term = (term or '').strip()
        if len(term) > 2 or term in ENGLISH_SMALL_WORDS:
            push_translation(mod, type, name, res_id, term)

    def get_root_view(xml_id):
        view = model_data_obj.xmlid_to_object(cr, uid, xml_id)
        if view:
            while view.mode != 'primary':
                view = view.inherit_id
        xml_id = view.get_external_id(cr, uid).get(view.id, xml_id)
        return xml_id

    for (xml_name, model, res_id, module) in cr.fetchall():
        module = encode(module)
        model = encode(model)
        xml_name = "%s.%s" % (module, encode(xml_name))

        if model not in registry:
            _logger.error("Unable to find object %r", model)
            continue

        Model = registry[model]
        if not Model._translate:
            # explicitly disabled
            continue

        obj = Model.browse(cr, uid, res_id)
        if not obj.exists():
            _logger.warning("Unable to find object %r with id %d", model,
                            res_id)
            continue

        if model == 'ir.ui.view':
            d = etree.XML(encode(obj.arch))
            if obj.type == 'qweb':
                view_id = get_root_view(xml_name)
                push_qweb = lambda t, l: push(module, 'view', 'website',
                                              view_id, t)
                _extract_translatable_qweb_terms(d, push_qweb)
            else:
                push_view = lambda t, l: push(module, 'view', obj.model,
                                              xml_name, t)
                trans_parse_view(d, push_view)
        elif model == 'ir.actions.wizard':
            pass  # TODO Can model really be 'ir.actions.wizard' ?

        elif model == 'ir.model.fields':
            try:
                field_name = encode(obj.name)
            except AttributeError as exc:
                _logger.error("name error in %s: %s", xml_name, str(exc))
                continue
            objmodel = registry.get(obj.model)
            if (objmodel is None or field_name not in objmodel._columns
                    or not objmodel._translate):
                continue
            field_def = objmodel._columns[field_name]

            name = "%s,%s" % (encode(obj.model), field_name)
            push_translation(module, 'field', name, 0,
                             encode(field_def.string))

            if field_def.help:
                push_translation(module, 'help', name, 0,
                                 encode(field_def.help))

            if field_def.translate:
                ids = objmodel.search(cr, uid, [])
                obj_values = objmodel.read(cr, uid, ids, [field_name])
                for obj_value in obj_values:
                    res_id = obj_value['id']
                    if obj.name in ('ir.model', 'ir.ui.menu'):
                        res_id = 0
                    model_data_ids = model_data_obj.search(
                        cr, uid, [
                            ('model', '=', model),
                            ('res_id', '=', res_id),
                        ])
                    if not model_data_ids:
                        push_translation(module, 'model', name, 0,
                                         encode(obj_value[field_name]))

            if hasattr(field_def, 'selection') and isinstance(
                    field_def.selection, (list, tuple)):
                for dummy, val in field_def.selection:
                    push_translation(module, 'selection', name, 0, encode(val))

        elif model == 'ir.actions.report':
            name = encode(obj.report_name)
            fname = ""
            ##### Changes for Aeroo ######
            if obj.report_type == 'aeroo':
                trans_ids = trans_obj.search(cr, uid,
                                             [('type', '=', 'report'),
                                              ('res_id', '=', obj.id)])
                for t in trans_obj.read(cr, uid, trans_ids, ['name', 'src']):
                    push_translation(module, "report", t['name'], xml_name,
                                     t['src'].encode('UTF-8'))
            ##############################
            else:
                if obj.report_rml:
                    fname = obj.report_rml
                    parse_func = trans_parse_rml
                    report_type = "report"
                elif obj.report_xsl:
                    fname = obj.report_xsl
                    parse_func = trans_parse_xsl
                    report_type = "xsl"
                if fname and obj.report_type in ('pdf', 'xsl'):
                    try:
                        report_file = misc.file_open(fname)
                        try:
                            d = etree.parse(report_file)
                            for t in parse_func(d.iter()):
                                push_translation(module, report_type, name, 0,
                                                 t)
                        finally:
                            report_file.close()
                    except (IOError, etree.XMLSyntaxError):
                        _logger.exception(
                            "couldn't export translation for report %s %s %s",
                            name, report_type, fname)
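
A hedged call sketch: the snippet above collects terms into _to_translate, and the full function presumably goes on to return or yield them (not shown here). The database name below is an assumption.

import flectra

registry = flectra.registry('mydb')
with registry.cursor() as cr:
    extend_trans_generate('tlh', ['all_installed'], cr)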
Example #10
    def _get_default_faq(self):
        with misc.file_open('website_forum/data/forum_default_faq.html', 'r') as f:
            return f.read()
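
A hedged sketch of how such a helper is typically wired up, namely as the default of an Html field on the forum model (the field name and translate flag are assumptions):

    faq = fields.Html('Guidelines', default=_get_default_faq, translate=True)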