Beispiel #1
0
 def fill_crt(self, data_value, ws):
     """Fill the CRT worksheet: anchor the logo images, then write data.

     :param data_value: mapping of cell address -> value to write
     :param ws: the target worksheet
     """
     img_dir = os.path.join(get_module_path('servicio_base'), 'img')
     # Each logo goes to a fixed anchor cell on the sheet.
     for img_name, anchor in (('crt_logo.png', 'B4'),
                              ('ras_logo.png', 'N13'),
                              ('ras_logo2.png', 'F76')):
         self.add_image(ws, filepath=os.path.join(img_dir, img_name),
                        anchors=anchor)
     # Write each value into the cell addressed by its key.
     for cell, cell_value in data_value.items():
         ws[cell] = cell_value
Beispiel #2
0
    def _binary_ir_attachment_redirect_content(cls, record, default_mimetype='application/octet-stream'):
        """Resolve the content of a URL-type ir.attachment.

        Mainly used for theme image attachments.

        :param record: the ir.attachment record to serve
        :param default_mimetype: mimetype used when it cannot be guessed
        :return: (status, content, filename, mimetype, filehash) tuple;
            200 with base64 content for a file found inside a local
            module, 301 with the URL as redirect target otherwise, or
            None values when the record is not a URL attachment.
        """
        status = content = filename = filehash = None
        mimetype = getattr(record, 'mimetype', False)
        if record.type == 'url' and record.url:
            # If the URL is of the form /module/path, serve it locally.
            # Raw string: "\w" in a plain string is an invalid escape.
            url_match = re.match(r"^/(\w+)/(.+)$", record.url)
            if url_match:
                module = url_match.group(1)
                module_path = get_module_path(module)
                module_resource_path = get_resource_path(module, url_match.group(2))

                if module_path and module_resource_path:
                    module_path = os.path.join(os.path.normpath(module_path), '')  # join ensures the path ends with '/'
                    module_resource_path = os.path.normpath(module_resource_path)
                    # Only serve files located below the module directory
                    # (guards against escaping the addon's folder).
                    if module_resource_path.startswith(module_path):
                        with open(module_resource_path, 'rb') as f:
                            content = base64.b64encode(f.read())
                        status = 200
                        filename = os.path.basename(module_resource_path)
                        mimetype = guess_mimetype(base64.b64decode(content), default=default_mimetype)
                        filehash = '"%s"' % hashlib.md5(pycompat.to_text(content).encode('utf-8')).hexdigest()
            else:
                # Not a module-local URL: redirect the client to it.
                status = 301
                content = record.url

        return status, content, filename, mimetype, filehash
Beispiel #3
0
 def get_xml_records(self, module):
     """ Return all XML records from the given module """
     addon_dir = get_module_path(module)
     manifest = self._read_manifest(addon_dir)
     # The order of the keys is important: files must be loaded in the
     # same order as module/loading.py:load_module_graph does.
     contents = []
     for manifest_key in ("init_xml", "update_xml", "data"):
         for data_file in manifest.get(manifest_key) or []:
             if not data_file.lower().endswith(".xml"):
                 continue
             full_path = os.path.join(addon_dir, *data_file.split("/"))
             try:
                 with open(full_path, "r") as xml_handle:
                     contents.append(xml_handle.read())
             except UnicodeDecodeError:
                 _logger.warning(
                     "Encoding error: Unable to read %s", full_path)
     return contents
Beispiel #4
0
    def _binary_ir_attachment_redirect_content(cls, record, default_mimetype='application/octet-stream'):
        """Serve the content behind a URL-type ir.attachment record.

        Mainly used for theme image attachments.

        :param record: ir.attachment record to resolve
        :param default_mimetype: fallback mimetype when guessing fails
        :return: (status, content, filename, mimetype, filehash);
            status 200 with base64 content when the URL maps to a file
            inside a local module, 301 with the URL itself otherwise,
            or all-None when the record is not a URL attachment.
        """
        status = content = filename = filehash = None
        mimetype = getattr(record, 'mimetype', False)
        if record.type == 'url' and record.url:
            # URLs of the form /module/path are served from local addons.
            # Raw string avoids the invalid "\w" escape sequence.
            url_match = re.match(r"^/(\w+)/(.+)$", record.url)
            if url_match:
                module = url_match.group(1)
                module_path = get_module_path(module)
                module_resource_path = get_resource_path(module, url_match.group(2))

                if module_path and module_resource_path:
                    module_path = os.path.join(os.path.normpath(module_path), '')  # join ensures the path ends with '/'
                    module_resource_path = os.path.normpath(module_resource_path)
                    # Path containment check: never read outside the addon.
                    if module_resource_path.startswith(module_path):
                        with open(module_resource_path, 'rb') as f:
                            content = base64.b64encode(f.read())
                        status = 200
                        filename = os.path.basename(module_resource_path)
                        mimetype = guess_mimetype(base64.b64decode(content), default=default_mimetype)
                        filehash = '"%s"' % hashlib.md5(pycompat.to_text(content).encode('utf-8')).hexdigest()
            else:
                # External URL: issue a redirect instead of serving data.
                status = 301
                content = record.url

        return status, content, filename, mimetype, filehash
Beispiel #5
0
    def _binary_ir_attachment_redirect_content(
            cls, record, default_mimetype='application/octet-stream'):
        """Resolve the content of a URL-type ir.attachment.

        Mainly used for theme image attachments.

        :param record: the ir.attachment record to serve
        :param default_mimetype: kept for API compatibility; the record's
            own mimetype/checksum are used for local files
        :return: (status, content, filename, mimetype, filehash);
            200 with the raw file content for a file inside a local
            module, 301 with the URL as redirect target otherwise, or
            None values when the record is not a URL attachment.
        """
        status = content = filename = filehash = None
        mimetype = getattr(record, 'mimetype', False)
        if record.type == 'url' and record.url:
            # URLs of the form /module/path are served from local addons.
            # Raw string avoids the invalid "\w" escape sequence.
            url_match = re.match(r"^/(\w+)/(.+)$", record.url)
            if url_match:
                module = url_match.group(1)
                module_path = get_module_path(module)
                module_resource_path = get_resource_path(
                    module, url_match.group(2))

                if module_path and module_resource_path:
                    module_path = os.path.join(
                        os.path.normpath(module_path),
                        '')  # join ensures the path ends with '/'
                    module_resource_path = os.path.normpath(
                        module_resource_path)
                    # Only serve files located below the module directory.
                    if module_resource_path.startswith(module_path):
                        with open(module_resource_path, 'rb') as f:
                            content = f.read()
                        status = 200
                        filename = os.path.basename(module_resource_path)
                        mimetype = record.mimetype
                        filehash = record.checksum

            if not content:
                # No local file could be served: redirect to the URL.
                status = 301
                content = record.url

        return status, content, filename, mimetype, filehash
Beispiel #6
0
def get_yaml_test_comments(modules):
    """Collect the comment strings found in the YAML test files of *modules*.

    :param modules: iterable of module names to inspect
    :return: dict mapping module name to a list of
        (test basename, module-relative file path, comments) tuples
    """
    import ast

    if LooseVersion(release.major_version) >= LooseVersion('12.0'):
        # YAML tests are no longer supported since version 12.0
        return {}
    # literal_eval only parses Python literals, unlike eval() which would
    # execute arbitrary code found in the configuration value.
    ignore = ast.literal_eval(tools.config.get('ignored_tests') or '{}')
    test_comments_by_module = {}
    tests_by_module = _get_test_files_by_module(modules)
    for module in tests_by_module:
        module_path = get_module_path(module)
        if not _file_in_requested_directories(module_path) or \
                ignore.get(module) == 'all':
            continue
        res = []
        for file_path in tests_by_module[module]:
            if file_path in ignore.get(module, []):
                continue
            fp = os.path.join(module_path, file_path.replace('/', os.path.sep))
            if not os.path.exists(fp):
                _logger.error("No such file: %s", fp)
                continue
            with open(fp) as f_obj:
                root, ext = os.path.splitext(f_obj.name)
                if ext == '.yml':
                    comments = []
                    # NOTE(review): yaml.load without an explicit Loader is
                    # unsafe on untrusted input. These are the modules' own
                    # test files, but consider passing an explicit Loader.
                    for node in yaml.load(f_obj.read()):
                        if isinstance(node, string_types):
                            comments.append(node)
                    res.append((os.path.basename(root),
                                os.path.join(module, file_path), comments))
        if res:
            test_comments_by_module[module] = res
    return test_comments_by_module
def run_tests(package, report):
    """Discover and run the migration tests of *package*.

    :param package: a module graph node (with ``name`` and ``data``
        attributes), or the string ``'_deferred'`` for the global
        deferred test suite
    :param report: result holder with a ``record_result`` method
    """
    if package == '_deferred':
        name = 'deferred'
        tests_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                 'tests_deferred')
    else:
        name = package.name
        tests_dir = os.path.join(
            get_module_path(name),
            'migrations',
            adapt_version(package.data['version']),
            'tests',
        )
    # check for an environment variable because we don't want to mess
    # with odoo's config.py, but we also don't want to run existing
    # tests
    if os.environ.get('OPENUPGRADE_TESTS') and os.path.exists(tests_dir):
        import unittest
        # current_thread() replaces the camelCase alias currentThread(),
        # which is deprecated since Python 3.10.
        threading.current_thread().testing = True
        tests = unittest.defaultTestLoader.discover(tests_dir,
                                                    top_level_dir=tests_dir)
        report.record_result(
            unittest.TextTestRunner(
                verbosity=2,
                stream=TestStream(name)).run(tests).wasSuccessful())
        threading.current_thread().testing = False
Beispiel #8
0
    def button_analyse_code(self):
        """Analyse the source code of the selected modules.

        For each module: refresh its authors from the manifest 'author'
        value, recompute its module type from the configured rules, then
        scan its source files and write the aggregated metrics on the
        module record.
        """
        IrModuleAuthor = self.env["ir.module.author"]
        IrModuleTypeRule = self.env["ir.module.type.rule"]
        rules = IrModuleTypeRule.search([])

        cfg = self.env["ir.config_parameter"]
        val = cfg.get_param("module_analysis.exclude_directories", "")
        exclude_directories = [x.strip() for x in val.split(",") if x.strip()]
        val = cfg.get_param("module_analysis.exclude_files", "")
        exclude_files = [x.strip() for x in val.split(",") if x.strip()]

        for module in self:
            # Lazy %-args: the message is only formatted when INFO is on.
            _logger.info("Analysing Code for module %s ...", module.name)

            # Update Authors, based on manifest key. The value is either a
            # Python list literal ("[...]") or a comma-separated string.
            if module.author and module.author[0] == "[":
                author_txt_list = safe_eval(module.author)
            else:
                author_txt_list = (module.author
                                   and module.author.split(",")) or []

            # Strip whitespace and drop empty entries in a single pass.
            author_txt_list = [x.strip() for x in author_txt_list
                               if x.strip()]
            authors = [IrModuleAuthor._get_or_create(author_txt)
                       for author_txt in author_txt_list]
            module.author_ids = [author.id for author in authors]

            # Update Module Type, based on rules
            module.module_type_id = rules._get_module_type_id_from_module(
                module)

            # Get Path of module folder and parse the code
            module_path = get_module_path(module.name)

            # Gather the files to analyse, honouring the exclusion lists.
            analysed_datas = self._get_analyse_data_dict()
            file_extensions = analysed_datas.keys()
            file_list = self._get_files_to_analyse(module_path,
                                                   file_extensions,
                                                   exclude_directories,
                                                   exclude_files)

            # Accumulate each metric over all analysed files.
            for file_path, file_ext in file_list:
                file_res = SourceAnalysis.from_file(
                    file_path,
                    "",
                    encoding=self._get_module_encoding(file_ext))
                for k, v in analysed_datas.get(file_ext).items():
                    v["value"] += getattr(file_res, k)

            # Update the module with the accumulated datas
            values = {}
            for analyses in analysed_datas.values():
                for v in analyses.values():
                    values[v["field"]] = v["value"]
            module.write(values)
Beispiel #9
0
 def get_content(self, url, xmlid):
     """Return the utf-8 decoded content of the asset at *url*.

     A customized copy stored as an ir.attachment takes precedence over
     the file shipped on disk with the module.

     :param url: asset URL of the form /module/path.ext
     :param xmlid: external id used to build the customized URL
     """
     custom_url = self._get_custom_url(url, xmlid)
     custom_attachment = self._get_custom_attachment(custom_url)
     if custom_attachment.exists():
         return base64.b64decode(custom_attachment.datas).decode('utf-8')
     else:
         # Raw string: "\w" / "\." in a plain string are invalid escapes.
         match = re.compile(r"^/(\w+)/(.+?)(\.custom\.(.+))?\.(\w+)$").match(url)
         module_path = module.get_module_path(match.group(1))
         resource_path = "%s.%s" % (match.group(2), match.group(5))
         module_resource_path = module.get_resource_path(module_path, resource_path)
         with open(module_resource_path, "rb") as file:
             return file.read().decode('utf-8')
Beispiel #10
0
    def _compute_checksum_dir(self):
        """Compute the sha1 directory hash of each module's source tree,
        skipping the configured file extensions."""
        param = self.env["ir.config_parameter"].get_param(
            "module_auto_update.checksum_excluded_extensions",
            "pyc,pyo",
        )
        excluded = param.split(",")

        for record in self:
            record.checksum_dir = dirhash(
                get_module_path(record.name),
                'sha1',
                excluded_extensions=excluded,
            )
Beispiel #11
0
 def load_xlsx_template(self, template_ids, addon=False):
     """Load the binary content of each template's xlsx file from disk.

     :param template_ids: ids of the templates to (re)load
     :param addon: addon name to search in; derived from the first
         template's external id when not given
     :return: True
     """
     for template in self.browse(template_ids):
         if not addon:
             addon = list(template.get_external_id().values())[0].split(".")[0]
         addon_path = get_module_path(addon)
         file_path = False
         # Walk the whole addon tree; when several files share the name,
         # the last one found in walk order wins (as before).
         for root, _dirs, files in os.walk(addon_path):
             for name in files:
                 if name == template.fname:
                     file_path = os.path.abspath(opj(root, name))
         if file_path:
             # Context manager: the original open() leaked the file
             # handle until garbage collection.
             with open(file_path, "rb") as template_file:
                 template.datas = base64.b64encode(template_file.read())
     return True
Beispiel #12
0
    def get_asset_content(self, url, url_info=None, custom_attachments=None):
        """
        Return the content of an asset (scss / js) file.

        The content comes from the customized ir.attachment record when
        the asset has been customized, and from the corresponding file on
        disk otherwise.

        Params:
            url (str): the URL of the asset (scss / js) file/ir.attachment

            url_info (dict, optional): the related url info (see
                get_asset_info); passing it avoids recomputing it here

            custom_attachments (ir.attachment(), optional): the related
                custom ir.attachment records to search into; passing them
                avoids re-fetching them here

        Returns:
            utf-8 encoded content of the asset (scss / js)
        """
        info = url_info if url_info is not None else self.get_asset_info(url)

        if info["customized"]:
            # Customized asset: the content lives in the attachment.
            if custom_attachments is None:
                attachment = self._get_custom_attachment(url)
            else:
                attachment = custom_attachments.filtered(
                    lambda record: record.url == url)
            return attachment and base64.b64decode(attachment.datas) or False

        # Not customized: read the file shipped with the module, but only
        # when it really lives below the module's own directory.
        module_path = get_module_path(info["module"])
        resource_path = get_resource_path(info["module"],
                                          info["resource_path"])
        if not (module_path and resource_path):
            return None
        # join ensures the module path ends with a path separator
        normalized_module = os.path.join(os.path.normpath(module_path), '')
        normalized_resource = os.path.normpath(resource_path)
        if normalized_resource.startswith(normalized_module):
            with open(normalized_resource, "rb") as asset_file:
                return asset_file.read()
    def find(self):
        """Collect modules missing from the addons path.

        Missing modules that are uninstalled are purged immediately; the
        others are returned as one2many create tuples.
        """
        missing = []
        PurgeLine = self.env['cleanup.purge.line.module']
        for module in self.env['ir.module.module'].search([]):
            if get_module_path(module.name):
                # Still present on disk: nothing to do.
                continue
            if module.state == 'uninstalled':
                # Gone from disk and uninstalled: purge right away.
                PurgeLine.create({'name': module.name}).purge()
                continue
            missing.append((0, 0, {'name': module.name}))

        if not missing:
            raise UserError(_('No modules found to purge'))
        return missing
Beispiel #14
0
    def _get_checksum_dir(self):
        """Return the hash of this module's source directory, or False
        when the module cannot be found on disk."""
        self.ensure_one()

        raw_patterns = self.env["ir.config_parameter"].get_param(
            PARAM_EXCLUDE_PATTERNS, DEFAULT_EXCLUDE_PATTERNS,
        )
        patterns = [pattern.strip() for pattern in raw_patterns.split(",")]
        languages = self.env["res.lang"].search([]).mapped("code")

        path = get_module_path(self.name)
        if not (path and os.path.isdir(path)):
            return False
        return addon_hash(path, patterns, languages)
Beispiel #15
0
def _load_l10n_ar_demo_data(cr):
    """Load the demo data of the Argentinian localization modules.

    :param cr: database cursor; a commit is issued after each module's
        demo data has been loaded.
    """
    env = api.Environment(cr, SUPERUSER_ID, {})
    for module_name in ['l10n_ar', 'l10n_ar_edi', 'l10n_ar_website_sale']:
        # Lazy %-args avoid formatting when INFO logging is disabled.
        _logger.info('Loading demo data from %s', module_name)
        manifest_file = module.module_manifest(
            module.get_module_path(module_name))
        # File objects are context managers; this replaces the manual
        # try/finally close.
        with tools.file_open(manifest_file, mode='rb') as f:
            info = ast.literal_eval(tools.pycompat.to_text(f.read()))
        # A manifest without a 'demo' key would crash the original code
        # by iterating None; default to an empty list instead.
        for demo_data in info.get('demo') or []:
            _logger.info('Loading %s', demo_data)
            tools.convert_file(cr, module_name, demo_data, {}, 'init', True,
                               'demo', None)
        env.cr.commit()
Beispiel #16
0
def get_unit_test_docstrings(modules):
    """Collect docstrings of the unit test classes and methods of *modules*.

    :param modules: iterable of module names
    :return: dict mapping module name to a list of
        (file basename, module-relative file path, comments) tuples
    """
    import ast

    # literal_eval only parses Python literals, unlike eval() which would
    # execute arbitrary code found in the configuration value.
    ignore = ast.literal_eval(tools.config.get('ignored_tests') or '{}')
    test_docstrings_by_module = {}
    for module in modules:
        module_path = get_module_path(module)
        if not _file_in_requested_directories(module_path) or \
                ignore.get(module) == 'all':
            continue
        res = []
        for module_test in get_test_modules(module):
            module_test_file = module_test.__file__
            if module_test_file.endswith('.pyc'):
                # convert extension from .pyc to .py
                module_test_file = module_test_file[:-1]
            # NOTE(review): split('.')[-1] on a file *path* yields the
            # extension ('py'), so this builds 'tests/py.py' — verify the
            # intent (possibly module_test.__name__ was meant).
            filename = os.path.join('tests', '%s.py' %
                                    module_test_file.split('.')[-1])
            if filename in ignore.get(module, []):
                continue
            root, ext = os.path.splitext(os.path.basename(module_test_file))
            # getattr() is the idiomatic spelling of __getattribute__().
            module_classes = [
                getattr(module_test, attr)
                for attr in module_test.__dict__
                if isinstance(getattr(module_test, attr), type)]
            for module_class in module_classes:
                comments = []
                # Raw class-dict lookups (not inherited attributes) so only
                # tests defined on this very class are reported.
                test_methods = [
                    module_class.__dict__[attr]
                    for attr in module_class.__dict__
                    if callable(module_class.__dict__[attr]) and
                    attr.startswith('test')]
                if not test_methods:
                    continue
                if module_class.__dict__['__doc__']:
                    comments.append(module_class.__dict__[
                                    '__doc__'])  # class docstring
                for test_method in sorted(
                        test_methods, key=lambda x: x.__name__):
                    # method name and docstring
                    comment = '%s:\n%s' % (
                        test_method.__name__, test_method.__doc__ or '')
                    comments.append(comment)
                res.append(
                    (root, module_test_file[module_test_file.index(module):],
                     comments))
        if res:
            test_docstrings_by_module[module] = res
    return test_docstrings_by_module
Beispiel #17
0
    def get_asset_content(self, url, url_info=None, custom_attachments=None):
        """
        Fetch the content of an asset (scss / js) file. That content is either
        the one of the related file on the disk or the one of the corresponding
        custom ir.attachment record.

        Params:
            url (str): the URL of the asset (scss / js) file/ir.attachment

            url_info (dict, optional):
                the related url info (see get_asset_info) (allows to optimize
                some code which already have the info and do not want this
                function to re-get it)

            custom_attachments (ir.attachment(), optional):
                the related custom ir.attachment records the function might need
                to search into (allows to optimize some code which already have
                that info and do not want this function to re-get it)

        Returns:
            utf-8 encoded content of the asset (scss / js)
        """
        if url_info is None:
            url_info = self.get_asset_info(url)

        if url_info["customized"]:
            # If the file is already customized, the content is found in the
            # corresponding attachment
            attachment = None
            if custom_attachments is None:
                attachment = self._get_custom_attachment(url)
            else:
                # Filter the pre-fetched recordset instead of a new search.
                attachment = custom_attachments.filtered(lambda r: r.url == url)
            # Falls back to False when no matching attachment exists.
            return attachment and base64.b64decode(attachment.datas) or False

        # If the file is not yet customized, the content is found by reading
        # the local scss file
        module = url_info["module"]
        module_path = get_module_path(module)
        module_resource_path = get_resource_path(module, url_info["resource_path"])
        if module_path and module_resource_path:
            module_path = os.path.join(os.path.normpath(module_path), '')  # join ensures the path ends with '/'
            module_resource_path = os.path.normpath(module_resource_path)
            # Path containment check: only serve files located below the
            # module directory; otherwise fall through (returns None).
            if module_resource_path.startswith(module_path):
                with open(module_resource_path, "rb") as f:
                    return f.read()
Beispiel #18
0
    def find(self):
        """Collect non-purchasable modules missing from the addons path.

        Uninstalled missing modules are purged on the spot; the rest are
        returned as one2many create tuples.
        """
        missing = []
        for module in self.env['ir.module.module'].search(
                [('to_buy', '=', False)]):
            if get_module_path(module.name, display_warning=False):
                # Still present on disk: keep it.
                continue
            if module.state == 'uninstalled':
                self.env['cleanup.purge.line.module'].create(
                    {'name': module.name}).purge()
                continue
            missing.append((0, 0, {'name': module.name}))

        if not missing:
            raise UserError(_('No modules found to purge'))
        return missing
Beispiel #19
0
    def setUpClass(cls):
        """Create the demo partners and the sample report used by the tests."""
        super().setUpClass()
        image_path = module.get_module_path(
            'report_aeroo') + '/static/img/logo.png'

        # Context manager: the original inline open() leaked the file
        # handle until garbage collection.
        with open(image_path, 'rb') as image_file:
            image_data = base64.b64encode(image_file.read())

        cls.partner_1 = cls.env['res.partner'].create({
            'name': 'Partner 1',
            'lang': 'en_US',
            'image': image_data,
        })

        cls.partner_2 = cls.partner_1.copy()

        cls.report = cls.env.ref('report_aeroo.aeroo_sample_report_multi')
Beispiel #20
0
    def _compute_checksum_dir(self):
        """Compute each module's sha1 directory hash; modules missing from
        disk are logged at debug level and skipped."""
        excluded = self.env["ir.config_parameter"].get_param(
            "module_auto_update.checksum_excluded_extensions",
            "pyc,pyo",
        ).split(",")

        for record in self:
            try:
                record.checksum_dir = dirhash(
                    get_module_path(record.name),
                    'sha1',
                    excluded_extensions=excluded,
                )
            except TypeError:
                # Raised when get_module_path() returned None (no module
                # directory on disk).
                _logger.debug(
                    "Cannot compute dir hash for %s, module not found",
                    record.display_name)
    def setUpClass(cls):
        """Set up companies, partners, the sample report and the aeroo
        configuration parameters used by the tests."""
        super(TestAerooReport, cls).setUpClass()
        # str.encode('base64') only exists in Python 2; the base64 module
        # produces an equivalent payload and also works on Python 3.
        import base64
        image_path = (module.get_module_path('report_aeroo') +
                      '/static/img/logo.png')

        cls.company = cls.env['res.company'].create({
            'name': 'My Company',
        })
        cls.company_2 = cls.env['res.company'].create({
            'name': 'My Company 2',
        })

        # Context manager closes the image file instead of leaking the
        # handle opened inline.
        with open(image_path, 'rb') as image_file:
            image_data = base64.b64encode(image_file.read())

        cls.partner = cls.env['res.partner'].create({
            'name': 'My Partner',
            'lang': 'en_US',
            'company_id': cls.company.id,
            'image': image_data,
        })

        cls.lang_en = cls.env.ref('base.lang_en').id
        cls.lang_fr = cls.env.ref('base.lang_fr').id

        cls.partner_2 = cls.env['res.partner'].create({
            'name': 'My Partner 2',
            'lang': 'en_US',
        })

        cls.report = cls.env.ref('report_aeroo.aeroo_sample_report_id')
        cls.report.write({
            'attachment': None,
            'attachment_use': False,
        })

        cls.env['ir.config_parameter'].set_param(
            'report_aeroo.libreoffice_location', 'libreoffice')

        cls.env['ir.config_parameter'].set_param('report_aeroo.pdftk_location',
                                                 'pdftk')

        cls.env['ir.config_parameter'].set_param(
            'report_aeroo.libreoffice_timeout', '60')
    def setUpClass(cls):
        """Set up companies, partners and the aeroo sample report used by
        the tests."""
        super(TestAerooReport, cls).setUpClass()
        image_path = module.get_module_path(
            'report_aeroo') + '/static/img/logo.png'

        cls.company = cls.env['res.company'].create({
            'name': 'My Company',
        })
        cls.company_2 = cls.env['res.company'].create({
            'name': 'My Company 2',
        })

        # Context manager: the original inline open() leaked the file
        # handle until garbage collection.
        with open(image_path, 'rb') as image_file:
            image_data = base64.b64encode(image_file.read())

        cls.partner = cls.env['res.partner'].create({
            'name': 'My Partner',
            'lang': 'en_US',
            'company_id': cls.company.id,
            'image': image_data,
        })

        cls.lang_en = cls.env.ref('base.lang_en')
        cls.lang_fr = cls.env.ref('base.lang_fr')

        cls.partner_2 = cls.env['res.partner'].create({
            'name': 'My Partner 2',
            'lang': 'en_US',
        })

        cls.report = cls.env.ref('report_aeroo.aeroo_sample_report')
        cls.report.write({
            'attachment': None,
            'attachment_use': False,
            'aeroo_lang_eval': 'o.lang',
            'aeroo_company_eval': 'o.company_id',
            'aeroo_out_format_id':
                cls.env.ref('report_aeroo.aeroo_mimetype_pdf_odt').id,
        })
    def do_writeback(self, m, dbname):
        """Write back freshly exported zh_TW translations for module *m*.

        Exports the module's current terms via ``odoo.tools.trans_export``,
        compares them line-wise with the existing ``i18n/zh_TW.po`` file
        and, when they differ beyond the POT-Creation-Date /
        PO-Revision-Date headers, overwrites the .po file (and the .pot
        file too when ``self.rewrite_pot`` is set).

        :param m: module record; only ``m.name`` is used
        :param dbname: name of the database to export translations from
        :return: the changed ``msgstr`` lines concatenated into one string
            (empty when there is no i18n directory or nothing changed)
        """
        i18n_path = os.path.join(module.get_module_path(m.name), 'i18n')
        pot_path = os.path.join(i18n_path, m.name + '.pot')
        lang_path = os.path.join(i18n_path, 'zh_TW.po')
        fix_terms = ""

        if not os.path.exists(i18n_path):
            return fix_terms
        if not os.path.exists(lang_path):
            # Create an empty .po file so it can be opened in r+ below.
            open(lang_path, 'a').close()
        with open(lang_path, "r+") as buf:
            registry = odoo.modules.registry.RegistryManager.new(dbname)
            with odoo.api.Environment.manage():
                with registry.cursor() as cr:

                    output = cStringIO.StringIO()
                    odoo.tools.trans_export("zh_TW", [m.name], output, "po",
                                            cr)
                    contents = output.getvalue()
                    current = buf.read()
                    # contents = buf.read()
                    # Lines present in the fresh export but missing from
                    # the file currently on disk.
                    diff = set(contents.split("\n")).difference(
                        current.split("\n"))
                    writeback = False
                    for line in diff:
                        # Ignore differences caused only by timestamp headers.
                        if "POT-Creation-Date:" not in line and "PO-Revision-Date:" not in line:
                            writeback = True
                            if "msgstr " in line:
                                fix_terms += "%s\n" % line.replace(
                                    "msgstr ", "")
                    if writeback:
                        # msg += "\n%s: \n%s" % (m.name, fix_terms)
                        # Overwrite the whole file with the fresh export.
                        buf.seek(0)
                        buf.write(contents)
                        buf.truncate()
                        if self.rewrite_pot:
                            with open(pot_path, "w") as pot:
                                odoo.tools.trans_export(
                                    None, [m.name], pot, "po", cr)

                    output.close()

        _logger.info('Translation writebacked: %s ' % m.name)
        return fix_terms
    def find(self):
        """Find modules missing from disk; purge the uninstalled ones and
        return create tuples for the rest."""
        missing = []
        purge_lines = self.env["cleanup.purge.line.module"]
        for module in self.env["ir.module.module"].search([]):
            if get_module_path(module.name):
                # Module directory still exists: nothing to purge.
                continue
            if module.state == "uninstalled":
                purge_lines += self.env["cleanup.purge.line.module"].create(
                    {"name": module.name})
                continue
            missing.append((0, 0, {"name": module.name}))

        purge_lines.purge()

        if not missing:
            raise UserError(_("No modules found to purge"))
        return missing
Beispiel #25
0
    def find(self):
        """Find modules missing from disk (excluding purchasable modules
        and studio_customization); purge the uninstalled ones and return
        create tuples for the others."""
        missing = []
        to_purge = self.env["cleanup.purge.line.module"]
        IrModule = self.env["ir.module.module"]
        domain = [("to_buy", "=", False),
                  ("name", "!=", "studio_customization")]
        for module in IrModule.search(domain):
            if get_module_path(module.name, display_warning=False):
                continue
            if module.state == "uninstalled":
                to_purge += self.env["cleanup.purge.line.module"].create(
                    {"name": module.name})
                continue
            missing.append((0, 0, {"name": module.name}))

        to_purge.purge()

        if not missing:
            raise UserError(_("No modules found to purge"))
        return missing
 def test_base_manifest_extension(self):
     """Installed modules listed in 'depends_if_installed' end up in
     'depends'; uninstalled ones are dropped along with the key."""
     import shutil
     # write a test manifest
     module_path = tempfile.mkdtemp(dir=os.path.join(
         get_module_path('base_manifest_extension'), 'static'))
     # Remove the temporary directory afterwards; the original test
     # leaked it inside the addon's static/ directory.
     self.addCleanup(shutil.rmtree, module_path, True)
     manifest_path = os.path.join(module_path, '__manifest__.py')
     with open(manifest_path, 'w') as manifest:
         manifest.write(
             repr({
                 'depends_if_installed': [
                     'base_manifest_extension',
                     'not installed',
                 ],
             }))
     # parse it
     parsed = load_information_from_description_file(
         # this name won't really be used, but avoids a warning
         'base',
         mod_path=module_path,
     )
     self.assertIn('base_manifest_extension', parsed['depends'])
     self.assertNotIn('not installed', parsed['depends'])
     self.assertNotIn('depends_if_installed', parsed)
Beispiel #27
0
def run_other_tests(dbname, modules):
    """Run the legacy (non-unit) test files of the given modules.

    Each test file result is recorded through ``_write_log``. A file is
    marked ``ignored`` when it is listed in the ``ignored_tests`` config
    option, lives outside the requested directories, or when running on
    Odoo >= 12.0 (this test format was dropped there — see the version
    check below).

    :param str dbname: name of the database to run the tests against
    :param modules: iterable of module names whose test files are run
    """
    import ast

    _logger.info('Running tests other than unit...')
    # ``ignored_tests`` is expected to be a dict literal in the config
    # file; ``ast.literal_eval`` parses it safely, unlike ``eval`` which
    # would execute arbitrary code from the configuration.
    ignore = ast.literal_eval(tools.config.get('ignored_tests') or '{}')
    db = sql_db.db_connect(dbname)
    with closing(db.cursor()) as cr:
        test_files_by_module = _get_test_files_by_module(modules)
        for module in test_files_by_module:
            ignored_files = ignore.get(module, [])
            # The special value 'all' ignores every file of the module.
            if ignored_files == 'all':
                ignored_files = test_files_by_module[module]
            for filename in test_files_by_module[module]:
                vals = {
                    'module': module,
                    'file': filename,
                }
                if filename in ignored_files or \
                        not _file_in_requested_directories(
                            get_module_path(module)) or \
                        LooseVersion(
                            release.major_version) >= LooseVersion('12.0'):
                    vals['result'] = 'ignored'
                    _write_log(vals)
                    continue
                start = time.time()
                try:
                    _run_test(cr, module, filename)
                except Exception as e:
                    vals['duration'] = time.time() - start
                    vals['result'] = 'error'
                    vals['code'] = e.__class__.__name__
                    vals['exception'] = '\n%s' % _get_exception_message(e)
                    if filename.endswith('.yml'):
                        vals['exception'] += '\n%s' % _build_error_message()
                    _write_log(vals)
                else:
                    vals['duration'] = time.time() - start
                    vals['result'] = 'success'
                    _write_log(vals)
            # Tests must not leave changes behind in the database.
            cr.rollback()
Beispiel #28
0
 def get_xml_records(self, module):
     """Return the raw text of every XML data file declared by *module*."""
     addon_dir = get_module_path(module)
     manifest = self._read_manifest(addon_dir)
     contents = []
     # Key order matters: mirror module/loading.py:load_module_graph,
     # which loads init_xml, then update_xml, then data.
     for section in ('init_xml', 'update_xml', 'data'):
         for data_file in manifest.get(section) or []:
             if not data_file.lower().endswith('.xml'):
                 continue
             # Manifest paths use '/' separators; rebuild a native path.
             path = os.path.join(addon_dir, *data_file.split('/'))
             try:
                 with open(path, 'r') as handle:
                     contents.append(handle.read())
             except UnicodeDecodeError:
                 # Skip files that cannot be decoded as text.
                 continue
     return contents
Beispiel #29
0
 def create_crt_report(self):
     """Generate the CRT xlsx report and store it on the record.

     Loads the ``CRT.xlsx`` template shipped in the ``servicio_base``
     module, clones its first sheet, fills it with the record's CRT data
     and writes the resulting workbook into ``xls_file``/``xls_name``.

     Note: only the first record of the recordset is processed because
     the ``return`` statement sits inside the loop (kept as-is to
     preserve the existing behavior).

     :returns: True after processing the first record, None when the
         template has no sheets
     """
     for row in self:
         # Clear any previously generated report before rebuilding it.
         if row.xls_file or row.xls_name:
             row.write({'xls_file': False, 'xls_name': False})
         filepath = os.path.join(
             get_module_path('servicio_base'), 'xlsx_tpl', 'CRT.xlsx')
         wb = load_workbook(filepath)
         if not wb._sheets:
             return
         count = 0
         # Clone the template sheet; the counter keeps the sheet title
         # scheme ("CRT0") used elsewhere.
         ws = self.clone_worksheet(wb, wb._sheets[0], title="CRT" + str(count))
         data_value = self._create_crt_data()
         self.fill_crt(data_value, ws)
         wb._sheets.append(ws)
         xls_io = BytesIO()
         wb.save(xls_io)
         # base64.encodestring was removed in Python 3.9; encodebytes is
         # the drop-in replacement with identical output.
         row.write({
             'xls_file': base64.encodebytes(xls_io.getvalue()),
             'xls_name': 'CRT' + '.xlsx',
         })
         return True
Beispiel #30
0
    def get_assets_editor_resources(self, key, get_views=True, get_less=True, bundles=False, bundles_restriction=None):
        """Return the resources the assets editor can customize for *key*.

        :param str key: xmlid of the root view
        :param bool get_views: whether to return the related views
        :param bool get_less: whether to return the customizable LESS files
        :param bundles: forwarded to ``ir.ui.view.get_related_views``
        :param bundles_restriction: optional list of bundle xmlids used to
            filter the returned files; ``None``/empty means no restriction
        :returns: dict with the keys ``views`` and ``less``
        """
        # The default used to be a mutable ``[]``; ``None`` avoids the
        # shared-mutable-default pitfall with identical semantics (the
        # list is only ever read, never mutated).
        bundles_restriction = bundles_restriction or []

        # Related views must be fetched if the user wants the views and/or the style
        views = request.env["ir.ui.view"].get_related_views(key, bundles=bundles)
        views = views.read(['name', 'id', 'key', 'xml_id', 'arch', 'active', 'inherit_id'])

        less_files_data_by_bundle = []

        # Load less only if asked by the user
        if get_less:
            # Compile regex outside of the loop.
            # This is used to exclude library less files from the result.
            excluded_url_matcher = re.compile(r"^(.+/lib/.+)|(.+import_bootstrap.less)$")

            # Load already customized less files attachments
            custom_attachments = request.env["ir.attachment"].search([("url", "=like", self._make_custom_less_file_url("%%.%%", "%%"))])

            # First check the t-call-assets used in the related views
            url_infos = dict()
            for v in views:
                for asset_call_node in etree.fromstring(v["arch"]).xpath("//t[@t-call-assets]"):
                    if asset_call_node.get("t-css") == "false":
                        continue
                    asset_name = asset_call_node.get("t-call-assets")

                    # Loop through bundle files to search for LESS file info
                    less_files_data = []
                    for file_info in request.env["ir.qweb"]._get_asset_content(asset_name, {})[0]:
                        if file_info["atype"] != "text/less":
                            continue
                        url = file_info["url"]

                        # Exclude library files (see regex above)
                        if excluded_url_matcher.match(url):
                            continue

                        # Check if the file is customized and get bundle/path info
                        less_file_data = self._match_less_file_url(url)
                        if not less_file_data:
                            continue

                        # Save info (arch will be fetched later)
                        url_infos[url] = less_file_data
                        less_files_data.append(url)

                    # Less data is returned sorted by bundle, with the bundles names and xmlids
                    if len(less_files_data):
                        less_files_data_by_bundle.append([dict(xmlid=asset_name, name=request.env.ref(asset_name).name), less_files_data])

            # Filter bundles/files:
            # - A file which appears in multiple bundles only appears in the first one (the first in the DOM)
            # - Only keep bundles with files which appears in the asked bundles and only keep those files
            for i in range(0, len(less_files_data_by_bundle)):
                bundle_1 = less_files_data_by_bundle[i]
                for j in range(0, len(less_files_data_by_bundle)):
                    bundle_2 = less_files_data_by_bundle[j]
                    # In unwanted bundles, keep only the files which are in wanted bundles too (less_helpers)
                    if bundle_1[0]["xmlid"] not in bundles_restriction and bundle_2[0]["xmlid"] in bundles_restriction:
                        bundle_1[1] = [item_1 for item_1 in bundle_1[1] if item_1 in bundle_2[1]]
            for i in range(0, len(less_files_data_by_bundle)):
                bundle_1 = less_files_data_by_bundle[i]
                for j in range(i+1, len(less_files_data_by_bundle)):
                    bundle_2 = less_files_data_by_bundle[j]
                    # In every bundle, keep only the files which were not found in previous bundles
                    bundle_2[1] = [item_2 for item_2 in bundle_2[1] if item_2 not in bundle_1[1]]

            # Only keep bundles which still have files and that were requested
            less_files_data_by_bundle = [
                data for data in less_files_data_by_bundle
                if (len(data[1]) > 0 and (not bundles_restriction or data[0]["xmlid"] in bundles_restriction))
            ]

            # Fetch the arch of each kept file, in each bundle
            for bundle_data in less_files_data_by_bundle:
                for i in range(0, len(bundle_data[1])):
                    url = bundle_data[1][i]
                    url_info = url_infos[url]

                    content = None
                    if url_info["customized"]:
                        # If the file is already customized, the content is found in the corresponding attachment
                        content = base64.b64decode(custom_attachments.filtered(lambda a: a.url == url).datas)
                    else:
                        # If the file is not yet customized, the content is found by reading the local less file
                        module = url_info["module"]
                        module_path = get_module_path(module)
                        module_resource_path = get_resource_path(module, url_info["resource_path"])
                        if module_path and module_resource_path:
                            module_path = os.path.join(os.path.normpath(module_path), '') # join ensures the path ends with '/'
                            module_resource_path = os.path.normpath(module_resource_path)
                            if module_resource_path.startswith(module_path):
                                with open(module_resource_path, "rb") as f:
                                    content = f.read()

                    bundle_data[1][i] = dict(
                        url = "/%s/%s" % (url_info["module"], url_info["resource_path"]),
                        arch = content,
                        customized = url_info["customized"],
                    )

        return dict(
            views = get_views and views or [],
            less = get_less and less_files_data_by_bundle or [],
        )
Beispiel #31
0
    def binary_content(cls, xmlid=None, model='ir.attachment', id=None, field='datas', unique=False, filename=None, filename_field='datas_fname', download=False, mimetype=None, default_mimetype='application/octet-stream', env=None):
        """ Get file, attachment or downloadable content

        If the ``xmlid`` and ``id`` parameter is omitted, fetches the default value for the
        binary field (via ``default_get``), otherwise fetches the field for
        that precise record.

        :param str xmlid: xmlid of the record
        :param str model: name of the model to fetch the binary from
        :param int id: id of the record from which to fetch the binary
        :param str field: binary field
        :param bool unique: add a max-age for the cache control
        :param str filename: choose a filename
        :param str filename_field: if not create an filename with model-id-field
        :param bool download: apply headers to download the file
        :param str mimetype: mimetype of the field (for headers)
        :param str default_mimetype: default mimetype if no mimetype found
        :param Environment env: by default use request.env
        :returns: (status, headers, content)
        """
        env = env or request.env
        # get object and content
        obj = None
        if xmlid:
            obj = env.ref(xmlid, False)
        elif id and model in env.registry:
            obj = env[model].browse(int(id))

        # obj exists
        if not obj or not obj.exists() or field not in obj:
            return (404, [], None)

        # check read access
        try:
            last_update = obj['__last_update']
        except AccessError:
            return (403, [], None)

        status, headers, content = None, [], None

        # attachment by url check
        module_resource_path = None
        if model == 'ir.attachment' and obj.type == 'url' and obj.url:
            # Raw string: "\w" in a plain literal is an invalid escape
            # sequence (DeprecationWarning, SyntaxError in the future).
            url_match = re.match(r"^/(\w+)/(.+)$", obj.url)
            if url_match:
                module = url_match.group(1)
                module_path = get_module_path(module)
                module_resource_path = get_resource_path(module, url_match.group(2))
                if module_path and module_resource_path:
                    module_path = os.path.join(os.path.normpath(module_path), '')  # join ensures the path ends with '/'
                    module_resource_path = os.path.normpath(module_resource_path)
                    # Path-traversal guard: only serve files that really
                    # live inside the module directory.
                    if module_resource_path.startswith(module_path):
                        with open(module_resource_path, 'rb') as f:
                            content = base64.b64encode(f.read())
                        last_update = pycompat.text_type(os.path.getmtime(module_resource_path))

            if not module_resource_path:
                module_resource_path = obj.url

            if not content:
                # Not resolvable locally: redirect the client to the URL.
                status = 301
                content = module_resource_path
        else:
            content = obj[field] or ''

        # filename
        if not filename:
            if filename_field in obj:
                filename = obj[filename_field]
            elif module_resource_path:
                filename = os.path.basename(module_resource_path)
            else:
                filename = "%s-%s-%s" % (obj._name, obj.id, field)

        # mimetype
        # NOTE(review): this overwrites the ``mimetype`` argument with the
        # record's own mimetype — kept as-is since callers may rely on it.
        mimetype = 'mimetype' in obj and obj.mimetype or False
        if not mimetype:
            if filename:
                mimetype = mimetypes.guess_type(filename)[0]
            if not mimetype and getattr(env[model]._fields[field], 'attachment', False):
                # for binary fields, fetch the ir_attachement for mimetype check
                attach_mimetype = env['ir.attachment'].search_read(domain=[('res_model', '=', model), ('res_id', '=', id), ('res_field', '=', field)], fields=['mimetype'], limit=1)
                mimetype = attach_mimetype and attach_mimetype[0]['mimetype']
            if not mimetype:
                mimetype = guess_mimetype(base64.b64decode(content), default=default_mimetype)

        headers += [('Content-Type', mimetype), ('X-Content-Type-Options', 'nosniff')]

        # cache
        etag = bool(request) and request.httprequest.headers.get('If-None-Match')
        retag = '"%s"' % hashlib.md5(last_update.encode('utf-8')).hexdigest()
        status = status or (304 if etag == retag else 200)
        headers.append(('ETag', retag))
        headers.append(('Cache-Control', 'max-age=%s' % (STATIC_CACHE if unique else 0)))

        # content-disposition default name
        if download:
            headers.append(('Content-Disposition', cls.content_disposition(filename)))
        return (status, headers, content)
Beispiel #32
0
    def load_resources(self, file_type, views, bundles_restriction, only_user_custom_files):
        """Return the editable asset files of *views*, grouped by bundle.

        :param str file_type: 'scss' for stylesheets, anything else for JS
        :param views: list of view dicts (must contain an 'arch' key)
        :param bundles_restriction: bundle xmlids to restrict the result to
            (empty means no restriction)
        :param bool only_user_custom_files: for scss, only return the
            ``user_custom_*`` files and already-customized files
        :returns: list of ``[bundle_info_dict, [file_dict, ...]]`` pairs
        """
        files_data_by_bundle = []
        resources_type_info = {'t_call_assets_attribute': 't-js', 'mimetype': 'text/javascript'}
        if file_type == 'scss':
            resources_type_info = {'t_call_assets_attribute': 't-css', 'mimetype': 'text/scss'}

        # Compile regex outside of the loop.
        # Used to exclude library scss files from the result. Raw string:
        # "\." in a plain literal is an invalid escape sequence.
        excluded_url_matcher = re.compile(r"^(.+/lib/.+)|(.+import_bootstrap.+\.scss)$")

        # Load already customized files attachments
        custom_url = self._make_custom_scss_or_js_file_url("%%.%%", "%%")
        custom_attachments = self.get_custom_attachment(custom_url, op='=like')

        # First check the t-call-assets used in the related views
        url_infos = dict()
        for v in views:
            for asset_call_node in etree.fromstring(v["arch"]).xpath("//t[@t-call-assets]"):
                if asset_call_node.get(resources_type_info['t_call_assets_attribute']) == "false":
                    continue
                asset_name = asset_call_node.get("t-call-assets")

                # Loop through bundle files to search for file info
                files_data = []
                for file_info in request.env["ir.qweb"]._get_asset_content(asset_name, {})[0]:
                    if file_info["atype"] != resources_type_info['mimetype']:
                        continue
                    url = file_info["url"]

                    # Exclude library files (see regex above)
                    if excluded_url_matcher.match(url):
                        continue

                    # Check if the file is customized and get bundle/path info
                    file_data = self._match_scss_or_js_file_url(url)
                    if not file_data:
                        continue

                    # Save info according to the filter (arch will be fetched later)
                    url_infos[url] = file_data

                    if '/user_custom_' in url \
                            or file_data['customized'] \
                            or file_type == 'scss' and not only_user_custom_files:
                        files_data.append(url)

                # File data is returned sorted by bundle, with the bundles names and xmlids
                if len(files_data):
                    files_data_by_bundle.append([dict(xmlid=asset_name, name=request.env.ref(asset_name).name), files_data])

        # Filter bundles/files:
        # - A file which appears in multiple bundles only appears in the first one (the first in the DOM)
        # - Only keep bundles with files which appears in the asked bundles and only keep those files
        for i in range(0, len(files_data_by_bundle)):
            bundle_1 = files_data_by_bundle[i]
            for j in range(0, len(files_data_by_bundle)):
                bundle_2 = files_data_by_bundle[j]
                # In unwanted bundles, keep only the files which are in wanted bundles too (_assets_helpers)
                if bundle_1[0]["xmlid"] not in bundles_restriction and bundle_2[0]["xmlid"] in bundles_restriction:
                    bundle_1[1] = [item_1 for item_1 in bundle_1[1] if item_1 in bundle_2[1]]
        for i in range(0, len(files_data_by_bundle)):
            bundle_1 = files_data_by_bundle[i]
            for j in range(i + 1, len(files_data_by_bundle)):
                bundle_2 = files_data_by_bundle[j]
                # In every bundle, keep only the files which were not found in previous bundles
                bundle_2[1] = [item_2 for item_2 in bundle_2[1] if item_2 not in bundle_1[1]]

        # Only keep bundles which still have files and that were requested
        files_data_by_bundle = [
            data for data in files_data_by_bundle
            if (len(data[1]) > 0 and (not bundles_restriction or data[0]["xmlid"] in bundles_restriction))
        ]

        # Fetch the arch of each kept file, in each bundle
        for bundle_data in files_data_by_bundle:
            for i in range(0, len(bundle_data[1])):
                url = bundle_data[1][i]
                url_info = url_infos[url]

                content = None
                if url_info["customized"]:
                    # If the file is already customized, the content is found in the corresponding attachment
                    content = base64.b64decode(custom_attachments.filtered(lambda a: a.url == url).datas)
                else:
                    # If the file is not yet customized, the content is found by reading the local scss file
                    module = url_info["module"]
                    module_path = get_module_path(module)
                    module_resource_path = get_resource_path(module, url_info["resource_path"])
                    if module_path and module_resource_path:
                        module_path = os.path.join(os.path.normpath(module_path), '') # join ensures the path ends with '/'
                        module_resource_path = os.path.normpath(module_resource_path)
                        if module_resource_path.startswith(module_path):
                            with open(module_resource_path, "rb") as f:
                                content = f.read()

                bundle_data[1][i] = dict(
                    url="/%s/%s" % (url_info["module"], url_info["resource_path"]),
                    arch=content,
                    customized=url_info["customized"],
                )
        return files_data_by_bundle
Beispiel #33
0
    def binary_content(cls,
                       xmlid=None,
                       model='ir.attachment',
                       id=None,
                       field='datas',
                       unique=False,
                       filename=None,
                       filename_field='datas_fname',
                       download=False,
                       mimetype=None,
                       default_mimetype='application/octet-stream',
                       env=None):
        """ Get file, attachment or downloadable content

        If the ``xmlid`` and ``id`` parameter is omitted, fetches the default value for the
        binary field (via ``default_get``), otherwise fetches the field for
        that precise record.

        :param str xmlid: xmlid of the record
        :param str model: name of the model to fetch the binary from
        :param int id: id of the record from which to fetch the binary
        :param str field: binary field
        :param bool unique: add a max-age for the cache control
        :param str filename: choose a filename
        :param str filename_field: if not create an filename with model-id-field
        :param bool download: apply headers to download the file
        :param str mimetype: mimetype of the field (for headers)
        :param str default_mimetype: default mimetype if no mimetype found
        :param Environment env: by default use request.env
        :returns: (status, headers, content)
        """
        env = env or request.env
        # get object and content
        obj = None
        if xmlid:
            obj = env.ref(xmlid, False)
        elif id and model in env.registry:
            obj = env[model].browse(int(id))

        # obj exists
        if not obj or not obj.exists() or field not in obj:
            return (404, [], None)

        # check read access
        try:
            last_update = obj['__last_update']
        except AccessError:
            return (403, [], None)

        status, headers, content = None, [], None

        # attachment by url check
        module_resource_path = None
        if model == 'ir.attachment' and obj.type == 'url' and obj.url:
            url_match = re.match(r"^/(\w+)/(.+)$", obj.url)
            if url_match:
                module = url_match.group(1)
                module_path = get_module_path(module)
                module_resource_path = get_resource_path(
                    module, url_match.group(2))
                if module_path and module_resource_path:
                    module_path = os.path.join(
                        os.path.normpath(module_path),
                        '')  # join ensures the path ends with '/'
                    module_resource_path = os.path.normpath(
                        module_resource_path)
                    # Path-traversal guard: only serve files that really
                    # live inside the module directory.
                    if module_resource_path.startswith(module_path):
                        with open(module_resource_path, 'rb') as f:
                            content = base64.b64encode(f.read())
                        last_update = str(
                            os.path.getmtime(module_resource_path))

            if not module_resource_path:
                module_resource_path = obj.url

            if not content:
                # Not resolvable locally: redirect the client to the URL.
                status = 301
                content = module_resource_path
        else:
            # begin redefined part of original binary_content of odoo/base/addons/ir/ir_http
            is_attachment = env[model]._fields[field].attachment
            if is_attachment:
                domain = [
                    ('res_model', '=', model),
                    ('res_field', '=', field),
                    ('res_id', '=', obj.id),
                    ('type', '=', 'url'),
                ]
                att = env['ir.attachment'].sudo().search(domain)
                if att:
                    content = att.url
                    status = 301
            if not content:
                content = obj[field] or ''
            # end redefined part of original binary_content
        # filename
        if not filename:
            if filename_field in obj:
                filename = obj[filename_field]
            elif module_resource_path:
                filename = os.path.basename(module_resource_path)
            else:
                filename = "%s-%s-%s" % (obj._name, obj.id, field)

        # mimetype
        # NOTE(review): this overwrites the ``mimetype`` argument with the
        # record's own mimetype — kept as-is since callers may rely on it.
        mimetype = 'mimetype' in obj and obj.mimetype or False
        if not mimetype:
            if filename:
                mimetype = mimetypes.guess_type(filename)[0]
            if not mimetype and getattr(env[model]._fields[field],
                                        'attachment', False):
                # for binary fields, fetch the ir_attachement for mimetype check
                attach_mimetype = env['ir.attachment'].search_read(
                    domain=[('res_model', '=', model), ('res_id', '=', id),
                            ('res_field', '=', field)],
                    fields=['mimetype'],
                    limit=1)
                mimetype = attach_mimetype and attach_mimetype[0]['mimetype']
            if not mimetype:
                mimetype = guess_mimetype(base64.b64decode(content),
                                          default=default_mimetype)

        headers += [('Content-Type', mimetype),
                    ('X-Content-Type-Options', 'nosniff')]

        # cache
        etag = hasattr(
            request,
            'httprequest') and request.httprequest.headers.get('If-None-Match')
        # ``last_update`` is a str (see the str(...getmtime...) conversion
        # above); hashlib.md5 requires bytes, so encode it first — this also
        # matches the sibling implementation of binary_content in this file.
        retag = '"%s"' % hashlib.md5(last_update.encode('utf-8')).hexdigest()
        status = status or (304 if etag == retag else 200)
        headers.append(('ETag', retag))
        headers.append(
            ('Cache-Control', 'max-age=%s' % (STATIC_CACHE if unique else 0)))

        # content-disposition default name
        if download:
            headers.append(
                ('Content-Disposition', cls.content_disposition(filename)))
        return (status, headers, content)
Beispiel #34
0
    def get_assets_editor_resources(self,
                                    key,
                                    get_views=True,
                                    get_scss=True,
                                    bundles=False,
                                    bundles_restriction=None):
        """Return the resources the assets editor can customize for *key*.

        :param str key: xmlid of the root view
        :param bool get_views: whether to return the related views
        :param bool get_scss: whether to return the customizable SCSS files
        :param bundles: forwarded to ``ir.ui.view.get_related_views``
        :param bundles_restriction: optional list of bundle xmlids used to
            filter the returned files; ``None``/empty means no restriction
        :returns: dict with the keys ``views`` and ``scss``
        """
        # The default used to be a mutable ``[]``; ``None`` avoids the
        # shared-mutable-default pitfall with identical semantics (the
        # list is only ever read, never mutated).
        bundles_restriction = bundles_restriction or []

        # Related views must be fetched if the user wants the views and/or the style
        views = request.env["ir.ui.view"].get_related_views(key,
                                                            bundles=bundles)
        views = views.read(
            ['name', 'id', 'key', 'xml_id', 'arch', 'active', 'inherit_id'])

        scss_files_data_by_bundle = []

        # Load scss only if asked by the user
        if get_scss:
            # Compile regex outside of the loop.
            # Used to exclude library scss files from the result. Raw
            # string: "\." in a plain literal is an invalid escape sequence.
            excluded_url_matcher = re.compile(
                r"^(.+/lib/.+)|(.+import_bootstrap.+\.scss)$")

            # Load already customized scss files attachments
            custom_url = self._make_custom_scss_file_url("%%.%%", "%%")
            custom_attachments = self.get_custom_attachment(custom_url,
                                                            op='=like')

            # First check the t-call-assets used in the related views
            url_infos = dict()
            for v in views:
                for asset_call_node in etree.fromstring(
                        v["arch"]).xpath("//t[@t-call-assets]"):
                    if asset_call_node.get("t-css") == "false":
                        continue
                    asset_name = asset_call_node.get("t-call-assets")

                    # Loop through bundle files to search for scss file info
                    scss_files_data = []
                    for file_info in request.env["ir.qweb"]._get_asset_content(
                            asset_name, {})[0]:
                        if file_info["atype"] != "text/scss":
                            continue
                        url = file_info["url"]

                        # Exclude library files (see regex above)
                        if excluded_url_matcher.match(url):
                            continue

                        # Check if the file is customized and get bundle/path info
                        scss_file_data = self._match_scss_file_url(url)
                        if not scss_file_data:
                            continue

                        # Save info (arch will be fetched later)
                        url_infos[url] = scss_file_data
                        scss_files_data.append(url)

                    # scss data is returned sorted by bundle, with the bundles names and xmlids
                    if len(scss_files_data):
                        scss_files_data_by_bundle.append([
                            dict(xmlid=asset_name,
                                 name=request.env.ref(asset_name).name),
                            scss_files_data
                        ])

            # Filter bundles/files:
            # - A file which appears in multiple bundles only appears in the first one (the first in the DOM)
            # - Only keep bundles with files which appears in the asked bundles and only keep those files
            for i in range(0, len(scss_files_data_by_bundle)):
                bundle_1 = scss_files_data_by_bundle[i]
                for j in range(0, len(scss_files_data_by_bundle)):
                    bundle_2 = scss_files_data_by_bundle[j]
                    # In unwanted bundles, keep only the files which are in wanted bundles too (_assets_helpers)
                    if bundle_1[0][
                            "xmlid"] not in bundles_restriction and bundle_2[
                                0]["xmlid"] in bundles_restriction:
                        bundle_1[1] = [
                            item_1 for item_1 in bundle_1[1]
                            if item_1 in bundle_2[1]
                        ]
            for i in range(0, len(scss_files_data_by_bundle)):
                bundle_1 = scss_files_data_by_bundle[i]
                for j in range(i + 1, len(scss_files_data_by_bundle)):
                    bundle_2 = scss_files_data_by_bundle[j]
                    # In every bundle, keep only the files which were not found in previous bundles
                    bundle_2[1] = [
                        item_2 for item_2 in bundle_2[1]
                        if item_2 not in bundle_1[1]
                    ]

            # Only keep bundles which still have files and that were requested
            scss_files_data_by_bundle = [
                data for data in scss_files_data_by_bundle
                if (len(data[1]) > 0 and (not bundles_restriction or data[0]
                                          ["xmlid"] in bundles_restriction))
            ]

            # Fetch the arch of each kept file, in each bundle
            for bundle_data in scss_files_data_by_bundle:
                for i in range(0, len(bundle_data[1])):
                    url = bundle_data[1][i]
                    url_info = url_infos[url]

                    content = None
                    if url_info["customized"]:
                        # If the file is already customized, the content is found in the corresponding attachment
                        content = base64.b64decode(
                            custom_attachments.filtered(
                                lambda a: a.url == url).datas)
                    else:
                        # If the file is not yet customized, the content is found by reading the local scss file
                        module = url_info["module"]
                        module_path = get_module_path(module)
                        module_resource_path = get_resource_path(
                            module, url_info["resource_path"])
                        if module_path and module_resource_path:
                            module_path = os.path.join(
                                os.path.normpath(module_path),
                                '')  # join ensures the path ends with '/'
                            module_resource_path = os.path.normpath(
                                module_resource_path)
                            if module_resource_path.startswith(module_path):
                                with open(module_resource_path, "rb") as f:
                                    content = f.read()

                    bundle_data[1][i] = dict(
                        url="/%s/%s" %
                        (url_info["module"], url_info["resource_path"]),
                        arch=content,
                        customized=url_info["customized"],
                    )

        return dict(
            views=get_views and views or [],
            scss=get_scss and scss_files_data_by_bundle or [],
        )
def add_module_dependencies(cr, module_list):
    """
    Select (new) dependencies from the modules in the list
    so that we can inject them into the graph at upgrade
    time. Used in the modified OpenUpgrade Server,
    not to be called from migration scripts

    Also take the OpenUpgrade configuration directives 'forced_deps'
    and 'autoinstall' into account. From any additional modules
    that these directives can add, the dependencies are added as
    well (but these directives are not checked for the occurrence
    of any of the dependencies).

    :param cr: database cursor
    :param module_list: iterable of module names to expand; returned
        unchanged when falsy (None or empty)
    :return: deduplicated list of module names including all transitive
        dependencies and the triggered auto_install modules
    """
    if not module_list:
        return module_list

    # Work on a copy so the caller's list object is never mutated in
    # place (previously the ``+=`` in the directives loop below extended
    # the argument itself before it was rebound).
    module_list = list(module_list)
    modules_in = list(module_list)

    # Version-specific directives ('forced_deps_<version>') take
    # precedence over the generic ones.
    forced_deps = safe_eval(
        config.get_misc('openupgrade', 'forced_deps_' + release.version,
                        config.get_misc('openupgrade', 'forced_deps', '{}')))

    autoinstall = safe_eval(
        config.get_misc('openupgrade', 'autoinstall_' + release.version,
                        config.get_misc('openupgrade', 'autoinstall', '{}')))

    # Iterate over a snapshot: the directives may append to module_list.
    for module in list(module_list):
        module_list += forced_deps.get(module, [])
        module_list += autoinstall.get(module, [])

    module_list = list(set(module_list))

    # Breadth-first expansion of the dependency graph: keep querying for
    # dependency names we have not seen yet until no new ones turn up.
    dependencies = module_list
    while dependencies:
        cr.execute(
            """
            SELECT DISTINCT dep.name
            FROM
                ir_module_module,
                ir_module_module_dependency dep
            WHERE
                module_id = ir_module_module.id
                AND ir_module_module.name in %s
                AND dep.name not in %s
            """, (
                tuple(dependencies),
                tuple(module_list),
            ))

        dependencies = [x[0] for x in cr.fetchall()]
        module_list += dependencies

    # Select auto_install modules of which all dependencies
    # are fulfilled based on the modules we know are to be
    # installed
    cr.execute(
        """
        SELECT name from ir_module_module WHERE state IN %s
        """, (('installed', 'to install', 'to upgrade'), ))
    modules = list(set(module_list + [row[0] for row in cr.fetchall()]))
    cr.execute(
        """
        SELECT name from ir_module_module m
        WHERE auto_install IS TRUE
            AND state = 'uninstalled'
            AND NOT EXISTS(
                SELECT id FROM ir_module_module_dependency d
                WHERE d.module_id = m.id
                AND name NOT IN %s)
         """, (tuple(modules), ))
    # Only consider auto_install modules actually present on disk.
    auto_modules = [row[0] for row in cr.fetchall() if get_module_path(row[0])]
    if auto_modules:
        logger.info("Selecting autoinstallable modules %s",
                    ','.join(auto_modules))
        module_list += auto_modules

    # Set proper state for new dependencies so that any init scripts are run
    cr.execute(
        """
        UPDATE ir_module_module SET state = 'to install'
        WHERE name IN %s AND name NOT IN %s AND state = 'uninstalled'
        """, (tuple(module_list), tuple(modules_in)))
    return module_list