Example #1
class BinarySvg(models.Model):
    _name = 'test_new_api.binary_svg'
    _description = 'Test SVG upload'

    name = fields.Char(required=True)
    image_attachment = fields.Binary(attachment=True)
    image_wo_attachment = fields.Binary(attachment=False)
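
The two flags above are the point of this model: with attachment=True the payload is stored as an ir.attachment record, with attachment=False it stays in a regular database column. A minimal sketch of how to observe the difference, assuming it runs inside an odoo.tests.TransactionCase (so `self.env` exists; the record values are illustrative):

import base64

record = self.env['test_new_api.binary_svg'].create({
    'name': 'demo',
    'image_attachment': base64.b64encode(b'some data'),
    'image_wo_attachment': base64.b64encode(b'some data'),
})
# attachment=True: the payload is backed by an ir.attachment row
attachment = self.env['ir.attachment'].search([
    ('res_model', '=', 'test_new_api.binary_svg'),
    ('res_field', '=', 'image_attachment'),
    ('res_id', '=', record.id),
])
assert attachment
# attachment=False: no attachment row, the value lives in a column
assert not self.env['ir.attachment'].search([
    ('res_model', '=', 'test_new_api.binary_svg'),
    ('res_field', '=', 'image_wo_attachment'),
    ('res_id', '=', record.id),
])
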
Example #2
class ModelBinary(models.Model):
    _name = 'test_new_api.model_binary'
    _description = 'Test Image field'

    binary = fields.Binary()
    binary_related_store = fields.Binary("Binary Related Store", related='binary', store=True, readonly=False)
    binary_related_no_store = fields.Binary("Binary Related No Store", related='binary', store=False, readonly=False)
    binary_computed = fields.Binary(compute='_compute_binary')

    @api.depends('binary')
    def _compute_binary(self):
        # arbitrary value: 'bin_size' must have no effect
        for record in self:
            record.binary_computed = [(record.id, bool(record.binary))]
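
The comment in _compute_binary names the behavior under test: with bin_size set in the context, reading a stored Binary field returns a human-readable size string instead of the payload, while computed Binary fields ignore the flag. A hedged sketch, again assuming a test environment with `self.env`:

import base64

record = self.env['test_new_api.model_binary'].create({
    'binary': base64.b64encode(b'content'),
})
with_size = record.with_context(bin_size=True)
print(with_size.binary)           # stored field: a size string such as '7.00 bytes'
print(with_size.binary_computed)  # computed field: 'bin_size' has no effect
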
Example #3
class ConverterTest(models.Model):
    _name = 'web_editor.converter.test'
    _description = 'Web Editor Converter Test'

    # disable translation export for those brilliant field labels and values
    _translate = False

    char = fields.Char()
    integer = fields.Integer()
    float = fields.Float()
    numeric = fields.Float(digits=(16, 2))
    many2one = fields.Many2one('web_editor.converter.test.sub')
    binary = fields.Binary(attachment=False)
    date = fields.Date()
    datetime = fields.Datetime()
    selection_str = fields.Selection(
        [
            ('A', "Qu'il n'est pas arrivé à Toronto"),
            ('B', "Qu'il était supposé arriver à Toronto"),
            ('C', "Qu'est-ce qu'il fout ce maudit pancake, tabernacle ?"),
            ('D', "La réponse D"),
        ],
        string=u"Lorsqu'un pancake prend l'avion à destination de Toronto et "
        u"qu'il fait une escale technique à St Claude, on dit:")
    html = fields.Html()
    text = fields.Text()
Example #4
class test_model(models.Model):
    _name = 'test_converter.test_model'
    _description = 'Test Converter Model'

    char = fields.Char()
    integer = fields.Integer()
    float = fields.Float()
    numeric = fields.Float(digits=(16, 2))
    many2one = fields.Many2one('test_converter.test_model.sub',
                               group_expand='_gbf_m2o')
    binary = fields.Binary(attachment=False)
    date = fields.Date()
    datetime = fields.Datetime()
    selection_str = fields.Selection(
        [
            ('A', u"Qu'il n'est pas arrivé à Toronto"),
            ('B', u"Qu'il était supposé arriver à Toronto"),
            ('C', u"Qu'est-ce qu'il fout ce maudit pancake, tabernacle ?"),
            ('D', u"La réponse D"),
        ],
        string=u"Lorsqu'un pancake prend l'avion à destination de Toronto et "
        u"qu'il fait une escale technique à St Claude, on dit:")
    html = fields.Html()
    text = fields.Text()

    # the `base` module does not contain any model that implements the
    # `group_expand` functionality; test this feature here...

    @api.model
    def _gbf_m2o(self, subs, domain, order):
        sub_ids = subs._search([], order=order, access_rights_uid=SUPERUSER_ID)
        return subs.browse(sub_ids)
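
The `group_expand` hook above makes read_group return one group per `test_converter.test_model.sub` record, even for sub records that no row points at. A sketch of the effect (the `env` handle is an assumption):

groups = env['test_converter.test_model'].read_group(
    domain=[], fields=['many2one'], groupby=['many2one'])
for group in groups:
    # every sub record appears as a group, including those with a zero count
    print(group['many2one'], group['many2one_count'])
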
Example #5
class BaseLanguageExport(models.TransientModel):
    _name = "base.language.export"
    _description = 'Language Export'

    @api.model
    def _get_languages(self):
        langs = self.env['res.lang'].get_installed()
        return [(NEW_LANG_KEY, _('New Language (Empty translation template)'))] + \
               langs

    name = fields.Char('File Name', readonly=True)
    lang = fields.Selection(_get_languages,
                            string='Language',
                            required=True,
                            default=NEW_LANG_KEY)
    format = fields.Selection([('csv', 'CSV File'), ('po', 'PO File'),
                               ('tgz', 'TGZ Archive')],
                              string='File Format',
                              required=True,
                              default='csv')
    modules = fields.Many2many('ir.module.module',
                               'rel_modules_langexport',
                               'wiz_id',
                               'module_id',
                               string='Apps To Export',
                               domain=[('state', '=', 'installed')])
    data = fields.Binary('File', readonly=True, attachment=False)
    state = fields.Selection(
        [('choose', 'choose'),
         ('get', 'get')],  # choose language or get the file
        default='choose')

    def act_getfile(self):
        this = self[0]
        lang = this.lang if this.lang != NEW_LANG_KEY else False
        mods = sorted(this.mapped('modules.name')) or ['all']

        with contextlib.closing(io.BytesIO()) as buf:
            tools.trans_export(lang, mods, buf, this.format, self._cr)
            out = base64.encodebytes(buf.getvalue())

        filename = 'new'
        if lang:
            filename = tools.get_iso_codes(lang)
        elif len(mods) == 1:
            filename = mods[0]
        extension = this.format
        if not lang and extension == 'po':
            extension = 'pot'
        name = "%s.%s" % (filename, extension)
        this.write({'state': 'get', 'data': out, 'name': name})
        return {
            'type': 'ir.actions.act_window',
            'res_model': 'base.language.export',
            'view_mode': 'form',
            'res_id': this.id,
            'views': [(False, 'form')],
            'target': 'new',
        }
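
Typical use of the wizard from code: pick a language and a format, call act_getfile(), then decode the data field. A minimal sketch, assuming an `env` handle and an installed fr_FR language:

import base64

wizard = env['base.language.export'].create({
    'lang': 'fr_FR',   # assumes fr_FR is installed
    'format': 'po',
})
wizard.act_getfile()
print(wizard.name)                         # e.g. 'fr.po'
po_source = base64.b64decode(wizard.data)
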
Example #6
class pos_cache(models.Model):
    _name = 'pos.cache'
    _description = 'Point of Sale Cache'

    cache = fields.Binary(attachment=True)
    product_domain = fields.Text(required=True)
    product_fields = fields.Text(required=True)

    config_id = fields.Many2one('pos.config',
                                ondelete='cascade',
                                required=True)
    compute_user_id = fields.Many2one('res.users',
                                      'Cache compute user',
                                      required=True)

    @api.model
    def refresh_all_caches(self):
        self.env['pos.cache'].search([]).refresh_cache()

    def refresh_cache(self):
        for cache in self:
            Product = self.env['product.product'].with_user(
                cache.compute_user_id.id)
            products = Product.search(cache.get_product_domain())
            prod_ctx = products.with_context(
                pricelist=cache.config_id.pricelist_id.id,
                display_default_code=False,
                lang=cache.compute_user_id.lang)
            res = prod_ctx.read(cache.get_product_fields())
            cache.write({
                'cache': base64.encodebytes(json.dumps(res).encode('utf-8')),
            })

    @api.model
    def get_product_domain(self):
        return literal_eval(self.product_domain)

    @api.model
    def get_product_fields(self):
        return literal_eval(self.product_fields)

    @api.model
    def get_cache(self, domain, fields):
        if not self.cache or domain != self.get_product_domain(
        ) or fields != self.get_product_fields():
            self.product_domain = str(domain)
            self.product_fields = str(fields)
            self.refresh_cache()

        return json.loads(base64.decodebytes(self.cache).decode('utf-8'))
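
get_cache() is the single entry point: it rebuilds the cache transparently whenever the requested domain or field list differs from what was cached last. A hedged usage sketch (the domain, field names and `env` handle are illustrative):

cache = env['pos.cache'].search([], limit=1)
products = cache.get_cache(
    [('available_in_pos', '=', True)],         # product domain
    ['display_name', 'lst_price', 'barcode'],  # product fields
)
print(len(products))
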
Example #7
class BaseImportModule(models.TransientModel):
    """ Import Module """
    _name = "base.import.module"
    _description = "Import Module"

    module_file = fields.Binary(string='Module .ZIP file',
                                required=True,
                                attachment=False)
    state = fields.Selection([('init', 'init'), ('done', 'done')],
                             string='Status',
                             readonly=True,
                             default='init')
    import_message = fields.Text()
    force = fields.Boolean(
        string='Force init',
        help=
        "Force init mode even if installed. (will update `noupdate='1'` records)"
    )

    def import_module(self):
        self.ensure_one()
        IrModule = self.env['ir.module.module']
        zip_data = base64.decodebytes(self.module_file)
        fp = BytesIO()
        fp.write(zip_data)
        res = IrModule.import_zipfile(fp, force=self.force)
        self.write({'state': 'done', 'import_message': res[0]})
        context = dict(self.env.context, module_name=res[1])
        # Return the wizard; otherwise the dialog closes and the result message is not shown to the user.
        return {
            'name': 'Import Module',
            'view_mode': 'form',
            'target': 'new',
            'res_id': self.id,
            'res_model': 'base.import.module',
            'type': 'ir.actions.act_window',
            'context': context,
        }

    def action_module_open(self):
        self.ensure_one()
        return {
            'domain': [('name', 'in', self.env.context.get('module_name',
                                                           []))],
            'name': 'Modules',
            'view_mode': 'tree,form',
            'res_model': 'ir.module.module',
            'view_id': False,
            'type': 'ir.actions.act_window',
        }
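
Driving the wizard from code follows the same steps as the form: the zip payload must be base64-encoded before being written to module_file. A sketch (the path is hypothetical, `env` is assumed):

import base64

with open('/tmp/my_module.zip', 'rb') as f:   # hypothetical path
    payload = base64.b64encode(f.read())
wizard = env['base.import.module'].create({'module_file': payload})
wizard.import_module()
print(wizard.state, wizard.import_message)
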
Example #8
class RestaurantFloor(models.Model):

    _name = 'restaurant.floor'
    _description = 'Restaurant Floor'

    name = fields.Char('Floor Name', required=True, help='An internal identification of the restaurant floor')
    pos_config_id = fields.Many2one('pos.config', string='Point of Sale')
    background_image = fields.Binary('Background Image', help='A background image used to display a floor layout in the point of sale interface')
    background_color = fields.Char('Background Color', help='The background color of the floor layout, (must be specified in a html-compatible format)', default='rgb(210, 210, 210)')
    table_ids = fields.One2many('restaurant.table', 'floor_id', string='Tables', help='The list of tables in this floor')
    sequence = fields.Integer('Sequence', help='Used to sort Floors', default=1)

    def unlink(self):
        confs = self.mapped('pos_config_id').filtered('is_table_management')
        opened_session = self.env['pos.session'].search([('config_id', 'in', confs.ids), ('state', '!=', 'closed')])
        if opened_session:
            error_msg = _("You cannot remove a floor that is used in a PoS session, close the session(s) first: \n")
            for floor in self:
                for session in opened_session:
                    if floor in session.config_id.floor_ids:
                        error_msg += _("Floor: %s - PoS Config: %s \n") % (floor.name, session.config_id.name)
            if confs:
                raise UserError(error_msg)
        return super(RestaurantFloor, self).unlink()
Example #9
class BlogPost(models.Model):
    _name = "blog.post"
    _description = "Blog Post"
    _inherit = ['mail.thread', 'website.seo.metadata', 'website.published.multi.mixin']
    _order = 'id DESC'
    _mail_post_access = 'read'

    def _compute_website_url(self):
        super(BlogPost, self)._compute_website_url()
        for blog_post in self:
            blog_post.website_url = "/blog/%s/post/%s" % (slug(blog_post.blog_id), slug(blog_post))

    def _default_content(self):
        return '''
            <p class="o_default_snippet_text">''' + _("Start writing here...") + '''</p>
        '''
    name = fields.Char('Title', required=True, translate=True, default='')
    subtitle = fields.Char('Sub Title', translate=True)
    author_id = fields.Many2one('res.partner', 'Author', default=lambda self: self.env.user.partner_id)
    active = fields.Boolean('Active', default=True)
    cover_properties = fields.Text(
        'Cover Properties',
        default='{"background-image": "none", "background-color": "oe_black", "opacity": "0.2", "resize_class": "cover_mid"}')
    blog_id = fields.Many2one('blog.blog', 'Blog', required=True, ondelete='cascade')
    tag_ids = fields.Many2many('blog.tag', string='Tags')
    content = fields.Html('Content', default=_default_content, translate=html_translate, sanitize=False)
    teaser = fields.Text('Teaser', compute='_compute_teaser', inverse='_set_teaser')
    teaser_manual = fields.Text(string='Teaser Content')

    website_message_ids = fields.One2many(domain=lambda self: [('model', '=', self._name), ('message_type', '=', 'comment')])

    # creation / update stuff
    create_date = fields.Datetime('Created on', index=True, readonly=True)
    published_date = fields.Datetime('Published Date')
    post_date = fields.Datetime('Publishing date', compute='_compute_post_date', inverse='_set_post_date', store=True,
                                help="The blog post will be visible for your visitors as of this date on the website if it is set as published.")
    create_uid = fields.Many2one('res.users', 'Created by', index=True, readonly=True)
    write_date = fields.Datetime('Last Updated on', index=True, readonly=True)
    write_uid = fields.Many2one('res.users', 'Last Contributor', index=True, readonly=True)
    author_avatar = fields.Binary(related='author_id.image_128', string="Avatar", readonly=False)
    visits = fields.Integer('No of Views', copy=False)
    website_id = fields.Many2one(related='blog_id.website_id', readonly=True)

    @api.depends('content', 'teaser_manual')
    def _compute_teaser(self):
        for blog_post in self:
            if blog_post.teaser_manual:
                blog_post.teaser = blog_post.teaser_manual
            else:
                content = html2plaintext(blog_post.content).replace('\n', ' ')
                blog_post.teaser = content[:200] + '...'

    def _set_teaser(self):
        for blog_post in self:
            blog_post.teaser_manual = blog_post.teaser

    @api.depends('create_date', 'published_date')
    def _compute_post_date(self):
        for blog_post in self:
            if blog_post.published_date:
                blog_post.post_date = blog_post.published_date
            else:
                blog_post.post_date = blog_post.create_date

    def _set_post_date(self):
        for blog_post in self:
            blog_post.published_date = blog_post.post_date
            if not blog_post.published_date:
                blog_post._write(dict(post_date=blog_post.create_date))  # don't trigger the inverse function

    def _check_for_publication(self, vals):
        if vals.get('is_published'):
            for post in self:
                post.blog_id.message_post_with_view(
                    'website_blog.blog_post_template_new_post',
                    subject=post.name,
                    values={'post': post},
                    subtype_id=self.env['ir.model.data'].xmlid_to_res_id('website_blog.mt_blog_blog_published'))
            return True
        return False

    @api.model
    def create(self, vals):
        post_id = super(BlogPost, self.with_context(mail_create_nolog=True)).create(vals)
        post_id._check_for_publication(vals)
        return post_id

    def write(self, vals):
        result = True
        for post in self:
            copy_vals = dict(vals)
            published_in_vals = set(vals.keys()) & {'is_published', 'website_published'}
            if (published_in_vals and 'published_date' not in vals and
                    (not post.published_date or post.published_date <= fields.Datetime.now())):
                copy_vals['published_date'] = vals[list(published_in_vals)[0]] and fields.Datetime.now() or False
            result &= super(BlogPost, self).write(copy_vals)
        self._check_for_publication(vals)
        return result

    def get_access_action(self, access_uid=None):
        """ Instead of the classic form view, redirect to the post on website
        directly if user is an employee or if the post is published. """
        self.ensure_one()
        user = access_uid and self.env['res.users'].sudo().browse(access_uid) or self.env.user
        if user.share and not self.sudo().website_published:
            return super(BlogPost, self).get_access_action(access_uid)
        return {
            'type': 'ir.actions.act_url',
            'url': self.website_url,
            'target': 'self',
            'target_type': 'public',
            'res_id': self.id,
        }

    def _notify_get_groups(self):
        """ Add access button to everyone if the document is published. """
        groups = super(BlogPost, self)._notify_get_groups()

        if self.website_published:
            for group_name, group_method, group_data in groups:
                group_data['has_button_access'] = True

        return groups

    def _notify_record_by_inbox(self, message, recipients_data, msg_vals=False, **kwargs):
        """ Override to avoid keeping all notified recipients of a comment.
        We avoid tracking needaction on post comments. Only emails should be
        sufficient. """
        if (msg_vals or {}).get('message_type', message.message_type) == 'comment':
            return
        return super(BlogPost, self)._notify_record_by_inbox(message, recipients_data, msg_vals=msg_vals, **kwargs)

    def _default_website_meta(self):
        res = super(BlogPost, self)._default_website_meta()
        res['default_opengraph']['og:description'] = res['default_twitter']['twitter:description'] = self.subtitle
        res['default_opengraph']['og:type'] = 'article'
        res['default_opengraph']['article:published_time'] = self.post_date
        res['default_opengraph']['article:modified_time'] = self.write_date
        res['default_opengraph']['article:tag'] = self.tag_ids.mapped('name')
        # background-image might contain single quotes eg `url('/my/url')`
        res['default_opengraph']['og:image'] = res['default_twitter']['twitter:image'] = json.loads(self.cover_properties).get('background-image', 'none')[4:-1].strip("'")
        res['default_opengraph']['og:title'] = res['default_twitter']['twitter:title'] = self.name
        res['default_meta_description'] = self.subtitle
        return res
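
The teaser field above is a textbook compute/inverse pair: reading falls back to a 200-character extract of content unless a manual teaser exists, and writing stores the value in teaser_manual. A sketch, assuming an `env` handle and an existing blog.blog record bound to `blog`:

post = env['blog.post'].create({
    'name': 'Hello',
    'blog_id': blog.id,        # assumes an existing blog.blog record
    'content': '<p>A long body that will be truncated for the teaser...</p>',
})
print(post.teaser)             # extract of content, capped at 200 characters
post.teaser = 'My own teaser'
print(post.teaser_manual)      # the inverse stored the manual value
print(post.teaser)             # reading now returns the manual teaser
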
Example #10
class RatingMixin(models.AbstractModel):
    _name = 'rating.mixin'
    _description = "Rating Mixin"

    rating_ids = fields.One2many('rating.rating', 'res_id', string='Rating', domain=lambda self: [('res_model', '=', self._name)], auto_join=True)
    rating_last_value = fields.Float('Rating Last Value', compute='_compute_rating_last_value', compute_sudo=True, store=True)
    rating_last_feedback = fields.Text('Rating Last Feedback', related='rating_ids.feedback')
    rating_last_image = fields.Binary('Rating Last Image', related='rating_ids.rating_image')
    rating_count = fields.Integer('Rating count', compute="_compute_rating_stats")
    rating_avg = fields.Float("Rating Average", compute='_compute_rating_stats')

    @api.depends('rating_ids.rating')
    def _compute_rating_last_value(self):
        for record in self:
            ratings = self.env['rating.rating'].search([('res_model', '=', self._name), ('res_id', '=', record.id)], limit=1)
            record.rating_last_value = ratings and ratings.rating or 0

    @api.depends('rating_ids')
    def _compute_rating_stats(self):
        """ Compute avg and count in one query, as thoses fields will be used together most of the time. """
        domain = expression.AND([self._rating_domain(), [('rating', '>=', RATING_LIMIT_MIN)]])
        read_group_res = self.env['rating.rating'].read_group(domain, ['rating:avg'], groupby=['res_id'], lazy=False)  # force average on rating column
        mapping = {item['res_id']: {'rating_count': item['__count'], 'rating_avg': item['rating']} for item in read_group_res}
        for record in self:
            record.rating_count = mapping.get(record.id, {}).get('rating_count', 0)
            record.rating_avg = mapping.get(record.id, {}).get('rating_avg', 0)

    def write(self, values):
        """ If the rated ressource name is modified, we should update the rating res_name too.
            If the rated ressource parent is changed we should update the parent_res_id too"""
        with self.env.norecompute():
            result = super(RatingMixin, self).write(values)
            for record in self:
                if record._rec_name in values:  # set the res_name of ratings to be recomputed
                    res_name_field = self.env['rating.rating']._fields['res_name']
                    self.env.add_to_compute(res_name_field, record.rating_ids)
                if record._rating_get_parent_field_name() in values:
                    record.rating_ids.write({'parent_res_id': record[record._rating_get_parent_field_name()].id})

        return result

    def unlink(self):
        """ When removing a record, its rating should be deleted too. """
        record_ids = self.ids
        result = super(RatingMixin, self).unlink()
        self.env['rating.rating'].sudo().search([('res_model', '=', self._name), ('res_id', 'in', record_ids)]).unlink()
        return result

    def _rating_get_parent_field_name(self):
        """Return the parent relation field name
           Should return a Many2One"""
        return None

    def _rating_domain(self):
        """ Returns a normalized domain on rating.rating to select the records to
            include in count, avg, ... computation of current model.
        """
        return ['&', '&', ('res_model', '=', self._name), ('res_id', 'in', self.ids), ('consumed', '=', True)]

    def rating_get_partner_id(self):
        if hasattr(self, 'partner_id') and self.partner_id:
            return self.partner_id
        return self.env['res.partner']

    def rating_get_rated_partner_id(self):
        if hasattr(self, 'user_id') and self.user_id.partner_id:
            return self.user_id.partner_id
        return self.env['res.partner']

    def rating_get_access_token(self, partner=None):
        if not partner:
            partner = self.rating_get_partner_id()
        rated_partner = self.rating_get_rated_partner_id()
        ratings = self.rating_ids.filtered(lambda x: x.partner_id.id == partner.id and not x.consumed)
        if not ratings:
            record_model_id = self.env['ir.model'].sudo().search([('model', '=', self._name)], limit=1).id
            rating = self.env['rating.rating'].create({
                'partner_id': partner.id,
                'rated_partner_id': rated_partner.id,
                'res_model_id': record_model_id,
                'res_id': self.id
            })
        else:
            rating = ratings[0]
        return rating.access_token

    def rating_send_request(self, template, lang=False, subtype_id=False, force_send=True, composition_mode='comment', notif_layout=None):
        """ This method send rating request by email, using a template given
        in parameter.

         :param template: a mail.template record used to compute the message body;
         :param lang: optional lang; it can also be specified directly on the template
           itself in the lang field;
         :param subtype_id: optional subtype to use when creating the message; is
           a note by default to avoid spamming followers;
         :param force_send: whether to send the request directly or use the mail
           queue cron (preferred option);
         :param composition_mode: comment (message_post) or mass_mail (template.send_mail);
         :param notif_layout: layout used to encapsulate the content when sending email;
        """
        if lang:
            template = template.with_context(lang=lang)
        if subtype_id is False:
            subtype_id = self.env['ir.model.data'].xmlid_to_res_id('mail.mt_note')
        if force_send:
            self = self.with_context(mail_notify_force_send=True)  # default value is True, should be set to false if not?
        for record in self:
            record.message_post_with_template(
                template.id,
                composition_mode=composition_mode,
                email_layout_xmlid=notif_layout if notif_layout is not None else 'mail.mail_notification_light',
                subtype_id=subtype_id
            )

    def rating_apply(self, rate, token=None, feedback=None, subtype=None):
        """ Apply a rating given a token. If the current model inherits from
        mail.thread mixing, a message is posted on its chatter.
        :param rate : the rating value to apply
        :type rate : float
        :param token : access token
        :param feedback : additional feedback
        :type feedback : string
        :param subtype : subtype for mail
        :type subtype : string
        :returns rating.rating record
        """
        Rating, rating = self.env['rating.rating'], None
        if token:
            rating = self.env['rating.rating'].search([('access_token', '=', token)], limit=1)
        else:
            rating = Rating.search([('res_model', '=', self._name), ('res_id', '=', self.ids[0])], limit=1)
        if rating:
            rating.write({'rating': rate, 'feedback': feedback, 'consumed': True})
            if hasattr(self, 'message_post'):
                feedback = tools.plaintext2html(feedback or '')
                self.message_post(
                    body="<img src='/rating/static/src/img/rating_%s.png' alt=':%s/10' style='width:18px;height:18px;float:left;margin-right: 5px;'/>%s"
                    % (rate, rate, feedback),
                    subtype=subtype or "mail.mt_comment",
                    author_id=rating.partner_id and rating.partner_id.id or None  # None will set the default author in mail_thread.py
                )
            if hasattr(self, 'stage_id') and self.stage_id and hasattr(self.stage_id, 'auto_validation_kanban_state') and self.stage_id.auto_validation_kanban_state:
                if rating.rating > 5:
                    self.write({'kanban_state': 'done'})
                if rating.rating < 5:
                    self.write({'kanban_state': 'blocked'})
        return rating

    def rating_get_repartition(self, add_stats=False, domain=None):
        """ get the repatition of rating grade for the given res_ids.
            :param add_stats : flag to add stat to the result
            :type add_stats : boolean
            :param domain : optional extra domain of the rating to include/exclude in repartition
            :return dictionnary
                if not add_stats, the dict is like
                    - key is the rating value (integer)
                    - value is the number of object (res_model, res_id) having the value
                otherwise, key is the value of the information (string) : either stat name (avg, total, ...) or 'repartition'
                containing the same dict if add_stats was False.
        """
        base_domain = expression.AND([self._rating_domain(), [('rating', '>=', 1)]])
        if domain:
            base_domain += domain
        data = self.env['rating.rating'].read_group(base_domain, ['rating'], ['rating', 'res_id'])
        # init dict with all possible rating values, except 0 (no value for the rating)
        values = dict.fromkeys(range(1, 11), 0)
        values.update((d['rating'], d['rating_count']) for d in data)
        # add other stats
        if add_stats:
            rating_number = sum(values.values())
            result = {
                'repartition': values,
                'avg': sum(float(key * values[key]) for key in values) / rating_number if rating_number > 0 else 0,
                'total': sum(it['rating_count'] for it in data),
            }
            return result
        return values

    def rating_get_grades(self, domain=None):
        """ get the repatition of rating grade for the given res_ids.
            :param domain : optional domain of the rating to include/exclude in grades computation
            :return dictionnary where the key is the grade (great, okay, bad), and the value, the number of object (res_model, res_id) having the grade
                    the grade are compute as    0-30% : Bad
                                                31-69%: Okay
                                                70-100%: Great
        """
        data = self.rating_get_repartition(domain=domain)
        res = dict.fromkeys(['great', 'okay', 'bad'], 0)
        for key in data:
            if key >= RATING_LIMIT_SATISFIED:
                res['great'] += data[key]
            elif key > RATING_LIMIT_OK:
                res['okay'] += data[key]
            else:
                res['bad'] += data[key]
        return res

    def rating_get_stats(self, domain=None):
        """ get the statistics of the rating repatition
            :param domain : optional domain of the rating to include/exclude in statistic computation
            :return dictionnary where
                - key is the the name of the information (stat name)
                - value is statistic value : 'percent' contains the repartition in percentage, 'avg' is the average rate
                  and 'total' is the number of rating
        """
        data = self.rating_get_repartition(domain=domain, add_stats=True)
        result = {
            'avg': data['avg'],
            'total': data['total'],
            'percent': dict.fromkeys(range(1, 11), 0),
        }
        for rate in data['repartition']:
            result['percent'][rate] = (data['repartition'][rate] * 100) / data['total'] if data['total'] > 0 else 0
        return result
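
Putting the mixin together, the usual flow is: hand a token to the customer, let them apply a rating through it, then read the aggregates. A hedged sketch on a hypothetical `task` record whose model inherits rating.mixin:

token = task.rating_get_access_token()      # creates a pending rating.rating
task.rating_apply(8, token=token, feedback='Great job')
print(task.rating_avg, task.rating_count)   # aggregates over consumed ratings
print(task.rating_get_grades())             # e.g. {'great': 1, 'okay': 0, 'bad': 0}
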
Example #11
class IrActionsActWindow(models.Model):
    _name = 'ir.actions.act_window'
    _description = 'Action Window'
    _table = 'ir_act_window'
    _inherit = 'ir.actions.actions'
    _sequence = 'ir_actions_id_seq'
    _order = 'name'

    @api.constrains('res_model', 'binding_model_id')
    def _check_model(self):
        for action in self:
            if action.res_model not in self.env:
                raise ValidationError(
                    _('Invalid model name %r in action definition.') %
                    action.res_model)
            if action.binding_model_id and action.binding_model_id.model not in self.env:
                raise ValidationError(
                    _('Invalid model name %r in action definition.') %
                    action.binding_model_id.model)

    @api.depends('view_ids.view_mode', 'view_mode', 'view_id.type')
    def _compute_views(self):
        """ Compute an ordered list of the specific view modes that should be
            enabled when displaying the result of this action, along with the
            ID of the specific view to use for each mode, if any were required.

            This function hides the logic of determining the precedence between
            the view_modes string, the view_ids o2m, and the view_id m2o that
            can be set on the action.
        """
        for act in self:
            act.views = [(view.view_id.id, view.view_mode)
                         for view in act.view_ids]
            got_modes = [view.view_mode for view in act.view_ids]
            all_modes = act.view_mode.split(',')
            missing_modes = [
                mode for mode in all_modes if mode not in got_modes
            ]
            if missing_modes:
                if act.view_id.type in missing_modes:
                    # reorder missing modes to put view_id first if present
                    missing_modes.remove(act.view_id.type)
                    act.views.append((act.view_id.id, act.view_id.type))
                act.views.extend([(False, mode) for mode in missing_modes])

    @api.depends('res_model', 'search_view_id')
    def _compute_search_view(self):
        for act in self:
            fvg = self.env[act.res_model].fields_view_get(
                act.search_view_id.id, 'search')
            act.search_view = str(fvg)

    name = fields.Char(string='Action Name', translate=True)
    type = fields.Char(default="ir.actions.act_window")
    view_id = fields.Many2one('ir.ui.view',
                              string='View Ref.',
                              ondelete='set null')
    domain = fields.Char(
        string='Domain Value',
        help=
        "Optional domain filtering of the destination data, as a Python expression"
    )
    context = fields.Char(
        string='Context Value',
        default={},
        required=True,
        help=
        "Context dictionary as Python expression, empty by default (Default: {})"
    )
    res_id = fields.Integer(
        string='Record ID',
        help=
        "Database ID of record to open in form view, when ``view_mode`` is set to 'form' only"
    )
    res_model = fields.Char(
        string='Destination Model',
        required=True,
        help="Model name of the object to open in the view window")
    target = fields.Selection([('current', 'Current Window'),
                               ('new', 'New Window'),
                               ('inline', 'Inline Edit'),
                               ('fullscreen', 'Full Screen'),
                               ('main', 'Main action of Current Window')],
                              default="current",
                              string='Target Window')
    view_mode = fields.Char(
        required=True,
        default='tree,form',
        help=
        "Comma-separated list of allowed view modes, such as 'form', 'tree', 'calendar', etc. (Default: tree,form)"
    )
    usage = fields.Char(
        string='Action Usage',
        help="Used to filter menu and home actions from the user form.")
    view_ids = fields.One2many('ir.actions.act_window.view',
                               'act_window_id',
                               string='No of Views')
    views = fields.Binary(compute='_compute_views',
                          help="This function field computes the ordered list of views that should be enabled " \
                               "when displaying the result of an action, federating view mode, views and " \
                               "reference view. The result is returned as an ordered list of pairs (view_id,view_mode).")
    limit = fields.Integer(default=80, help='Default limit for the list view')
    groups_id = fields.Many2many('res.groups',
                                 'ir_act_window_group_rel',
                                 'act_id',
                                 'gid',
                                 string='Groups')
    search_view_id = fields.Many2one('ir.ui.view', string='Search View Ref.')
    filter = fields.Boolean()
    search_view = fields.Text(compute='_compute_search_view')

    def read(self, fields=None, load='_classic_read'):
        """ call the method get_empty_list_help of the model and set the window action help message
        """
        result = super(IrActionsActWindow, self).read(fields, load=load)
        if not fields or 'help' in fields:
            for values in result:
                model = values.get('res_model')
                if model in self.env:
                    eval_ctx = dict(self.env.context)
                    try:
                        ctx = safe_eval(values.get('context', '{}'), eval_ctx)
                    except Exception:
                        ctx = {}
                    values['help'] = self.with_context(
                        **ctx).env[model].get_empty_list_help(
                            values.get('help', ''))
        return result

    @api.model
    def for_xml_id(self, module, xml_id):
        """ Returns the act_window object created for the provided xml_id

        :param module: the module the act_window originates in
        :param xml_id: the namespace-less id of the action (the @id
                       attribute from the XML file)
        :return: A read() view of the ir.actions.act_window
        """
        record = self.env.ref("%s.%s" % (module, xml_id))
        return record.read()[0]

    @api.model_create_multi
    def create(self, vals_list):
        self.clear_caches()
        for vals in vals_list:
            if not vals.get('name') and vals.get('res_model'):
                vals['name'] = self.env[vals['res_model']]._description
        return super(IrActionsActWindow, self).create(vals_list)

    def unlink(self):
        self.clear_caches()
        return super(IrActionsActWindow, self).unlink()

    def exists(self):
        ids = self._existing()
        existing = self.filtered(lambda rec: rec.id in ids)
        return existing

    @api.model
    @tools.ormcache()
    def _existing(self):
        self._cr.execute("SELECT id FROM %s" % self._table)
        return set(row[0] for row in self._cr.fetchall())
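
for_xml_id() is the usual way to fetch such an action from code; the returned dict includes the views field computed above as an ordered list of (view_id, view_mode) pairs. A sketch (the `env` handle and XML id are illustrative):

action = env['ir.actions.act_window'].for_xml_id('base', 'action_partner_form')
print(action['views'])   # e.g. [(False, 'tree'), (False, 'form')]
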
Example #12
class res_config(models.TransientModel):
    _inherit = "res.config.settings"

    facebook_sharing = fields.Boolean(string='Facebook',
                                      related='website_id.facebook_sharing',
                                      readonly=False)
    twitter_sharing = fields.Boolean(string='Twitter',
                                     related='website_id.twitter_sharing',
                                     readonly=False)
    linkedin_sharing = fields.Boolean(string='Linkedin',
                                      related='website_id.linkedin_sharing',
                                      readonly=False)
    mail_sharing = fields.Boolean(string='Mail',
                                  related='website_id.mail_sharing',
                                  readonly=False)
    is_load_more = fields.Boolean(string='Load More',
                                  related='website_id.is_load_more',
                                  readonly=False,
                                  help="Load next page products with Ajax")
    load_more_image = fields.Binary(
        string='Load More Image',
        related='website_id.load_more_image',
        readonly=False,
        help="Display this image while load more applies.")
    button_or_scroll = fields.Selection(
        [('automatic', 'Automatic- on page scroll'),
         ('button', 'Button- on click button')],
        string="Loading type for products",
        related='website_id.button_or_scroll',
        required=True,
        default='automatic',
        readonly=False,
        help=
        "Define how to show the pagination of products in a shop page with on scroll or button."
    )
    prev_button_label = fields.Char(string='Label for the Prev Button',
                                    related='website_id.prev_button_label',
                                    readonly=False,
                                    translate=True)
    next_button_label = fields.Char(string='Label for the Next Button',
                                    related='website_id.next_button_label',
                                    readonly=False,
                                    translate=True)
    is_lazy_load = fields.Boolean(string='Lazyload',
                                  related='website_id.is_lazy_load',
                                  readonly=False,
                                  help="Lazy load will be enabled.")
    lazy_load_image = fields.Binary(
        string='Lazyload Image',
        related='website_id.lazy_load_image',
        readonly=False,
        help="Display this image while lazy load applies.")
    banner_video_url = fields.Char(string='Video URL',
                                   related='website_id.banner_video_url',
                                   help='URL of a video for banner.',
                                   readonly=False)
    number_of_product_line = fields.Selection(
        [('1', '1'), ('2', '2'), ('3', '3')],
        string="Number of lines for product name",
        related='website_id.number_of_product_line',
        default='1',
        readonly=False,
        help="Number of lines to show in product name for shop.")
    is_auto_play = fields.Boolean(string='Slider Auto Play',
                                  related='website_id.is_auto_play',
                                  default=True,
                                  readonly=False)

    @api.onchange('is_load_more')
    def get_value_icon_load_more(self):
        if not self.is_load_more:
            img_path = get_resource_path('theme_clarico_vega',
                                         'static/src/img/Loadmore.gif')
            with tools.file_open(img_path, 'rb') as f:
                self.load_more_image = base64.b64encode(f.read())

    @api.onchange('is_lazy_load')
    def get_value_icon_lazy_load(self):
        if not self.is_lazy_load:
            img_path = get_resource_path('theme_clarico_vega',
                                         'static/src/img/Lazyload.gif')
            with tools.file_open(img_path, 'rb') as f:
                self.lazy_load_image = base64.b64encode(f.read())

    @api.onchange('module_sale_product_configurator')
    def install_child_modules(self):
        if self.module_sale_product_configurator:
            irModuleObject = self.env['ir.module.module']
            irModuleObject.update_list()
            emiproInheritModuleId = irModuleObject.search([
                ('state', '!=', 'installed'),
                ('name', '=', 'emipro_theme_sale_product_configurator')
            ])
            if emiproInheritModuleId:
                emiproInheritModuleId[0].button_immediate_install()
Example #13
class Rating(models.Model):
    _name = "rating.rating"
    _description = "Rating"
    _order = 'write_date desc'
    _rec_name = 'res_name'
    _sql_constraints = [
        ('rating_range', 'check(rating >= 0 and rating <= 10)', 'Rating should be between 0 and 10'),
    ]

    @api.depends('res_model', 'res_id')
    def _compute_res_name(self):
        for rating in self:
            name = self.env[rating.res_model].sudo().browse(rating.res_id).name_get()
            rating.res_name = name and name[0][1] or ('%s/%s') % (rating.res_model, rating.res_id)

    @api.model
    def _default_access_token(self):
        return uuid.uuid4().hex

    res_name = fields.Char(string='Resource name', compute='_compute_res_name', store=True, help="The name of the rated resource.")
    res_model_id = fields.Many2one('ir.model', 'Related Document Model', index=True, ondelete='cascade', help='Model of the followed resource')
    res_model = fields.Char(string='Document Model', related='res_model_id.model', store=True, index=True, readonly=True)
    res_id = fields.Integer(string='Document', required=True, help="Identifier of the rated object", index=True)
    parent_res_name = fields.Char('Parent Document Name', compute='_compute_parent_res_name', store=True)
    parent_res_model_id = fields.Many2one('ir.model', 'Parent Related Document Model', index=True, ondelete='cascade')
    parent_res_model = fields.Char('Parent Document Model', store=True, related='parent_res_model_id.model', index=True, readonly=False)
    parent_res_id = fields.Integer('Parent Document', index=True)
    rated_partner_id = fields.Many2one('res.partner', string="Rated person", help="Owner of the rated resource")
    partner_id = fields.Many2one('res.partner', string='Customer', help="Author of the rating")
    rating = fields.Float(string="Rating Number", group_operator="avg", default=0, help="Rating value: 0=Unhappy, 10=Happy")
    rating_image = fields.Binary('Image', compute='_compute_rating_image')
    rating_text = fields.Selection([
        ('satisfied', 'Satisfied'),
        ('not_satisfied', 'Not satisfied'),
        ('highly_dissatisfied', 'Highly dissatisfied'),
        ('no_rating', 'No Rating yet')], string='Rating', store=True, compute='_compute_rating_text', readonly=True)
    feedback = fields.Text('Comment', help="Reason of the rating")
    message_id = fields.Many2one(
        'mail.message', string="Linked message",
        index=True, ondelete='cascade',
        help="Associated message when posting a review. Mainly used in website addons.")
    access_token = fields.Char('Security Token', default=_default_access_token, help="Access token to set the rating of the value")
    consumed = fields.Boolean(string="Filled Rating", help="Enabled if the rating has been filled.")

    @api.depends('parent_res_model', 'parent_res_id')
    def _compute_parent_res_name(self):
        for rating in self:
            name = False
            if rating.parent_res_model and rating.parent_res_id:
                name = self.env[rating.parent_res_model].sudo().browse(rating.parent_res_id).name_get()
                name = name and name[0][1] or ('%s/%s') % (rating.parent_res_model, rating.parent_res_id)
            rating.parent_res_name = name

    @api.depends('rating')
    def _compute_rating_image(self):
        # Due to some new widgets, we may have ratings different from 0/1/5/10 (e.g. slide.channel review)
        # Let us have some custom rounding while finding a better solution for images.
        for rating in self:
            rating_for_img = 0
            if rating.rating >= 8:
                rating_for_img = 10
            elif rating.rating > 3:
                rating_for_img = 5
            elif rating.rating >= 1:
                rating_for_img = 1
            try:
                image_path = get_resource_path('rating', 'static/src/img', 'rating_%s.png' % rating_for_img)
                with open(image_path, 'rb') as image_file:
                    rating.rating_image = base64.b64encode(image_file.read())
            except (IOError, OSError):
                rating.rating_image = False

    @api.depends('rating')
    def _compute_rating_text(self):
        for rating in self:
            if rating.rating >= RATING_LIMIT_SATISFIED:
                rating.rating_text = 'satisfied'
            elif rating.rating > RATING_LIMIT_OK:
                rating.rating_text = 'not_satisfied'
            elif rating.rating >= RATING_LIMIT_MIN:
                rating.rating_text = 'highly_dissatisfied'
            else:
                rating.rating_text = 'no_rating'

    @api.model
    def create(self, values):
        if values.get('res_model_id') and values.get('res_id'):
            values.update(self._find_parent_data(values))
        return super(Rating, self).create(values)

    def write(self, values):
        if values.get('res_model_id') and values.get('res_id'):
            values.update(self._find_parent_data(values))
        return super(Rating, self).write(values)

    def unlink(self):
        # OPW-2181568: Delete the chatter message too
        self.env['mail.message'].search([('rating_ids', 'in', self.ids)]).unlink()
        return super(Rating, self).unlink()

    def _find_parent_data(self, values):
        """ Determine the parent res_model/res_id, based on the values to create or write """
        current_model_name = self.env['ir.model'].sudo().browse(values['res_model_id']).model
        current_record = self.env[current_model_name].browse(values['res_id'])
        data = {
            'parent_res_model_id': False,
            'parent_res_id': False,
        }
        if hasattr(current_record, '_rating_get_parent_field_name'):
            current_record_parent = current_record._rating_get_parent_field_name()
            if current_record_parent:
                parent_res_model = getattr(current_record, current_record_parent)
                data['parent_res_model_id'] = self.env['ir.model']._get(parent_res_model._name).id
                data['parent_res_id'] = parent_res_model.id
        return data

    def reset(self):
        for record in self:
            record.write({
                'rating': 0,
                'access_token': record._default_access_token(),
                'feedback': False,
                'consumed': False,
            })

    def action_open_rated_object(self):
        self.ensure_one()
        return {
            'type': 'ir.actions.act_window',
            'res_model': self.res_model,
            'res_id': self.res_id,
            'views': [[False, 'form']]
        }
Example #14
class MrpWorkorder(models.Model):
    _name = 'mrp.workorder'
    _description = 'Work Order'
    _inherit = ['mail.thread', 'mail.activity.mixin', 'mrp.abstract.workorder']

    def _read_group_workcenter_id(self, workcenters, domain, order):
        workcenter_ids = self.env.context.get('default_workcenter_id')
        if not workcenter_ids:
            workcenter_ids = workcenters._search(
                [], order=order, access_rights_uid=SUPERUSER_ID)
        return workcenters.browse(workcenter_ids)

    name = fields.Char('Work Order',
                       required=True,
                       states={
                           'done': [('readonly', True)],
                           'cancel': [('readonly', True)]
                       })
    company_id = fields.Many2one('res.company',
                                 'Company',
                                 default=lambda self: self.env.company,
                                 required=True,
                                 index=True,
                                 readonly=True)
    workcenter_id = fields.Many2one('mrp.workcenter',
                                    'Work Center',
                                    required=True,
                                    states={
                                        'done': [('readonly', True)],
                                        'cancel': [('readonly', True)]
                                    },
                                    group_expand='_read_group_workcenter_id',
                                    check_company=True)
    working_state = fields.Selection(string='Workcenter Status',
                                     related='workcenter_id.working_state',
                                     readonly=False,
                                     help='Technical: used in views only')
    production_availability = fields.Selection(
        string='Stock Availability',
        readonly=True,
        related='production_id.reservation_state',
        store=True,
        help='Technical: used in views and domains only.')
    production_state = fields.Selection(string='Production State',
                                        readonly=True,
                                        related='production_id.state',
                                        help='Technical: used in views only.')
    qty_production = fields.Float('Original Production Quantity',
                                  readonly=True,
                                  related='production_id.product_qty')
    qty_remaining = fields.Float('Quantity To Be Produced',
                                 compute='_compute_qty_remaining',
                                 digits='Product Unit of Measure')
    qty_produced = fields.Float(
        'Quantity',
        default=0.0,
        readonly=True,
        digits='Product Unit of Measure',
        help="The number of products already handled by this work order")
    is_produced = fields.Boolean(string="Has Been Produced",
                                 compute='_compute_is_produced')
    state = fields.Selection([('pending', 'Waiting for another WO'),
                              ('ready', 'Ready'), ('progress', 'In Progress'),
                              ('done', 'Finished'), ('cancel', 'Cancelled')],
                             string='Status',
                             default='pending')
    leave_id = fields.Many2one(
        'resource.calendar.leaves',
        help='Slot into workcenter calendar once planned',
        check_company=True)
    date_planned_start = fields.Datetime('Scheduled Date Start',
                                         compute='_compute_dates_planned',
                                         inverse='_set_dates_planned',
                                         states={
                                             'done': [('readonly', True)],
                                             'cancel': [('readonly', True)]
                                         },
                                         store=True,
                                         tracking=True)
    date_planned_finished = fields.Datetime('Scheduled Date Finished',
                                            compute='_compute_dates_planned',
                                            inverse='_set_dates_planned',
                                            states={
                                                'done': [('readonly', True)],
                                                'cancel': [('readonly', True)]
                                            },
                                            store=True,
                                            tracking=True)
    date_start = fields.Datetime('Effective Start Date',
                                 states={
                                     'done': [('readonly', True)],
                                     'cancel': [('readonly', True)]
                                 })
    date_finished = fields.Datetime('Effective End Date',
                                    states={
                                        'done': [('readonly', True)],
                                        'cancel': [('readonly', True)]
                                    })

    duration_expected = fields.Float('Expected Duration',
                                     digits=(16, 2),
                                     states={
                                         'done': [('readonly', True)],
                                         'cancel': [('readonly', True)]
                                     },
                                     help="Expected duration (in minutes)")
    duration = fields.Float('Real Duration',
                            compute='_compute_duration',
                            readonly=True,
                            store=True)
    duration_unit = fields.Float('Duration Per Unit',
                                 compute='_compute_duration',
                                 readonly=True,
                                 store=True)
    duration_percent = fields.Integer('Duration Deviation (%)',
                                      compute='_compute_duration',
                                      group_operator="avg",
                                      readonly=True,
                                      store=True)
    progress = fields.Float('Progress Done (%)',
                            digits=(16, 2),
                            compute='_compute_progress')

    operation_id = fields.Many2one('mrp.routing.workcenter',
                                   'Operation',
                                   check_company=True)
    # Should be used differently as BoM can change in the meantime
    worksheet = fields.Binary('Worksheet',
                              related='operation_id.worksheet',
                              readonly=True)
    worksheet_type = fields.Selection(string='Worksheet Type',
                                      related='operation_id.worksheet_type',
                                      readonly=True)
    worksheet_google_slide = fields.Char(
        'Worksheet URL',
        related='operation_id.worksheet_google_slide',
        readonly=True)
    move_raw_ids = fields.One2many('stock.move',
                                   'workorder_id',
                                   'Raw Moves',
                                   domain=[('raw_material_production_id', '!=',
                                            False),
                                           ('production_id', '=', False)])
    move_finished_ids = fields.One2many('stock.move',
                                        'workorder_id',
                                        'Finished Moves',
                                        domain=[('raw_material_production_id',
                                                 '=', False),
                                                ('production_id', '!=', False)
                                                ])
    move_line_ids = fields.One2many(
        'stock.move.line',
        'workorder_id',
        'Moves to Track',
        help=
        "Inventory moves for which you must scan a lot number at this work order"
    )
    finished_lot_id = fields.Many2one(
        'stock.production.lot',
        'Lot/Serial Number',
        domain="[('id', 'in', allowed_lots_domain)]",
        states={
            'done': [('readonly', True)],
            'cancel': [('readonly', True)]
        },
        check_company=True)
    time_ids = fields.One2many('mrp.workcenter.productivity', 'workorder_id')
    is_user_working = fields.Boolean(
        'Is the Current User Working',
        compute='_compute_working_users',
        help="Technical field indicating whether the current user is working. "
    )
    working_user_ids = fields.One2many(
        'res.users',
        string='Working user on this work order.',
        compute='_compute_working_users')
    last_working_user_id = fields.One2many(
        'res.users',
        string='Last user that worked on this work order.',
        compute='_compute_working_users')

    next_work_order_id = fields.Many2one('mrp.workorder',
                                         "Next Work Order",
                                         check_company=True)
    scrap_ids = fields.One2many('stock.scrap', 'workorder_id')
    scrap_count = fields.Integer(compute='_compute_scrap_move_count',
                                 string='Scrap Move')
    production_date = fields.Datetime(
        'Production Date',
        related='production_id.date_planned_start',
        store=True,
        readonly=False)
    color = fields.Integer('Color', compute='_compute_color')
    capacity = fields.Float(
        'Capacity',
        default=1.0,
        help="Number of pieces that can be produced in parallel.")
    raw_workorder_line_ids = fields.One2many('mrp.workorder.line',
                                             'raw_workorder_id',
                                             string='Components')
    finished_workorder_line_ids = fields.One2many('mrp.workorder.line',
                                                  'finished_workorder_id',
                                                  string='By-products')
    allowed_lots_domain = fields.One2many(
        comodel_name='stock.production.lot',
        compute="_compute_allowed_lots_domain")

    # Both `date_planned_start` and `date_planned_finished` are related fields on `leave_id`. Say
    # we slide a workorder on a gantt view: a single call to write() is made with both
    # field changes. As the ORM doesn't batch the write on related fields and instead
    # makes multiple calls, the constraint check_dates() is raised.
    # That's why the compute and set methods are needed: they ensure both dates are
    # updated at the same time.
    @api.depends('leave_id')
    def _compute_dates_planned(self):
        for workorder in self:
            workorder.date_planned_start = workorder.leave_id.date_from
            workorder.date_planned_finished = workorder.leave_id.date_to

    def _set_dates_planned(self):
        date_from = self[0].date_planned_start
        date_to = self[0].date_planned_finished
        self.mapped('leave_id').write({
            'date_from': date_from,
            'date_to': date_to,
        })
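
    # Illustrative call (hypothetical values, not from the module): sliding a
    # workorder in the gantt view writes both dates at once, e.g.
    #   workorder.write({'date_planned_start': '2020-01-06 08:00:00',
    #                    'date_planned_finished': '2020-01-06 09:00:00'})
    # _set_dates_planned then funnels both values into a single
    # leave_id.write(), so check_dates() never sees a half-updated leave.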

    @api.onchange('finished_lot_id')
    def _onchange_finished_lot_id(self):
        """When the user changes the lot being currently produced, suggest
        a quantity to produce consistent with the previous workorders. """
        previous_wo = self.env['mrp.workorder'].search([('next_work_order_id',
                                                         '=', self.id)])
        if previous_wo:
            line = previous_wo.finished_workorder_line_ids.filtered(
                lambda line: line.product_id == self.product_id and line.lot_id
                == self.finished_lot_id)
            if line:
                self.qty_producing = line.qty_done

    @api.onchange('date_planned_finished')
    def _onchange_date_planned_finished(self):
        if self.date_planned_start and self.date_planned_finished:
            diff = self.date_planned_finished - self.date_planned_start
            self.duration_expected = diff.total_seconds() / 60

    @api.depends(
        'production_id.workorder_ids.finished_workorder_line_ids',
        'production_id.workorder_ids.finished_workorder_line_ids.qty_done',
        'production_id.workorder_ids.finished_workorder_line_ids.lot_id')
    def _compute_allowed_lots_domain(self):
        """ Check if all the finished products has been assigned to a serial
        number or a lot in other workorders. If yes, restrict the selectable lot
        to the lot/sn used in other workorders.
        """
        productions = self.mapped('production_id')
        treated = self.browse()
        for production in productions:
            if production.product_id.tracking == 'none':
                continue

            rounding = production.product_uom_id.rounding
            finished_workorder_lines = production.workorder_ids.mapped(
                'finished_workorder_line_ids').filtered(
                    lambda wl: wl.product_id == production.product_id)
            qties_done_per_lot = defaultdict(list)
            for finished_workorder_line in finished_workorder_lines:
                # It is possible to have finished workorder lines without a lot (eg using the dummy
                # test type). Ignore them when computing the allowed lots.
                if finished_workorder_line.lot_id:
                    qties_done_per_lot[
                        finished_workorder_line.lot_id.id].append(
                            finished_workorder_line.qty_done)

            qty_to_produce = production.product_qty
            allowed_lot_ids = self.env['stock.production.lot']
            qty_produced = sum(
                [max(qty_dones) for qty_dones in qties_done_per_lot.values()])
            if float_compare(qty_produced,
                             qty_to_produce,
                             precision_rounding=rounding) < 0:
                # If we haven't produced enough, all lots are available
                allowed_lot_ids = self.env['stock.production.lot'].search([
                    ('product_id', '=', production.product_id.id),
                    ('company_id', '=', production.company_id.id),
                ])
            else:
                # If we produced enough, only the already produced lots are available
                allowed_lot_ids = self.env['stock.production.lot'].browse(
                    qties_done_per_lot.keys())
            workorders = production.workorder_ids.filtered(
                lambda wo: wo.state not in ('done', 'cancel'))
            for workorder in workorders:
                if workorder.product_tracking == 'serial':
                    workorder.allowed_lots_domain = allowed_lot_ids - workorder.finished_workorder_line_ids.filtered(
                        lambda wl: wl.product_id == production.product_id
                    ).mapped('lot_id')
                else:
                    workorder.allowed_lots_domain = allowed_lot_ids
                treated |= workorder
        (self - treated).allowed_lots_domain = False
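
    # Sketch of the restriction above (hypothetical data): for a production
    # of 5 units with qties_done_per_lot = {lot_a: [4, 3], lot_b: [1]},
    # qty_produced = max(4, 3) + max(1) = 5 >= 5, so only lot_a and lot_b
    # remain selectable; had qty_produced been below 5, any lot of the
    # product would still be allowed.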

    def name_get(self):
        return [(wo.id, "%s - %s - %s" %
                 (wo.production_id.name, wo.product_id.name, wo.name))
                for wo in self]

    def unlink(self):
        # Removes references to workorder to avoid Validation Error
        (self.mapped('move_raw_ids') | self.mapped('move_finished_ids')).write(
            {'workorder_id': False})
        self.mapped('leave_id').unlink()
        return super(MrpWorkorder, self).unlink()

    @api.depends('production_id.product_qty', 'qty_produced')
    def _compute_is_produced(self):
        self.is_produced = False
        for order in self.filtered(lambda p: p.production_id):
            rounding = order.production_id.product_uom_id.rounding
            order.is_produced = float_compare(order.qty_produced,
                                              order.production_id.product_qty,
                                              precision_rounding=rounding) >= 0

    @api.depends('time_ids.duration', 'qty_produced')
    def _compute_duration(self):
        for order in self:
            order.duration = sum(order.time_ids.mapped('duration'))
            order.duration_unit = round(order.duration /
                                        max(order.qty_produced, 1),
                                        2)  # round to 2 decimals because it is a time
            if order.duration_expected:
                order.duration_percent = 100 * (
                    order.duration_expected -
                    order.duration) / order.duration_expected
            else:
                order.duration_percent = 0
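
    # Worked example (hypothetical values): duration_expected = 60 and
    # duration = 45 give duration_percent = 100 * (60 - 45) / 60 = 25,
    # i.e. 25% faster than expected; a negative value means an overrun.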

    @api.depends('duration', 'duration_expected', 'state')
    def _compute_progress(self):
        for order in self:
            if order.state == 'done':
                order.progress = 100
            elif order.duration_expected:
                order.progress = order.duration * 100 / order.duration_expected
            else:
                order.progress = 0

    def _compute_working_users(self):
        """ Checks whether the current user is working, all the users currently working and the last user that worked. """
        for order in self:
            order.working_user_ids = [
                (4, user.id) for user in order.time_ids.filtered(
                    lambda time: not time.date_end).sorted(
                        'date_start').mapped('user_id')
            ]
            if order.working_user_ids:
                order.last_working_user_id = order.working_user_ids[-1]
            elif order.time_ids:
                order.last_working_user_id = order.time_ids.sorted(
                    'date_end')[-1].user_id
            else:
                order.last_working_user_id = False
            if order.time_ids.filtered(
                    lambda x: (x.user_id.id == self.env.user.id) and
                (not x.date_end) and (x.loss_type in
                                      ('productive', 'performance'))):
                order.is_user_working = True
            else:
                order.is_user_working = False

    def _compute_scrap_move_count(self):
        data = self.env['stock.scrap'].read_group(
            [('workorder_id', 'in', self.ids)], ['workorder_id'],
            ['workorder_id'])
        count_data = dict((item['workorder_id'][0], item['workorder_id_count'])
                          for item in data)
        for workorder in self:
            workorder.scrap_count = count_data.get(workorder.id, 0)
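
    # The read_group call above returns rows shaped like (hypothetical ids):
    #   [{'workorder_id': (1, 'WO/00001'), 'workorder_id_count': 2}]
    # which count_data flattens to {1: 2} before assigning scrap_count.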

    @api.depends('date_planned_finished',
                 'production_id.date_planned_finished')
    def _compute_color(self):
        late_orders = self.filtered(
            lambda x: x.production_id.date_planned_finished and
            x.date_planned_finished and
            x.date_planned_finished > x.production_id.date_planned_finished)
        for order in late_orders:
            order.color = 4
        for order in (self - late_orders):
            order.color = 2

    @api.onchange('date_planned_start', 'duration_expected')
    def _onchange_date_planned_start(self):
        if self.date_planned_start and self.duration_expected:
            self.date_planned_finished = self.date_planned_start + relativedelta(
                minutes=self.duration_expected)

    def write(self, values):
        if 'production_id' in values:
            raise UserError(
                _('You cannot link this work order to another manufacturing order.'
                  ))
        if 'workcenter_id' in values:
            for workorder in self:
                if workorder.workcenter_id.id != values['workcenter_id']:
                    if workorder.state in ('progress', 'done', 'cancel'):
                        raise UserError(
                            _('You cannot change the workcenter of a work order that is in progress or done.'
                              ))
                    workorder.leave_id.resource_id = self.env[
                        'mrp.workcenter'].browse(
                            values['workcenter_id']).resource_id
        if list(values.keys()) != ['time_ids'] and any(
                workorder.state == 'done' for workorder in self):
            raise UserError(_('You cannot change the finished work order.'))
        if 'date_planned_start' in values or 'date_planned_finished' in values:
            for workorder in self:
                start_date = fields.Datetime.to_datetime(
                    values.get(
                        'date_planned_start')) or workorder.date_planned_start
                end_date = fields.Datetime.to_datetime(
                    values.get('date_planned_finished')
                ) or workorder.date_planned_finished
                if start_date and end_date and start_date > end_date:
                    raise UserError(
                        _('The planned end date of the work order cannot be prior to the planned start date, please correct this to save the work order.'
                          ))
                # Update MO dates if the start date of the first WO or the
                # finished date of the last WO is updated.
                if workorder == workorder.production_id.workorder_ids[
                        0] and 'date_planned_start' in values:
                    workorder.production_id.with_context(
                        force_date=True).write({
                            'date_planned_start':
                            fields.Datetime.to_datetime(
                                values['date_planned_start'])
                        })
                if workorder == workorder.production_id.workorder_ids[
                        -1] and 'date_planned_finished' in values:
                    workorder.production_id.with_context(
                        force_date=True).write({
                            'date_planned_finished':
                            fields.Datetime.to_datetime(
                                values['date_planned_finished'])
                        })
        return super(MrpWorkorder, self).write(values)

    def _generate_wo_lines(self):
        """ Generate workorder line """
        self.ensure_one()
        moves = (self.move_raw_ids | self.move_finished_ids
                 ).filtered(lambda move: move.state not in ('done', 'cancel'))
        for move in moves:
            qty_to_consume = self._prepare_component_quantity(
                move, self.qty_producing)
            line_values = self._generate_lines_values(move, qty_to_consume)
            self.env['mrp.workorder.line'].create(line_values)

    def _apply_update_workorder_lines(self):
        """ update existing line on the workorder. It could be trigger manually
        after a modification of qty_producing.
        """
        self.ensure_one()
        line_values = self._update_workorder_lines()
        self.env['mrp.workorder.line'].create(line_values['to_create'])
        if line_values['to_delete']:
            line_values['to_delete'].unlink()
        for line, vals in line_values['to_update'].items():
            line.write(vals)

    def _refresh_wo_lines(self):
        """ Modify exisiting workorder line in order to match the reservation on
        stock move line. The strategy is to remove the line that were not
        processed yet then call _generate_lines_values that recreate workorder
        line depending the reservation.
        """
        for workorder in self:
            raw_moves = workorder.move_raw_ids.filtered(
                lambda move: move.state not in ('done', 'cancel'))
            wl_to_unlink = self.env['mrp.workorder.line']
            for move in raw_moves:
                rounding = move.product_uom.rounding
                qty_already_consumed = 0.0
                workorder_lines = workorder.raw_workorder_line_ids.filtered(
                    lambda w: w.move_id == move)
                for wl in workorder_lines:
                    if not wl.qty_done:
                        wl_to_unlink |= wl
                        continue

                    qty_already_consumed += wl.qty_done
                qty_to_consume = self._prepare_component_quantity(
                    move, workorder.qty_producing)
                wl_to_unlink.unlink()
                if float_compare(qty_to_consume,
                                 qty_already_consumed,
                                 precision_rounding=rounding) > 0:
                    line_values = workorder._generate_lines_values(
                        move, qty_to_consume - qty_already_consumed)
                    self.env['mrp.workorder.line'].create(line_values)

    def _defaults_from_finished_workorder_line(self, reference_lot_lines):
        for r_line in reference_lot_lines:
            # see which lot we could suggest and its related qty_producing
            if not r_line.lot_id:
                continue
            candidates = self.finished_workorder_line_ids.filtered(
                lambda line: line.lot_id == r_line.lot_id)
            rounding = self.product_uom_id.rounding
            if not candidates:
                self.write({
                    'finished_lot_id': r_line.lot_id.id,
                    'qty_producing': r_line.qty_done,
                })
                return True
            elif float_compare(candidates.qty_done,
                               r_line.qty_done,
                               precision_rounding=rounding) < 0:
                self.write({
                    'finished_lot_id':
                    r_line.lot_id.id,
                    'qty_producing':
                    r_line.qty_done - candidates.qty_done,
                })
                return True
        return False

    def record_production(self):
        if not self:
            return True

        self.ensure_one()
        self._check_company()
        if float_compare(self.qty_producing,
                         0,
                         precision_rounding=self.product_uom_id.rounding) <= 0:
            raise UserError(
                _('Please set the quantity you are currently producing. It should be different from zero.'
                  ))

        # If last work order, then post lots used
        if not self.next_work_order_id:
            self._update_finished_move()

        # Transfer quantities from temporary to final move line or make them final
        self._update_moves()

        # Transfer lot (if present) and quantity produced to a finished workorder line
        if self.product_tracking != 'none':
            self._create_or_update_finished_line()

        # Update workorder quantity produced
        self.qty_produced += self.qty_producing

        # Suggest a finished lot on the next workorder
        if self.next_work_order_id and self.product_tracking != 'none' and (
                not self.next_work_order_id.finished_lot_id
                or self.next_work_order_id.finished_lot_id
                == self.finished_lot_id):
            self.next_work_order_id._defaults_from_finished_workorder_line(
                self.finished_workorder_line_ids)
            # As we may have changed the quantity to produce on the next workorder,
            # make sure to update its workorder lines
            self.next_work_order_id._apply_update_workorder_lines()

        # Once a piece is produced, you can launch the next work order
        self._start_nextworkorder()

        # Test if the production is done
        rounding = self.production_id.product_uom_id.rounding
        if float_compare(self.qty_produced,
                         self.production_id.product_qty,
                         precision_rounding=rounding) < 0:
            previous_wo = self.env['mrp.workorder']
            if self.product_tracking != 'none':
                previous_wo = self.env['mrp.workorder'].search([
                    ('next_work_order_id', '=', self.id)
                ])
            candidate_found_in_previous_wo = False
            if previous_wo:
                candidate_found_in_previous_wo = self._defaults_from_finished_workorder_line(
                    previous_wo.finished_workorder_line_ids)
            if not candidate_found_in_previous_wo:
                # self is the first workorder
                self.qty_producing = self.qty_remaining
                self.finished_lot_id = False
                if self.product_tracking == 'serial':
                    self.qty_producing = 1

            self._apply_update_workorder_lines()
        else:
            self.qty_producing = 0
            self.button_finish()
        return True
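
    # Typical flow (hypothetical quantities): with product_qty = 5,
    # qty_producing = 2 and tracking enabled, record_production() posts the
    # moves, bumps qty_produced to 2, suggests the finished lot on the next
    # workorder and resets qty_producing to the remaining quantity (or to 1
    # for serial tracking).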

    def _get_byproduct_move_to_update(self):
        return self.production_id.move_finished_ids.filtered(
            lambda x: (x.product_id.id != self.production_id.product_id.id) and
            (x.state not in ('done', 'cancel')))

    def _create_or_update_finished_line(self):
        """
        1. Check that the final lot and the quantity producing are valid regarding
            other workorders of this production
        2. Save final lot and quantity producing to suggest on next workorder
        """
        self.ensure_one()
        final_lot_quantity = self.qty_production
        rounding = self.product_uom_id.rounding
        # Get the max quantity possible for current lot in other workorders
        for workorder in (self.production_id.workorder_ids - self):
            # We add the remaining quantity to the produced quantity for the
            # current lot. For 5 finished products: if the first wo creates
            # 4 lot A and 1 lot B, and the second creates 3 lot A while 2 units
            # remain to produce, it could produce 5 lot A. In that case we
            # select 4, since it would conflict with the first workorder
            # otherwise.
            line = workorder.finished_workorder_line_ids.filtered(
                lambda line: line.lot_id == self.finished_lot_id)
            line_without_lot = workorder.finished_workorder_line_ids.filtered(
                lambda line: line.product_id == workorder.product_id and
                not line.lot_id)
            quantity_remaining = workorder.qty_remaining + line_without_lot.qty_done
            quantity = line.qty_done + quantity_remaining
            if line and float_compare(quantity,
                                      final_lot_quantity,
                                      precision_rounding=rounding) <= 0:
                final_lot_quantity = quantity
            elif float_compare(quantity_remaining,
                               final_lot_quantity,
                               precision_rounding=rounding) < 0:
                final_lot_quantity = quantity_remaining

        # final lot line for this lot on this workorder.
        current_lot_lines = self.finished_workorder_line_ids.filtered(
            lambda line: line.lot_id == self.finished_lot_id)

        # this lot has already been produced
        if float_compare(final_lot_quantity,
                         current_lot_lines.qty_done + self.qty_producing,
                         precision_rounding=rounding) < 0:
            raise UserError(
                _('You have produced %s %s of lot %s in the previous workorder. You are trying to produce %s in this one'
                  ) % (final_lot_quantity, self.product_id.uom_id.name,
                       self.finished_lot_id.name,
                       current_lot_lines.qty_done + self.qty_producing))

        # Update the workorder line that registers the final lot created
        if not current_lot_lines:
            current_lot_lines = self.env['mrp.workorder.line'].create({
                'finished_workorder_id':
                self.id,
                'product_id':
                self.product_id.id,
                'lot_id':
                self.finished_lot_id.id,
                'qty_done':
                self.qty_producing,
            })
        else:
            current_lot_lines.qty_done += self.qty_producing

    def _start_nextworkorder(self):
        rounding = self.product_id.uom_id.rounding
        if self.next_work_order_id.state == 'pending' and (
            (self.operation_id.batch == 'no'
             and float_compare(self.qty_production,
                               self.qty_produced,
                               precision_rounding=rounding) <= 0) or
            (self.operation_id.batch == 'yes'
             and float_compare(self.operation_id.batch_size,
                               self.qty_produced,
                               precision_rounding=rounding) <= 0)):
            self.next_work_order_id.state = 'ready'
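
    # Example of the batch logic above (hypothetical values): with
    # operation_id.batch == 'yes' and batch_size = 2, the next workorder
    # switches from 'pending' to 'ready' as soon as qty_produced reaches 2,
    # without waiting for the whole qty_production.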

    def button_start(self):
        self.ensure_one()
        # As button_start is automatically called in the new view
        if self.state in ('done', 'cancel'):
            return True

        # Need a loss in case the real time exceeds the expected duration
        timeline = self.env['mrp.workcenter.productivity']
        if self.duration < self.duration_expected:
            loss_id = self.env['mrp.workcenter.productivity.loss'].search(
                [('loss_type', '=', 'productive')], limit=1)
            if not loss_id:
                raise UserError(
                    _("You need to define at least one productivity loss in the category 'Productivity'. Create one from the Manufacturing app, menu: Configuration / Productivity Losses."
                      ))
        else:
            loss_id = self.env['mrp.workcenter.productivity.loss'].search(
                [('loss_type', '=', 'performance')], limit=1)
            if not loss_id:
                raise UserError(
                    _("You need to define at least one productivity loss in the category 'Performance'. Create one from the Manufacturing app, menu: Configuration / Productivity Losses."
                      ))
        if self.production_id.state != 'progress':
            self.production_id.write({
                'date_start': datetime.now(),
            })
        timeline.create({
            'workorder_id': self.id,
            'workcenter_id': self.workcenter_id.id,
            'description': _('Time Tracking: ') + self.env.user.name,
            'loss_id': loss_id[0].id,
            'date_start': datetime.now(),
            'user_id':
            self.env.user.id,  # FIXME sle: can be inconsistent with company_id
            'company_id': self.company_id.id,
        })
        if self.state == 'progress':
            return True
        else:
            start_date = datetime.now()
            vals = {
                'state': 'progress',
                'date_start': start_date,
                'date_planned_start': start_date,
            }
            if self.date_planned_finished < start_date:
                vals['date_planned_finished'] = start_date
            return self.write(vals)

    def button_finish(self):
        self.ensure_one()
        self.end_all()
        end_date = datetime.now()
        return self.write({
            'state': 'done',
            'date_finished': end_date,
            'date_planned_finished': end_date
        })

    def end_previous(self, doall=False):
        """
        :param doall: when True, close all open time lines on the open work
                      orders; otherwise close only those of the current user
        """
        # TDE CLEANME
        timeline_obj = self.env['mrp.workcenter.productivity']
        domain = [('workorder_id', 'in', self.ids), ('date_end', '=', False)]
        if not doall:
            domain.append(('user_id', '=', self.env.user.id))
        not_productive_timelines = timeline_obj.browse()
        for timeline in timeline_obj.search(domain,
                                            limit=None if doall else 1):
            wo = timeline.workorder_id
            if wo.duration_expected <= wo.duration:
                if timeline.loss_type == 'productive':
                    not_productive_timelines += timeline
                timeline.write({'date_end': fields.Datetime.now()})
            else:
                maxdate = fields.Datetime.from_string(
                    timeline.date_start) + relativedelta(
                        minutes=wo.duration_expected - wo.duration)
                enddate = datetime.now()
                if maxdate > enddate:
                    timeline.write({'date_end': enddate})
                else:
                    timeline.write({'date_end': maxdate})
                    not_productive_timelines += timeline.copy({
                        'date_start':
                        maxdate,
                        'date_end':
                        enddate
                    })
        if not_productive_timelines:
            loss_id = self.env['mrp.workcenter.productivity.loss'].search(
                [('loss_type', '=', 'performance')], limit=1)
            if not loss_id:
                raise UserError(
                    _("You need to define at least one unactive productivity loss in the category 'Performance'. Create one from the Manufacturing app, menu: Configuration / Productivity Losses."
                      ))
            not_productive_timelines.write({'loss_id': loss_id.id})
        return True
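
    # Sketch of the overtime split above (hypothetical values): a timeline
    # started at 10:00 on a workorder with duration_expected = 60 and
    # duration = 50 may still run 10 productive minutes (maxdate = 10:10);
    # if it is closed at 10:25, the 10:10-10:25 part is copied into a
    # separate timeline and re-tagged with a 'performance' loss.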

    def end_all(self):
        return self.end_previous(doall=True)

    def button_pending(self):
        self.end_previous()
        return True

    def button_unblock(self):
        for order in self:
            order.workcenter_id.unblock()
        return True

    def action_cancel(self):
        self.leave_id.unlink()
        return self.write({'state': 'cancel'})

    def button_done(self):
        if any(x.state in ('done', 'cancel') for x in self):
            raise UserError(
                _('A work order is already done or cancelled.'))
        self.end_all()
        end_date = datetime.now()
        return self.write({
            'state': 'done',
            'date_finished': end_date,
            'date_planned_finished': end_date,
        })

    def button_scrap(self):
        self.ensure_one()
        return {
            'name': _('Scrap'),
            'view_mode': 'form',
            'res_model': 'stock.scrap',
            'view_id': self.env.ref('stock.stock_scrap_form_view2').id,
            'type': 'ir.actions.act_window',
            'context': {
                'default_company_id':
                self.production_id.company_id.id,
                'default_workorder_id':
                self.id,
                'default_production_id':
                self.production_id.id,
                'product_ids':
                (self.production_id.move_raw_ids.filtered(
                    lambda x: x.state not in ('done', 'cancel'))
                 | self.production_id.move_finished_ids.filtered(
                     lambda x: x.state == 'done')).mapped('product_id').ids
            },
            'target': 'new',
        }

    def action_see_move_scrap(self):
        self.ensure_one()
        action = self.env.ref('stock.action_stock_scrap').read()[0]
        action['domain'] = [('workorder_id', '=', self.id)]
        return action

    @api.depends('qty_production', 'qty_produced')
    def _compute_qty_remaining(self):
        for wo in self:
            wo.qty_remaining = float_round(
                wo.qty_production - wo.qty_produced,
                precision_rounding=wo.production_id.product_uom_id.rounding)
Example #15
0
class Slide(models.Model):
    _name = 'slide.slide'
    _inherit = [
        'mail.thread', 'image.mixin', 'website.seo.metadata',
        'website.published.mixin'
    ]
    _description = 'Slides'
    _mail_post_access = 'read'
    _order_by_strategy = {
        'sequence': 'sequence asc, id asc',
        'most_viewed': 'total_views desc',
        'most_voted': 'likes desc',
        'latest': 'date_published desc',
    }
    _order = 'sequence asc, is_category asc, id asc'

    # description
    name = fields.Char('Title', required=True, translate=True)
    active = fields.Boolean(default=True)
    sequence = fields.Integer('Sequence', default=0)
    user_id = fields.Many2one('res.users',
                              string='Uploaded by',
                              default=lambda self: self.env.uid)
    description = fields.Text('Description', translate=True)
    channel_id = fields.Many2one('slide.channel',
                                 string="Course",
                                 required=True)
    tag_ids = fields.Many2many('slide.tag',
                               'rel_slide_tag',
                               'slide_id',
                               'tag_id',
                               string='Tags')
    is_preview = fields.Boolean(
        'Allow Preview',
        default=False,
        help=
        "The course is accessible by anyone : the users don't need to join the channel to access the content of the course."
    )
    completion_time = fields.Float(
        'Duration',
        digits=(10, 4),
        help="The estimated completion time for this slide")
    # Categories
    is_category = fields.Boolean('Is a category', default=False)
    category_id = fields.Many2one('slide.slide',
                                  string="Section",
                                  compute="_compute_category_id",
                                  store=True)
    slide_ids = fields.One2many('slide.slide', "category_id", string="Slides")
    # subscribers
    partner_ids = fields.Many2many('res.partner',
                                   'slide_slide_partner',
                                   'slide_id',
                                   'partner_id',
                                   string='Subscribers',
                                   groups='website.group_website_publisher',
                                   copy=False)
    slide_partner_ids = fields.One2many(
        'slide.slide.partner',
        'slide_id',
        string='Subscribers information',
        groups='website.group_website_publisher',
        copy=False)
    user_membership_id = fields.Many2one(
        'slide.slide.partner',
        string="Subscriber information",
        compute='_compute_user_membership_id',
        compute_sudo=False,
        help="Subscriber information for the current logged in user")
    # Quiz related fields
    question_ids = fields.One2many("slide.question",
                                   "slide_id",
                                   string="Questions")
    questions_count = fields.Integer(string="Numbers of Questions",
                                     compute='_compute_questions_count')
    quiz_first_attempt_reward = fields.Integer("First attempt reward",
                                               default=10)
    quiz_second_attempt_reward = fields.Integer("Second attempt reward",
                                                default=7)
    quiz_third_attempt_reward = fields.Integer(
        "Third attempt reward",
        default=5,
    )
    quiz_fourth_attempt_reward = fields.Integer(
        "Reward for every attempt after the third try", default=2)
    # content
    slide_type = fields.Selection(
        [('infographic', 'Infographic'), ('webpage', 'Web Page'),
         ('presentation', 'Presentation'), ('document', 'Document'),
         ('video', 'Video'), ('quiz', "Quiz")],
        string='Type',
        required=True,
        default='document',
        help=
        "The document type will be set automatically based on the document URL and properties (e.g. height and width for presentation and document)."
    )
    datas = fields.Binary('Content', attachment=True)
    url = fields.Char('Document URL', help="Youtube or Google Document URL")
    document_id = fields.Char('Document ID',
                              help="Youtube or Google Document ID")
    link_ids = fields.One2many('slide.slide.link',
                               'slide_id',
                               string="External URL for this slide")
    mime_type = fields.Char('Mime-type')
    html_content = fields.Html(
        "HTML Content",
        help="Custom HTML content for slides of type 'Web Page'.",
        translate=True)
    # website
    website_id = fields.Many2one(related='channel_id.website_id',
                                 readonly=True)
    date_published = fields.Datetime('Publish Date',
                                     readonly=True,
                                     tracking=True)
    likes = fields.Integer('Likes',
                           compute='_compute_user_info',
                           store=True,
                           compute_sudo=False)
    dislikes = fields.Integer('Dislikes',
                              compute='_compute_user_info',
                              store=True,
                              compute_sudo=False)
    user_vote = fields.Integer('User vote',
                               compute='_compute_user_info',
                               compute_sudo=False)
    embed_code = fields.Text('Embed Code',
                             readonly=True,
                             compute='_compute_embed_code')
    # views
    embedcount_ids = fields.One2many('slide.embed',
                                     'slide_id',
                                     string="Embed Count")
    slide_views = fields.Integer('# of Website Views',
                                 store=True,
                                 compute="_compute_slide_views")
    public_views = fields.Integer('# of Public Views', copy=False)
    total_views = fields.Integer("Views",
                                 default="0",
                                 compute='_compute_total',
                                 store=True)
    # comments
    comments_count = fields.Integer('Number of comments',
                                    compute="_compute_comments_count")
    # channel
    channel_type = fields.Selection(related="channel_id.channel_type",
                                    string="Channel type")
    channel_allow_comment = fields.Boolean(related="channel_id.allow_comment",
                                           string="Allows comment")
    # Statistics in case the slide is a category
    nbr_presentation = fields.Integer("Number of Presentations",
                                      compute='_compute_slides_statistics',
                                      store=True)
    nbr_document = fields.Integer("Number of Documents",
                                  compute='_compute_slides_statistics',
                                  store=True)
    nbr_video = fields.Integer("Number of Videos",
                               compute='_compute_slides_statistics',
                               store=True)
    nbr_infographic = fields.Integer("Number of Infographics",
                                     compute='_compute_slides_statistics',
                                     store=True)
    nbr_webpage = fields.Integer("Number of Webpages",
                                 compute='_compute_slides_statistics',
                                 store=True)
    nbr_quiz = fields.Integer("Number of Quizs",
                              compute="_compute_slides_statistics",
                              store=True)
    total_slides = fields.Integer(compute='_compute_slides_statistics',
                                  store=True)

    _sql_constraints = [(
        'exclusion_html_content_and_url',
        "CHECK(html_content IS NULL OR url IS NULL)",
        "A slide is either filled with a document url or HTML content. Not both."
    )]

    @api.depends('channel_id.slide_ids.is_category',
                 'channel_id.slide_ids.sequence')
    def _compute_category_id(self):
        """ Will take all the slides of the channel for which the index is higher
        than the index of this category and lower than the index of the next category.

        Lists are manually sorted because when adding a new browse record order
        will not be correct as the added slide would actually end up at the
        first place no matter its sequence."""
        self.category_id = False  # initialize whatever the state

        channel_slides = {}
        for slide in self:
            if slide.channel_id.id not in channel_slides:
                channel_slides[
                    slide.channel_id.id] = slide.channel_id.slide_ids

        for cid, slides in channel_slides.items():
            current_category = self.env['slide.slide']
            slide_list = list(slides)
            slide_list.sort(key=lambda s: (s.sequence, not s.is_category))
            for slide in slide_list:
                if slide.is_category:
                    current_category = slide
                elif slide.category_id != current_category:
                    slide.category_id = current_category.id
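
    # Hypothetical ordering to illustrate the loop above: with slides sorted
    # as [category A (seq 1), slide s1 (seq 2), category B (seq 3),
    # slide s2 (seq 4)], s1 gets category A and s2 gets category B; slides
    # placed before the first category keep category_id == False.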

    @api.depends('question_ids')
    def _compute_questions_count(self):
        for slide in self:
            slide.questions_count = len(slide.question_ids)

    @api.depends('website_message_ids.res_id', 'website_message_ids.model',
                 'website_message_ids.message_type')
    def _compute_comments_count(self):
        for slide in self:
            slide.comments_count = len(slide.website_message_ids)

    @api.depends('slide_views', 'public_views')
    def _compute_total(self):
        for record in self:
            record.total_views = record.slide_views + record.public_views

    @api.depends('slide_partner_ids.vote')
    @api.depends_context('uid')
    def _compute_user_info(self):
        # Build one dict per slide: dict.fromkeys(self.ids, dict(...)) would
        # share a single dict across all keys (see _compute_slides_statistics).
        slide_data = dict(
            (slide_id, {'likes': 0, 'dislikes': 0, 'user_vote': False})
            for slide_id in self.ids)
        slide_partners = self.env['slide.slide.partner'].sudo().search([
            ('slide_id', 'in', self.ids)
        ])
        for slide_partner in slide_partners:
            if slide_partner.vote == 1:
                slide_data[slide_partner.slide_id.id]['likes'] += 1
                if slide_partner.partner_id == self.env.user.partner_id:
                    slide_data[slide_partner.slide_id.id]['user_vote'] = 1
            elif slide_partner.vote == -1:
                slide_data[slide_partner.slide_id.id]['dislikes'] += 1
                if slide_partner.partner_id == self.env.user.partner_id:
                    slide_data[slide_partner.slide_id.id]['user_vote'] = -1
        for slide in self:
            slide.update(slide_data[slide.id])

    @api.depends('slide_partner_ids.slide_id')
    def _compute_slide_views(self):
        # TODO awa: tried compute_sudo, for some reason it doesn't work in here...
        read_group_res = self.env['slide.slide.partner'].sudo().read_group(
            [('slide_id', 'in', self.ids)], ['slide_id'], groupby=['slide_id'])
        mapped_data = dict((res['slide_id'][0], res['slide_id_count'])
                           for res in read_group_res)
        for slide in self:
            slide.slide_views = mapped_data.get(slide.id, 0)

    @api.depends('slide_ids.sequence', 'slide_ids.slide_type',
                 'slide_ids.is_published', 'slide_ids.is_category')
    def _compute_slides_statistics(self):
        # Do not use dict.fromkeys(self.ids, dict()) otherwise it will use the same dictionary for all keys.
        # Therefore, when updating the dict of one key, it updates the dict of all keys.
        keys = [
            'nbr_%s' % slide_type for slide_type in
            self.env['slide.slide']._fields['slide_type'].get_values(self.env)
        ]
        default_vals = dict((key, 0) for key in keys + ['total_slides'])

        res = self.env['slide.slide'].read_group(
            [('is_published', '=', True), ('category_id', 'in', self.ids),
             ('is_category', '=', False)], ['category_id', 'slide_type'],
            ['category_id', 'slide_type'],
            lazy=False)

        type_stats = self._compute_slides_statistics_type(res)

        for record in self:
            record.update(type_stats.get(record._origin.id, default_vals))

    def _compute_slides_statistics_type(self, read_group_res):
        """ Compute statistics based on all existing slide types """
        slide_types = self.env['slide.slide']._fields['slide_type'].get_values(
            self.env)
        keys = ['nbr_%s' % slide_type for slide_type in slide_types]
        result = dict((cid, dict((key, 0) for key in keys + ['total_slides']))
                      for cid in self.ids)
        for res_group in read_group_res:
            cid = res_group['category_id'][0]
            slide_type = res_group.get('slide_type')
            if slide_type:
                slide_type_count = res_group.get('__count', 0)
                result[cid]['nbr_%s' % slide_type] = slide_type_count
                result[cid]['total_slides'] += slide_type_count
        return result
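
    # The read_group rows consumed above look like (hypothetical ids):
    #   [{'category_id': (4, 'Intro'), 'slide_type': 'video', '__count': 3}]
    # which yields result[4] == {'nbr_video': 3, ..., 'total_slides': 3}.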

    @api.depends('slide_partner_ids.partner_id')
    @api.depends_context('uid')
    def _compute_user_membership_id(self):
        slide_partners = self.env['slide.slide.partner'].sudo().search([
            ('slide_id', 'in', self.ids),
            ('partner_id', '=', self.env.user.partner_id.id),
        ])

        for record in self:
            record.user_membership_id = next(
                (slide_partner for slide_partner in slide_partners
                 if slide_partner.slide_id == record),
                self.env['slide.slide.partner'])

    @api.depends('document_id', 'slide_type', 'mime_type')
    def _compute_embed_code(self):
        base_url = request and request.httprequest.url_root or self.env[
            'ir.config_parameter'].sudo().get_param('web.base.url')
        if base_url[-1] == '/':
            base_url = base_url[:-1]
        for record in self:
            if record.datas and (not record.document_id or record.slide_type
                                 in ['document', 'presentation']):
                slide_url = base_url + url_for(
                    '/slides/embed/%s?page=1' % record.id)
                record.embed_code = '<iframe src="%s" class="o_wslides_iframe_viewer" allowFullScreen="true" height="%s" width="%s" frameborder="0"></iframe>' % (
                    slide_url, 315, 420)
            elif record.slide_type == 'video' and record.document_id:
                if not record.mime_type:
                    # embed youtube video
                    query = urls.url_parse(record.url).query
                    query = query + '&theme=light' if query else 'theme=light'
                    record.embed_code = '<iframe src="//www.youtube.com/embed/%s?%s" allowFullScreen="true" frameborder="0"></iframe>' % (
                        record.document_id, query)
                else:
                    # embed google doc video
                    record.embed_code = '<iframe src="//drive.google.com/file/d/%s/preview" allowFullScreen="true" frameborder="0"></iframe>' % (
                        record.document_id)
            else:
                record.embed_code = False
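
    # Example output (hypothetical document_id 'dQw4w9WgXcQ', no mime_type):
    #   <iframe src="//www.youtube.com/embed/dQw4w9WgXcQ?theme=light"
    #           allowFullScreen="true" frameborder="0"></iframe>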

    @api.onchange('url')
    def _on_change_url(self):
        self.ensure_one()
        if self.url:
            res = self._parse_document_url(self.url)
            if res.get('error'):
                raise Warning(
                    _('Could not fetch data from url. Document or access right not available:\n%s'
                      ) % res['error'])
            values = res['values']
            if not values.get('document_id'):
                raise Warning(
                    _('Please enter a valid YouTube or Google Doc URL'))
            for key, value in values.items():
                self[key] = value

    @api.onchange('datas')
    def _on_change_datas(self):
        """ For PDFs, we assume that it takes 5 minutes to read a page. """
        if self.datas:
            data = base64.b64decode(self.datas)
            if data.startswith(b'%PDF-'):
                pdf = PyPDF2.PdfFileReader(io.BytesIO(data),
                                           overwriteWarnings=False)
                self.completion_time = (5 * len(pdf.pages)) / 60
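
    # Worked example (hypothetical file): a 24-page PDF gives
    # completion_time = (5 * 24) / 60 = 2.0 hours.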

    @api.depends('name', 'channel_id.website_id.domain')
    def _compute_website_url(self):
        # TDE FIXME: clean this strange link.tracker stuff
        super(Slide, self)._compute_website_url()
        for slide in self:
            if slide.id:  # avoid performing a slug on a record not yet saved, in case of an onchange.
                base_url = slide.channel_id.get_base_url()
                # link_tracker is not in dependencies, so use it to shorten url only if installed.
                if self.env.registry.get('link.tracker'):
                    url = self.env['link.tracker'].sudo().create({
                        'url':
                        '%s/slides/slide/%s' % (base_url, slug(slide)),
                        'title':
                        slide.name,
                    }).short_url
                else:
                    url = '%s/slides/slide/%s' % (base_url, slug(slide))
                slide.website_url = url

    def get_backend_menu_id(self):
        return self.env.ref('website_slides.website_slides_menu_root').id

    @api.depends('channel_id.can_publish')
    def _compute_can_publish(self):
        for record in self:
            record.can_publish = record.channel_id.can_publish

    @api.model
    def _get_can_publish_error_message(self):
        return _(
            "Publishing is restricted to the responsible of training courses or members of the publisher group for documentation courses"
        )

    # ---------------------------------------------------------
    # ORM Overrides
    # ---------------------------------------------------------

    @api.model
    def create(self, values):
        # Do not publish slide if user has not publisher rights
        channel = self.env['slide.channel'].browse(values['channel_id'])
        if not channel.can_publish:
            # 'website_published' is handled by mixin
            values['date_published'] = False

        if values.get('slide_type') == 'infographic' and not values.get('image_1920'):
            values['image_1920'] = values['datas']
        if values.get('is_category'):
            values['is_preview'] = True
            values['is_published'] = True
        if values.get('is_published') and not values.get('date_published'):
            values['date_published'] = datetime.datetime.now()
        if values.get('url') and not values.get('document_id'):
            doc_data = self._parse_document_url(values['url']).get(
                'values', dict())
            for key, value in doc_data.items():
                values.setdefault(key, value)

        slide = super(Slide, self).create(values)

        if slide.is_published and not slide.is_category:
            slide._post_publication()
        return slide

    def write(self, values):
        if values.get('url') and values['url'] != self.url:
            doc_data = self._parse_document_url(values['url']).get(
                'values', dict())
            for key, value in doc_data.items():
                values.setdefault(key, value)
        if values.get('is_category'):
            values['is_preview'] = True
            values['is_published'] = True

        res = super(Slide, self).write(values)
        if values.get('is_published'):
            self.date_published = datetime.datetime.now()
            self._post_publication()

        if 'is_published' in values or 'active' in values:
            # if the slide is published/unpublished, recompute the completion for the partners
            self.slide_partner_ids._set_completed_callback()

        return res

    @api.returns('self', lambda value: value.id)
    def copy(self, default=None):
        """Sets the sequence to zero so that it always lands at the beginning
        of the newly selected course as an uncategorized slide"""
        rec = super(Slide, self).copy(default)
        rec.sequence = 0
        return rec

    def unlink(self):
        if self.question_ids and self.channel_id.channel_partner_ids:
            raise UserError(
                _("People already took this quiz. To keep course progression it should not be deleted."
                  ))
        return super(Slide, self).unlink()

    # ---------------------------------------------------------
    # Mail/Rating
    # ---------------------------------------------------------

    @api.returns('mail.message', lambda value: value.id)
    def message_post(self, *, message_type='notification', **kwargs):
        self.ensure_one()
        if message_type == 'comment' and not self.channel_id.can_comment:  # user comments have a restriction on karma
            raise AccessError(_('Not enough karma to comment'))
        return super(Slide, self).message_post(message_type=message_type,
                                               **kwargs)

    def get_access_action(self, access_uid=None):
        """ Instead of the classic form view, redirect to website if it is published. """
        self.ensure_one()
        if self.website_published:
            return {
                'type': 'ir.actions.act_url',
                'url': '%s' % self.website_url,
                'target': 'self',
                'target_type': 'public',
                'res_id': self.id,
            }
        return super(Slide, self).get_access_action(access_uid)

    def _notify_get_groups(self):
        """ Add access button to everyone if the document is active. """
        groups = super(Slide, self)._notify_get_groups()

        if self.website_published:
            for group_name, group_method, group_data in groups:
                group_data['has_button_access'] = True

        return groups

    # ---------------------------------------------------------
    # Business Methods
    # ---------------------------------------------------------

    def _post_publication(self):
        base_url = self.env['ir.config_parameter'].sudo().get_param(
            'web.base.url')
        for slide in self.filtered(lambda slide: slide.website_published and
                                   slide.channel_id.publish_template_id):
            publish_template = slide.channel_id.publish_template_id
            html_body = publish_template.with_context(
                base_url=base_url)._render_template(publish_template.body_html,
                                                    'slide.slide', slide.id)
            subject = publish_template._render_template(
                publish_template.subject, 'slide.slide', slide.id)
            # We want to use the 'reply_to' of the template if set. However, `mail.message` will check
            # if the key 'reply_to' is in the kwargs before calling _get_reply_to. If the value is
            # falsy, we don't include it in the 'message_post' call.
            kwargs = {}
            reply_to = publish_template._render_template(
                publish_template.reply_to, 'slide.slide', slide.id)
            if reply_to:
                kwargs['reply_to'] = reply_to
            slide.channel_id.with_context(
                mail_create_nosubscribe=True).message_post(
                    subject=subject,
                    body=html_body,
                    subtype='website_slides.mt_channel_slide_published',
                    email_layout_xmlid='mail.mail_notification_light',
                    **kwargs,
                )
        return True

    def _generate_signed_token(self, partner_id):
        """ Lazy generate the acces_token and return it signed by the given partner_id
            :rtype tuple (string, int)
            :return (signed_token, partner_id)
        """
        if not self.access_token:
            self.write({'access_token': self._default_access_token()})
        return self._sign_token(partner_id)

    def _send_share_email(self, email, fullscreen):
        # TDE FIXME: template to check
        mail_ids = []
        base_url = self.env['ir.config_parameter'].sudo().get_param(
            'web.base.url')
        for record in self:
            if self.env.user.has_group('base.group_portal'):
                mail_ids.append(
                    record.channel_id.share_template_id.with_context(
                        user=self.env.user,
                        email=email,
                        base_url=base_url,
                        fullscreen=fullscreen).sudo().send_mail(
                            record.id,
                            notif_layout='mail.mail_notification_light',
                            email_values={
                                'email_from':
                                self.env['res.company'].catchall
                                or self.env['res.company'].email,
                                'email_to':
                                email
                            }))
            else:
                mail_ids.append(
                    record.channel_id.share_template_id.with_context(
                        user=self.env.user,
                        email=email,
                        base_url=base_url,
                        fullscreen=fullscreen).send_mail(
                            record.id,
                            notif_layout='mail.mail_notification_light',
                            email_values={'email_to': email}))
        return mail_ids

    def action_like(self):
        self.check_access_rights('read')
        self.check_access_rule('read')
        return self._action_vote(upvote=True)

    def action_dislike(self):
        self.check_access_rights('read')
        self.check_access_rule('read')
        return self._action_vote(upvote=False)

    def _action_vote(self, upvote=True):
        """ Private implementation of voting. It does not check for any real access
        rights; public methods should grant access before calling this method.

          :param upvote: if True, is a like; if False, is a dislike
        """
        self_sudo = self.sudo()
        SlidePartnerSudo = self.env['slide.slide.partner'].sudo()
        slide_partners = SlidePartnerSudo.search([
            ('slide_id', 'in', self.ids),
            ('partner_id', '=', self.env.user.partner_id.id)
        ])
        voted_slides = slide_partners.mapped('slide_id')
        new_slides = self_sudo - voted_slides
        channel = voted_slides.channel_id
        karma_to_add = 0

        for slide_partner in slide_partners:
            if upvote:
                new_vote = 0 if slide_partner.vote == -1 else 1
                if slide_partner.vote != 1:
                    karma_to_add += channel.karma_gen_slide_vote
            else:
                new_vote = 0 if slide_partner.vote == 1 else -1
                if slide_partner.vote != -1:
                    karma_to_add -= channel.karma_gen_slide_vote
            slide_partner.vote = new_vote

        for new_slide in new_slides:
            new_vote = 1 if upvote else -1
            new_slide.write({
                'slide_partner_ids': [(0, 0, {
                    'vote': new_vote,
                    'partner_id': self.env.user.partner_id.id,
                })]
            })
            karma_to_add += new_slide.channel_id.karma_gen_slide_vote * (1 if upvote else -1)

        if karma_to_add:
            self.env.user.add_karma(karma_to_add)

    def action_set_viewed(self, quiz_attempts_inc=False):
        if not all(slide.channel_id.is_member for slide in self):
            raise UserError(
                _('You cannot mark a slide as viewed if you are not among its members.'))

        return bool(
            self._action_set_viewed(self.env.user.partner_id,
                                    quiz_attempts_inc=quiz_attempts_inc))

    def _action_set_viewed(self, target_partner, quiz_attempts_inc=False):
        self_sudo = self.sudo()
        SlidePartnerSudo = self.env['slide.slide.partner'].sudo()
        existing_sudo = SlidePartnerSudo.search([('slide_id', 'in', self.ids),
                                                 ('partner_id', '=',
                                                  target_partner.id)])
        if quiz_attempts_inc:
            for existing_slide in existing_sudo:
                existing_slide.write({
                    'quiz_attempts_count': existing_slide.quiz_attempts_count + 1,
                })

        new_slides = self_sudo - existing_sudo.mapped('slide_id')
        return SlidePartnerSudo.create([{
            'slide_id': new_slide.id,
            'channel_id': new_slide.channel_id.id,
            'partner_id': target_partner.id,
            'quiz_attempts_count': 1 if quiz_attempts_inc else 0,
            'vote': 0,
        } for new_slide in new_slides])

    def action_set_completed(self):
        if not all(slide.channel_id.is_member for slide in self):
            raise UserError(
                _('You cannot mark a slide as completed if you are not among its members.'))

        return self._action_set_completed(self.env.user.partner_id)

    def _action_set_completed(self, target_partner):
        self_sudo = self.sudo()
        SlidePartnerSudo = self.env['slide.slide.partner'].sudo()
        existing_sudo = SlidePartnerSudo.search([('slide_id', 'in', self.ids),
                                                 ('partner_id', '=',
                                                  target_partner.id)])
        existing_sudo.write({'completed': True})

        new_slides = self_sudo - existing_sudo.mapped('slide_id')
        SlidePartnerSudo.create([{
            'slide_id': new_slide.id,
            'channel_id': new_slide.channel_id.id,
            'partner_id': target_partner.id,
            'vote': 0,
            'completed': True
        } for new_slide in new_slides])

        return True

    def _action_set_quiz_done(self):
        if not all(slide.channel_id.is_member for slide in self):
            raise UserError(
                _('You cannot mark a slide quiz as completed if you are not among its members.'))

        points = 0
        for slide in self:
            user_membership_sudo = slide.user_membership_id.sudo()
            if not user_membership_sudo or user_membership_sudo.completed or not user_membership_sudo.quiz_attempts_count:
                continue

            gains = [
                slide.quiz_first_attempt_reward,
                slide.quiz_second_attempt_reward,
                slide.quiz_third_attempt_reward,
                slide.quiz_fourth_attempt_reward
            ]
            if user_membership_sudo.quiz_attempts_count <= len(gains):
                points += gains[user_membership_sudo.quiz_attempts_count - 1]
            else:
                points += gains[-1]

        return self.env.user.sudo().add_karma(points)

    def _compute_quiz_info(self, target_partner, quiz_done=False):
        result = dict.fromkeys(self.ids, False)
        slide_partners = self.env['slide.slide.partner'].sudo().search([
            ('slide_id', 'in', self.ids),
            ('partner_id', '=', target_partner.id)
        ])
        slide_partners_map = dict(
            (sp.slide_id.id, sp) for sp in slide_partners)
        for slide in self:
            if not slide.question_ids:
                gains = [0]
            else:
                gains = [
                    slide.quiz_first_attempt_reward,
                    slide.quiz_second_attempt_reward,
                    slide.quiz_third_attempt_reward,
                    slide.quiz_fourth_attempt_reward
                ]
            result[slide.id] = {
                'quiz_karma_max': gains[0],   # what could be gained if succeeding at the first try
                'quiz_karma_gain': gains[0],  # what would be gained at the next attempt
                'quiz_karma_won': 0,          # what has been gained
                'quiz_attempts_count': 0,     # number of attempts
            }
            slide_partner = slide_partners_map.get(slide.id)
            if slide.question_ids and slide_partner:
                attempts = slide_partner.quiz_attempts_count
                if attempts:
                    result[slide.id]['quiz_karma_gain'] = gains[attempts] if attempts < len(gains) else gains[-1]
                    result[slide.id]['quiz_attempts_count'] = attempts
                if quiz_done or slide_partner.completed:
                    result[slide.id]['quiz_karma_won'] = gains[attempts - 1] if attempts < len(gains) else gains[-1]
        return result

    # --------------------------------------------------
    # Parsing methods
    # --------------------------------------------------

    @api.model
    def _fetch_data(self, base_url, params, content_type=False):
        result = {'values': dict()}
        try:
            response = requests.get(base_url, timeout=3, params=params)
            response.raise_for_status()
            if content_type == 'json':
                result['values'] = response.json()
            elif content_type in ('image', 'pdf'):
                result['values'] = base64.b64encode(response.content)
            else:
                result['values'] = response.content
        except requests.exceptions.HTTPError as e:
            result['error'] = e.response.content
        except requests.exceptions.ConnectionError as e:
            result['error'] = str(e)
        return result
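
    # Usage sketch (hypothetical video id and API key): how a caller is expected
    # to branch on the 'error' key of the result dict returned above.
    #
    #   res = self._fetch_data(
    #       'https://www.googleapis.com/youtube/v3/videos',
    #       {'id': 'VIDEO_ID', 'key': 'API_KEY', 'part': 'snippet'}, 'json')
    #   if res.get('error'):
    #       pass  # propagate the error dict to the caller
    #   else:
    #       items = res['values'].get('items')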

    def _find_document_data_from_url(self, url):
        url_obj = urls.url_parse(url)
        if url_obj.ascii_host == 'youtu.be':
            return ('youtube', url_obj.path[1:] if url_obj.path else False)
        elif url_obj.ascii_host in ('youtube.com', 'www.youtube.com',
                                    'm.youtube.com'):
            v_query_value = url_obj.decode_query().get('v')
            if v_query_value:
                return ('youtube', v_query_value)
            split_path = url_obj.path.split('/')
            if len(split_path) >= 3 and split_path[1] in ('v', 'embed'):
                return ('youtube', split_path[2])

        expr = re.compile(
            r'(^https:\/\/docs\.google\.com|^https:\/\/drive\.google\.com).*\/d\/([^\/]*)'
        )
        arg = expr.match(url)
        document_id = arg and arg.group(2) or False
        if document_id:
            return ('google', document_id)

        return (None, False)
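
    # Recognized URL shapes (hypothetical ids):
    #   https://youtu.be/VIDEO_ID                       -> ('youtube', 'VIDEO_ID')
    #   https://www.youtube.com/watch?v=VIDEO_ID        -> ('youtube', 'VIDEO_ID')
    #   https://www.youtube.com/embed/VIDEO_ID          -> ('youtube', 'VIDEO_ID')
    #   https://docs.google.com/document/d/DOC_ID/edit  -> ('google', 'DOC_ID')
    #   anything else                                   -> (None, False)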

    def _parse_document_url(self, url, only_preview_fields=False):
        document_source, document_id = self._find_document_data_from_url(url)
        if document_source and hasattr(self, '_parse_%s_document' % document_source):
            return getattr(self, '_parse_%s_document' % document_source)(document_id, only_preview_fields)
        return {'error': _('Unknown document')}

    def _parse_youtube_document(self, document_id, only_preview_fields):
        """ If we receive a duration (YT video), we use it to determine the slide duration.
        The received duration is under a special format (e.g: PT1M21S15, meaning 1h 21m 15s). """

        key = self.env['website'].get_current_website().website_slide_google_app_key
        fetch_res = self._fetch_data(
            'https://www.googleapis.com/youtube/v3/videos', {
                'id': document_id,
                'key': key,
                'part': 'snippet,contentDetails',
                'fields': 'items(id,snippet,contentDetails)'
            }, 'json')
        if fetch_res.get('error'):
            return fetch_res

        values = {'slide_type': 'video', 'document_id': document_id}
        items = fetch_res['values'].get('items')
        if not items:
            return {'error': _('Please enter a valid YouTube or Google Doc URL')}
        youtube_values = items[0]

        youtube_duration = youtube_values.get('contentDetails',
                                              {}).get('duration')
        if youtube_duration:
            parsed_duration = re.search(
                r'^PT(?:(\d+)H)?(?:(\d+)M)?(?:(\d+)S)?$', youtube_duration)
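            # Worked example (hypothetical duration): 'PT1H21M15S' yields groups
            # ('1', '21', '15'), so completion_time = 1 + 21/60 + 15/3600 ~ 1.354 hours.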
            values['completion_time'] = (int(parsed_duration.group(1) or 0)) + \
                                        (int(parsed_duration.group(2) or 0) / 60) + \
                                        (int(parsed_duration.group(3) or 0) / 3600)

        if youtube_values.get('snippet'):
            snippet = youtube_values['snippet']
            if only_preview_fields:
                values.update({
                    'url_src': snippet['thumbnails']['high']['url'],
                    'title': snippet['title'],
                    'description': snippet['description']
                })

                return values

            values.update({
                'name': snippet['title'],
                'image_1920': self._fetch_data(
                    snippet['thumbnails']['high']['url'], {}, 'image')['values'],
                'description': snippet['description'],
                'mime_type': False,
            })
        return {'values': values}

    @api.model
    def _parse_google_document(self, document_id, only_preview_fields):
        def get_slide_type(vals):
            # TDE FIXME - heuristic: portrait-oriented previews are documents, landscape ones presentations
            slide_type = 'presentation'
            if vals.get('image_1920'):
                image = Image.open(
                    io.BytesIO(base64.b64decode(vals['image_1920'])))
                width, height = image.size
                if height > width:
                    return 'document'
            return slide_type

        # Google drive doesn't use a simple API key to access the data, but requires an access
        # token. However, this token is generated in module google_drive, which is not in the
        # dependencies of website_slides. We still keep the 'key' parameter just in case, but that
        # is probably useless.
        params = {'projection': 'BASIC'}
        if 'google.drive.config' in self.env:
            access_token = self.env['google.drive.config'].get_access_token()
            if access_token:
                params['access_token'] = access_token
        if not params.get('access_token'):
            params['key'] = self.env['website'].get_current_website().website_slide_google_app_key

        fetch_res = self._fetch_data(
            'https://www.googleapis.com/drive/v2/files/%s' % document_id,
            params, "json")
        if fetch_res.get('error'):
            return fetch_res

        google_values = fetch_res['values']
        if only_preview_fields:
            return {
                'url_src': google_values['thumbnailLink'],
                'title': google_values['title'],
            }

        values = {
            'name': google_values['title'],
            'image_1920': self._fetch_data(
                google_values['thumbnailLink'].replace('=s220', ''), {},
                'image')['values'],
            'mime_type': google_values['mimeType'],
            'document_id': document_id,
        }
        if google_values['mimeType'].startswith('video/'):
            values['slide_type'] = 'video'
        elif google_values['mimeType'].startswith('image/'):
            values['datas'] = values['image_1920']
            values['slide_type'] = 'infographic'
        elif google_values['mimeType'].startswith(
                'application/vnd.google-apps'):
            values['slide_type'] = get_slide_type(values)
            if 'exportLinks' in google_values:
                values['datas'] = self._fetch_data(
                    google_values['exportLinks']['application/pdf'], params,
                    'pdf')['values']
        elif google_values['mimeType'] == 'application/pdf':
            # TODO: Google Drive PDF document doesn't provide plain text transcript
            values['datas'] = self._fetch_data(google_values['webContentLink'],
                                               {}, 'pdf')['values']
            values['slide_type'] = get_slide_type(values)

        return {'values': values}
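
    # Dispatch sketch for the branches above (hypothetical mime types):
    #   'video/mp4'                     -> slide_type 'video'
    #   'image/png'                     -> 'infographic', datas = image_1920
    #   'application/vnd.google-apps.*' -> 'presentation' or 'document' (via get_slide_type),
    #                                      datas from the PDF export link when available
    #   'application/pdf'               -> datas from webContentLink, type via get_slide_type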

    def _default_website_meta(self):
        res = super(Slide, self)._default_website_meta()
        res['default_opengraph']['og:title'] = res['default_twitter']['twitter:title'] = self.name
        res['default_opengraph']['og:description'] = res['default_twitter']['twitter:description'] = self.description
        res['default_opengraph']['og:image'] = res['default_twitter']['twitter:image'] = \
            self.env['website'].image_url(self, 'image_1024')
        res['default_meta_description'] = self.description
        return res
Example #16
0
class MrpRoutingWorkcenter(models.Model):
    _name = 'mrp.routing.workcenter'
    _description = 'Work Center Usage'
    _order = 'sequence, id'
    _check_company_auto = True

    name = fields.Char('Operation', required=True)
    workcenter_id = fields.Many2one('mrp.workcenter', 'Work Center', required=True, check_company=True)
    sequence = fields.Integer(
        'Sequence', default=100,
        help="Gives the sequence order when displaying a list of routing Work Centers.")
    routing_id = fields.Many2one(
        'mrp.routing', 'Parent Routing',
        index=True, ondelete='cascade', required=True,
        help="The routing contains all the Work Centers used and for how long. This will create work orders afterwards "
        "which alters the execution of the manufacturing order.")
    note = fields.Text('Description')
    company_id = fields.Many2one(
        'res.company', 'Company',
        readonly=True, related='routing_id.company_id', store=True)
    worksheet = fields.Binary('PDF', help="Upload your PDF file.")
    worksheet_type = fields.Selection([
        ('pdf', 'PDF'), ('google_slide', 'Google Slide')],
        string="Work Sheet", default="pdf",
        help="Defines if you want to use a PDF or a Google Slide as work sheet."
    )
    worksheet_google_slide = fields.Char('Google Slide', help="Paste the url of your Google Slide. Make sure the access to the document is public.")
    time_mode = fields.Selection([
        ('auto', 'Compute based on real time'),
        ('manual', 'Set duration manually')], string='Duration Computation',
        default='manual')
    time_mode_batch = fields.Integer('Based on', default=10)
    time_cycle_manual = fields.Float(
        'Manual Duration', default=60,
        help="Time in minutes. Is the time used in manual mode, or the first time supposed in real time when there are not any work orders yet.")
    time_cycle = fields.Float('Duration', compute="_compute_time_cycle")
    workorder_count = fields.Integer("# Work Orders", compute="_compute_workorder_count")
    batch = fields.Selection([
        ('no',  'Once all products are processed'),
        ('yes', 'Once some products are processed')], string='Start Next Operation',
        default='no', required=True)
    batch_size = fields.Float('Quantity to Process', default=1.0)
    workorder_ids = fields.One2many('mrp.workorder', 'operation_id', string="Work Orders")

    @api.depends('time_cycle_manual', 'time_mode', 'workorder_ids')
    def _compute_time_cycle(self):
        manual_ops = self.filtered(lambda operation: operation.time_mode == 'manual')
        for operation in manual_ops:
            operation.time_cycle = operation.time_cycle_manual
        for operation in self - manual_ops:
            data = self.env['mrp.workorder'].read_group([
                ('operation_id', '=', operation.id),
                ('state', '=', 'done')], ['operation_id', 'duration', 'qty_produced'], ['operation_id'],
                limit=operation.time_mode_batch)
            count_data = dict((item['operation_id'][0], (item['duration'], item['qty_produced'])) for item in data)
            if count_data.get(operation.id) and count_data[operation.id][1]:
                operation.time_cycle = (count_data[operation.id][0] / count_data[operation.id][1]) * (operation.workcenter_id.capacity or 1.0)
            else:
                operation.time_cycle = operation.time_cycle_manual
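
    # Worked example (hypothetical figures): done work orders totalling a
    # duration of 120.0 minutes for qty_produced = 40 units on a work center
    # with capacity 2 give time_cycle = (120.0 / 40) * 2 = 6.0 minutes.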

    def _compute_workorder_count(self):
        data = self.env['mrp.workorder'].read_group([
            ('operation_id', 'in', self.ids),
            ('state', '=', 'done')], ['operation_id'], ['operation_id'])
        count_data = dict((item['operation_id'][0], item['operation_id_count']) for item in data)
        for operation in self:
            operation.workorder_count = count_data.get(operation.id, 0)
Example #17
0
class Import(models.TransientModel):

    _name = 'base_import.import'
    _description = 'Base Import'

    # allow imports to survive for 12h in case user is slow
    _transient_max_hours = 12.0

    res_model = fields.Char('Model')
    file = fields.Binary(
        'File',
        help="File to check and/or import, raw binary (not base64)",
        attachment=False)
    file_name = fields.Char('File Name')
    file_type = fields.Char('File Type')

    @api.model
    def get_fields(self, model, depth=FIELDS_RECURSION_LIMIT):
        """ Recursively get fields for the provided model (through
        fields_get) and filter them according to importability

        The output format is a list of ``Field``, with ``Field``
        defined as:

        .. class:: Field

            .. attribute:: id (str)

                A non-unique identifier for the field, used to compute
                the span of the ``required`` attribute: if multiple
                ``required`` fields have the same id, only one of them
                is necessary.

            .. attribute:: name (str)

                The field's logical (Harpiya) name within the scope of
                its parent.

            .. attribute:: string (str)

                The field's human-readable name (``@string``)

            .. attribute:: required (bool)

                Whether the field is marked as required in the
                model. Clients must provide non-empty import values
                for all required fields or the import will error out.

            .. attribute:: fields (list(Field))

                The current field's subfields. The database and
                external identifiers for m2o and m2m fields; a
                filtered and transformed fields_get for o2m fields (to
                a variable depth defined by ``depth``).

                Fields with no sub-fields will have an empty list of
                sub-fields.

        :param str model: name of the model to get fields from
        :param int depth: depth of recursion into o2m fields
        """
        Model = self.env[model]
        importable_fields = [{
            'id': 'id',
            'name': 'id',
            'string': _("External ID"),
            'required': False,
            'fields': [],
            'type': 'id',
        }]
        if not depth:
            return importable_fields

        model_fields = Model.fields_get()
        blacklist = models.MAGIC_COLUMNS + [Model.CONCURRENCY_CHECK_FIELD]
        for name, field in model_fields.items():
            if name in blacklist:
                continue
            # an empty string means the field is deprecated, @deprecated must
            # be absent or False to mean not-deprecated
            if field.get('deprecated', False) is not False:
                continue
            if field.get('readonly'):
                states = field.get('states')
                if not states:
                    continue
                # states = {state: [(attr, value), (attr2, value2)], state2:...}
                if not any(attr == 'readonly' and value is False
                           for attr, value in itertools.chain.from_iterable(
                               states.values())):
                    continue
            field_value = {
                'id': name,
                'name': name,
                'string': field['string'],
                # 'required' is not always present in fields_get() results
                'required': bool(field.get('required')),
                'fields': [],
                'type': field['type'],
            }

            if field['type'] in ('many2many', 'many2one'):
                field_value['fields'] = [
                    dict(field_value,
                         name='id',
                         string=_("External ID"),
                         type='id'),
                    dict(field_value,
                         name='.id',
                         string=_("Database ID"),
                         type='id'),
                ]
            elif field['type'] == 'one2many':
                field_value['fields'] = self.get_fields(field['relation'],
                                                        depth=depth - 1)
                if self.user_has_groups('base.group_no_one'):
                    field_value['fields'].append({
                        'id': '.id',
                        'name': '.id',
                        'string': _("Database ID"),
                        'required': False,
                        'fields': [],
                        'type': 'id'
                    })

            importable_fields.append(field_value)

        # TODO: cache on model?
        return importable_fields
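
    # Result shape sketch (hypothetical model): the list always starts with the
    # 'External ID' pseudo-field, followed by the importable fields, e.g.
    #
    #   [{'id': 'id', 'name': 'id', 'string': 'External ID', 'required': False,
    #     'fields': [], 'type': 'id'},
    #    {'id': 'name', 'name': 'name', 'string': 'Name', 'required': True,
    #     'fields': [], 'type': 'char'}]
    #
    # with m2o/m2m entries carrying 'id' / '.id' subfields for external and
    # database identifiers.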

    def _read_file(self, options):
        """ Dispatch to specific method to read file content, according to its mimetype or file type
            :param options: dict of reading options (quoting, separator, ...)
        """
        self.ensure_one()
        # guess mimetype from file content
        mimetype = guess_mimetype(self.file or b'')
        (file_extension, handler,
         req) = FILE_TYPE_DICT.get(mimetype, (None, None, None))
        if handler:
            try:
                return getattr(self, '_read_' + file_extension)(options)
            except Exception:
                _logger.warning(
                    "Failed to read file '%s' (transient id %d) using guessed mimetype %s",
                    self.file_name or '<unknown>', self.id, mimetype)

        # try reading with user-provided mimetype
        (file_extension, handler,
         req) = FILE_TYPE_DICT.get(self.file_type, (None, None, None))
        if handler:
            try:
                return getattr(self, '_read_' + file_extension)(options)
            except Exception:
                _logger.warning(
                    "Failed to read file '%s' (transient id %d) using user-provided mimetype %s",
                    self.file_name or '<unknown>', self.id, self.file_type)

        # fallback on file extensions as mime types can be unreliable (e.g.
        # software setting incorrect mime types, or non-installed software
        # leading to browser not sending mime types)
        if self.file_name:
            p, ext = os.path.splitext(self.file_name)
            if ext in EXTENSIONS:
                try:
                    return getattr(self, '_read_' + ext[1:])(options)
                except Exception:
                    _logger.warning(
                        "Failed to read file '%s' (transient id %s) using file extension",
                        self.file_name, self.id)

        if req:
            raise ImportError(
                _("Unable to load \"{extension}\" file: requires Python module \"{modname}\""
                  ).format(extension=file_extension, modname=req))
        raise ValueError(
            _("Unsupported file format \"{}\", import only supports CSV, ODS, XLS and XLSX"
              ).format(self.file_type))

    def _read_xls(self, options):
        """ Read file content, using xlrd lib """
        book = xlrd.open_workbook(file_contents=self.file or b'')
        return self._read_xls_book(book)

    def _read_xls_book(self, book):
        sheet = book.sheet_by_index(0)
        # emulate Sheet.get_rows for pre-0.9.4
        for rowx, row in enumerate(map(sheet.row, range(sheet.nrows)), 1):
            values = []
            for colx, cell in enumerate(row, 1):
                if cell.ctype is xlrd.XL_CELL_NUMBER:
                    is_float = cell.value % 1 != 0.0
                    values.append(
                        str(cell.value) if is_float else str(int(cell.value)))
                elif cell.ctype is xlrd.XL_CELL_DATE:
                    is_datetime = cell.value % 1 != 0.0
                    # emulate xldate_as_datetime for pre-0.9.3
                    dt = datetime.datetime(*xlrd.xldate.xldate_as_tuple(
                        cell.value, book.datemode))
                    values.append(
                        dt.strftime(DEFAULT_SERVER_DATETIME_FORMAT) if is_datetime
                        else dt.strftime(DEFAULT_SERVER_DATE_FORMAT))
                elif cell.ctype is xlrd.XL_CELL_BOOLEAN:
                    values.append(u'True' if cell.value else u'False')
                elif cell.ctype is xlrd.XL_CELL_ERROR:
                    raise ValueError(
                        _("Invalid cell value at row %(row)s, column %(col)s: %(cell_value)s") % {
                            'row': rowx,
                            'col': colx,
                            'cell_value': xlrd.error_text_from_code.get(
                                cell.value, _("unknown error code %s") % cell.value),
                        })
                else:
                    values.append(cell.value)
            if any(x for x in values if x.strip()):
                yield values

    # use the same method for xlsx and xls files
    _read_xlsx = _read_xls

    def _read_ods(self, options):
        """ Read file content using ODSReader custom lib """
        doc = odf_ods_reader.ODSReader(file=io.BytesIO(self.file or b''))

        return (row for row in doc.getFirstSheet()
                if any(x for x in row if x.strip()))

    def _read_csv(self, options):
        """ Returns a CSV-parsed iterator of all non-empty lines in the file
            :throws csv.Error: if an error is detected during CSV parsing
        """
        csv_data = self.file or b''
        if not csv_data:
            return iter([])

        encoding = options.get('encoding')
        if not encoding:
            encoding = options['encoding'] = chardet.detect(
                csv_data)['encoding'].lower()
            # some versions of chardet (e.g. 2.3.0 but not 3.x) will return
            # utf-(16|32)(le|be), which for python means "ignore / don't strip
            # BOM". We don't want that, so rectify the encoding to non-marked
            # IFF the guessed encoding is LE/BE and csv_data starts with a BOM
            bom = BOM_MAP.get(encoding)
            if bom and csv_data.startswith(bom):
                encoding = options['encoding'] = encoding[:-2]

        if encoding != 'utf-8':
            csv_data = csv_data.decode(encoding).encode('utf-8')

        separator = options.get('separator')
        if not separator:
            # default for unspecified separator so user gets a message about
            # having to specify it
            separator = ','
            for candidate in (',', ';', '\t', ' ', '|',
                              unicodedata.lookup('unit separator')):
                # pass through the CSV and check if all rows are the same
                # length & at least 2-wide; if so, assume it's the correct one
                it = pycompat.csv_reader(io.BytesIO(csv_data),
                                         quotechar=options['quoting'],
                                         delimiter=candidate)
                w = None
                for row in it:
                    width = len(row)
                    if w is None:
                        w = width
                    if width == 1 or width != w:
                        break  # next candidate
                else:  # nobreak
                    separator = options['separator'] = candidate
                    break

        csv_iterator = pycompat.csv_reader(io.BytesIO(csv_data),
                                           quotechar=options['quoting'],
                                           delimiter=separator)

        return (row for row in csv_iterator
                if any(x for x in row if x.strip()))
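
    # Sniffing sketch (standalone, hypothetical sample): the candidate loop above
    # accepts the first delimiter for which every row has the same width >= 2.
    #
    #   import csv, io
    #   sample = "a;b;c\n1;2;3\n"
    #   for candidate in (',', ';', '\t'):
    #       widths = {len(row) for row in csv.reader(io.StringIO(sample), delimiter=candidate)}
    #       if len(widths) == 1 and widths.pop() > 1:
    #           print('separator:', repr(candidate))  # -> ';'
    #           break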

    @api.model
    def _try_match_column(self, preview_values, options):
        """ Returns the potential field types, based on the preview values, using heuristics
            :param preview_values : list of value for the column to determine
            :param options : parsing options
        """
        values = set(preview_values)
        # If all values are empty in the preview, the column can be any field
        if values == {''}:
            return ['all']

        # If all values start with __export__, this is probably an id
        if all(v.startswith('__export__') for v in values):
            return ['id', 'many2many', 'many2one', 'one2many']

        # If all values can be cast to int, the type is either id, float or monetary.
        # Exception: if we only have 1 and 0, it can also be a boolean.
        if all(v.isdigit() for v in values if v):
            field_type = [
                'id', 'integer', 'char', 'float', 'monetary', 'many2one',
                'many2many', 'one2many'
            ]
            if {'0', '1', ''}.issuperset(values):
                field_type.append('boolean')
            return field_type

        # If all values are either True or False, type is boolean
        if all(val.lower() in ('true', 'false', 't', 'f', '')
               for val in preview_values):
            return ['boolean']

        # If all values can be cast to float, type is either float or monetary
        try:
            thousand_separator = decimal_separator = False
            for val in preview_values:
                val = val.strip()
                if not val:
                    continue
                # value might have the currency symbol left or right from the value
                val = self._remove_currency_symbol(val)
                if val:
                    if options.get('float_thousand_separator') and options.get(
                            'float_decimal_separator'):
                        val = val.replace(
                            options['float_thousand_separator'],
                            '').replace(options['float_decimal_separator'],
                                        '.')
                    # We are now sure that this is a float, but we still need to find the
                    # thousand and decimal separator
                    else:
                        if val.count('.') > 1:
                            options['float_thousand_separator'] = '.'
                            options['float_decimal_separator'] = ','
                        elif val.count(',') > 1:
                            options['float_thousand_separator'] = ','
                            options['float_decimal_separator'] = '.'
                        elif val.find('.') > val.find(','):
                            thousand_separator = ','
                            decimal_separator = '.'
                        elif val.find(',') > val.find('.'):
                            thousand_separator = '.'
                            decimal_separator = ','
                else:
                    # not a float: raise ValueError to exit the enclosing try
                    float('a')
            if thousand_separator and not options.get(
                    'float_decimal_separator'):
                options['float_thousand_separator'] = thousand_separator
                options['float_decimal_separator'] = decimal_separator
            return ['float', 'monetary']
        except ValueError:
            pass

        results = self._try_match_date_time(preview_values, options)
        if results:
            return results

        return [
            'id', 'text', 'boolean', 'char', 'datetime', 'selection',
            'many2one', 'one2many', 'many2many', 'html'
        ]
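
    # Heuristic examples (hypothetical columns):
    #   ['', '', '']              -> ['all']
    #   ['0', '1', '']            -> numeric/id types plus 'boolean'
    #   ['1,000.50', '2.25']      -> ['float', 'monetary']
    #   ['__export__.res_1']      -> ['id', 'many2many', 'many2one', 'one2many']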

    def _try_match_date_time(self, preview_values, options):
        # Or a date/datetime if it matches the pattern
        date_patterns = [options['date_format']] if options.get('date_format') else []
        user_date_format = self.env['res.lang']._lang_get(
            self.env.user.lang).date_format
        if user_date_format:
            try:
                to_re(user_date_format)
                date_patterns.append(user_date_format)
            except KeyError:
                pass
        date_patterns.extend(DATE_PATTERNS)
        match = check_patterns(date_patterns, preview_values)
        if match:
            options['date_format'] = match
            return ['date', 'datetime']

        datetime_patterns = [options['datetime_format']] if options.get('datetime_format') else []
        datetime_patterns.extend("%s %s" % (d, t) for d in date_patterns
                                 for t in TIME_PATTERNS)
        match = check_patterns(datetime_patterns, preview_values)
        if match:
            options['datetime_format'] = match
            return ['datetime']

        return []

    @api.model
    def _find_type_from_preview(self, options, preview):
        type_fields = []
        if preview:
            for column in range(0, len(preview[0])):
                preview_values = [value[column].strip() for value in preview]
                type_field = self._try_match_column(preview_values, options)
                type_fields.append(type_field)
        return type_fields

    def _match_header(self, header, fields, options):
        """ Attempts to match a given header to a field of the
            imported model.

            :param str header: header name from the CSV file
            :param list fields: list of ``Field`` dicts, as returned by ``get_fields``
            :param dict options:
            :returns: an empty list if the header couldn't be matched, or
                      all the fields to traverse
            :rtype: list(Field)
        """
        string_match = None
        IrTranslation = self.env['ir.translation']
        for field in fields:
            # FIXME: should match all translations & original
            # TODO: use string distance (levenshtein? hamming?)
            if header.lower() == field['name'].lower():
                return [field]
            if header.lower() == field['string'].lower():
                # matching on the string is not reliable because
                # strings have no unique constraint
                string_match = field
            translated_header = IrTranslation._get_source(
                'ir.model.fields,field_description', 'model', self.env.lang,
                header).lower()
            if translated_header == field['string'].lower():
                string_match = field
        if string_match:
            # this behavior is only applied if there is no matching field['name']
            return [string_match]

        if '/' not in header:
            return []

        # relational field path
        traversal = []
        subfields = fields
        # Iteratively dive into fields tree
        for section in header.split('/'):
            # Strip section in case spaces are added around '/' for
            # readability of paths
            match = self._match_header(section.strip(), subfields, options)
            # Any match failure, exit
            if not match:
                return []
            # prep subfields for next iteration within match[0]
            field = match[0]
            subfields = field['fields']
            traversal.append(field)
        return traversal
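
    # Traversal example (hypothetical header): 'partner_id / country_id / name'
    # is matched one '/'-separated section at a time, returning the full path
    # [partner_id_field, country_id_field, name_field]; if any section fails to
    # match, the whole header yields [] and the column stays unmapped.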

    def _match_headers(self, rows, fields, options):
        """ Attempts to match the imported model's fields to the
            titles of the parsed CSV file, if the file is supposed to have
            headers.

            Will consume the first line of the ``rows`` iterator.

            Returns the list of headers and a dict mapping cell indices
            to key paths in the ``fields`` tree. If headers were not
            requested, both collections are empty.

            :param Iterator rows:
            :param dict fields:
            :param dict options:
            :rtype: (list(str), dict(int: list(str)))
        """
        if not options.get('headers'):
            return [], {}

        headers = next(rows, None)
        if not headers:
            return [], {}

        matches = {}
        mapping_records = self.env['base_import.mapping'].search_read(
            [('res_model', '=', self.res_model)],
            ['column_name', 'field_name'])
        mapping_fields = {
            rec['column_name']: rec['field_name']
            for rec in mapping_records
        }
        for index, header in enumerate(headers):
            match_field = []
            mapping_field_name = mapping_fields.get(header.lower())
            if mapping_field_name:
                match_field = mapping_field_name.split('/')
            if not match_field:
                match_field = [
                    field['name']
                    for field in self._match_header(header, fields, options)
                ]
            matches[index] = match_field or None
        return headers, matches

    def parse_preview(self, options, count=10):
        """ Generates a preview of the uploaded files, and performs
            fields-matching between the import's file data and the model's
            columns.

            If the headers are not requested (not options.headers),
            ``matches`` and ``headers`` are both ``False``.

            :param int count: number of preview lines to generate
            :param options: format-specific options.
                            CSV: {quoting, separator, headers}
            :type options: {str, str, str, bool}
            :returns: {fields, matches, headers, preview} | {error, preview}
            :rtype: {dict(str: dict(...)), dict(int, list(str)), list(str), list(list(str))} | {str, str}
        """
        self.ensure_one()
        fields = self.get_fields(self.res_model)
        try:
            rows = self._read_file(options)
            headers, matches = self._match_headers(rows, fields, options)
            # Match should have consumed the first row (if headers were
            # requested); get the ``count`` next rows for preview
            preview = list(itertools.islice(rows, count))
            assert preview, "file seems to have no content"
            header_types = self._find_type_from_preview(options, preview)
            if options.get('keep_matches') and len(options.get('fields', [])):
                matches = {}
                for index, match in enumerate(options.get('fields')):
                    if match:
                        matches[index] = match.split('/')

            if options.get('keep_matches'):
                advanced_mode = options.get('advanced')
            else:
                # Check if any column label contains a relational field path
                has_relational_header = any(
                    len(models.fix_import_export_id_paths(col)) > 1
                    for col in headers)
                # Check if any matched field is a relational path
                has_relational_match = any(
                    len(match) > 1 for field, match in matches.items()
                    if match)
                advanced_mode = has_relational_header or has_relational_match

            batch = False
            batch_cutoff = options.get('limit')
            if batch_cutoff:
                if count > batch_cutoff:
                    batch = len(preview) > batch_cutoff
                else:
                    batch = bool(
                        next(
                            itertools.islice(rows, batch_cutoff - count, None),
                            None))

            return {
                'fields': fields,
                'matches': matches or False,
                'headers': headers or False,
                'headers_type': header_types or False,
                'preview': preview,
                'options': options,
                'advanced_mode': advanced_mode,
                'debug': self.user_has_groups('base.group_no_one'),
                'batch': batch,
            }
        except Exception as error:
            # Due to lazy generators, UnicodeDecodeError (for
            # instance) may only be raised when serializing the
            # preview to a list in the return.
            _logger.debug("Error during parsing preview", exc_info=True)
            preview = None
            if self.file_type == 'text/csv' and self.file:
                preview = self.file[:ERROR_PREVIEW_BYTES].decode('iso-8859-1')
            return {
                'error': str(error),
                # iso-8859-1 ensures decoding will always succeed,
                # even if it yields non-printable characters. This is
                # in case of UnicodeDecodeError (or csv.Error
                # compounded with UnicodeDecodeError)
                'preview': preview,
            }

    @api.model
    def _convert_import_data(self, fields, options):
        """ Extracts the input BaseModel and fields list (with
            ``False``-y placeholders for fields to *not* import) into a
            format Model.import_data can use: a fields list without holes
            and the precisely matching data matrix

            :param list(str|bool) fields:
            :returns: (data, fields)
            :rtype: (list(list(str)), list(str))
            :raises ValueError: in case the import data could not be converted
        """
        # Get indices for non-empty fields
        indices = [index for index, field in enumerate(fields) if field]
        if not indices:
            raise ValueError(
                _("You must configure at least one field to import"))
        # If only one index, itemgetter will return an atom rather
        # than a 1-tuple
        if len(indices) == 1:
            mapper = lambda row: [row[indices[0]]]
        else:
            mapper = operator.itemgetter(*indices)
        # Get only list of actually imported fields
        import_fields = [f for f in fields if f]

        rows_to_import = self._read_file(options)
        if options.get('headers'):
            rows_to_import = itertools.islice(rows_to_import, 1, None)
        data = [
            list(row) for row in map(mapper, rows_to_import)
            # don't try inserting completely empty rows (e.g. from
            # filtering out o2m fields)
            if any(row)
        ]

        # slicing needs to happen after filtering out empty rows as the
        # data offsets from load are post-filtering
        return data[options.get('skip'):], import_fields

    @api.model
    def _remove_currency_symbol(self, value):
        value = value.strip()
        negative = False
        # Careful: some locales use parentheses for negative amounts, so convert them to a leading - sign
        if value.startswith('(') and value.endswith(')'):
            value = value[1:-1]
            negative = True
        float_regex = re.compile(r'([+-]?[0-9.,]+)')
        split_value = [g for g in float_regex.split(value) if g]
        if len(split_value) > 2:
            # This is probably not a float
            return False
        if len(split_value) == 1:
            if float_regex.search(split_value[0]) is not None:
                return split_value[0] if not negative else '-' + split_value[0]
            return False
        else:
            # String has been split in 2, locate which index contains the float and which does not
            currency_index = 0
            if float_regex.search(split_value[0]) is not None:
                currency_index = 1
            # Check that currency exists
            currency = self.env['res.currency'].search([
                ('symbol', '=', split_value[currency_index].strip())
            ])
            if len(currency):
                value_index = (currency_index + 1) % 2
                return split_value[value_index] if not negative else '-' + split_value[value_index]
            # Otherwise it is not a float with a currency symbol
            return False
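
    # Examples (hypothetical inputs, assuming '$' matches a res.currency symbol):
    #   '12.50'     -> '12.50'
    #   '$12.50'    -> '12.50'
    #   '($12.50)'  -> '-12.50'
    #   'a1b2c'     -> False  (splits into more than two parts)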

    @api.model
    def _parse_float_from_data(self, data, index, name, options):
        for line in data:
            line[index] = line[index].strip()
            if not line[index]:
                continue
            thousand_separator, decimal_separator = self._infer_separators(
                line[index], options)

            if 'E' in line[index] or 'e' in line[index]:
                tmp_value = line[index].replace(thousand_separator, '.')
                try:
                    tmp_value = '{:f}'.format(float(tmp_value))
                    line[index] = tmp_value
                    thousand_separator = ' '
                except Exception:
                    pass

            line[index] = line[index].replace(thousand_separator, '').replace(
                decimal_separator, '.')
            old_value = line[index]
            line[index] = self._remove_currency_symbol(line[index])
            if line[index] is False:
                raise ValueError(
                    _("Column %s contains incorrect values (value: %s)") %
                    (name, old_value))

    def _infer_separators(self, value, options):
        """ Try to infer the shape of the separators: if there are two
        different "non-numberic" characters in the number, the
        former/duplicated one would be grouping ("thousands" separator) and
        the latter would be the decimal separator. The decimal separator
        should furthermore be unique.
        """
        # can't use \p{Sc} with re so hand-roll it
        non_number = [
            # any character
            c for c in value
            # which is not a numeric decoration (() is used for negative
            # by accountants)
            if c not in '()-+'
            # which is not a digit or a currency symbol
            if unicodedata.category(c) not in ('Nd', 'Sc')
        ]

        counts = collections.Counter(non_number)
        # if we have two non-numbers *and* the last one has a count of 1,
        # we probably have grouping & decimal separators
        if len(counts) == 2 and counts[non_number[-1]] == 1:
            return [character for character, _count in counts.most_common()]

        # otherwise get whatever's in the options, or fallback to a default
        thousand_separator = options.get('float_thousand_separator', ' ')
        decimal_separator = options.get('float_decimal_separator', '.')
        return thousand_separator, decimal_separator
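
    # Inference examples (hypothetical values):
    #   '1.234.567,89' -> ('.', ',')   duplicated '.' groups, unique ',' decimal
    #   '1,234.56'     -> (',', '.')
    #   '1234.56'      -> falls back to the options or (' ', '.')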

    def _parse_import_data(self, data, import_fields, options):
        """ Lauch first call to _parse_import_data_recursive with an
        empty prefix. _parse_import_data_recursive will be run
        recursively for each relational field.
        """
        return self._parse_import_data_recursive(self.res_model, '', data,
                                                 import_fields, options)

    def _parse_import_data_recursive(self, model, prefix, data, import_fields,
                                     options):
        # Get fields of type date/datetime
        all_fields = self.env[model].fields_get()
        for name, field in all_fields.items():
            name = prefix + name
            if field['type'] in ('date', 'datetime') and name in import_fields:
                index = import_fields.index(name)
                self._parse_date_from_data(data, index, name, field['type'],
                                           options)
            # Check if the field is in import_fields and is relational (followed by /).
            # Also verify that the field name exactly matches the import_field at the correct level.
            elif any(name + '/' in import_field
                     and name == import_field.split('/')[prefix.count('/')]
                     for import_field in import_fields):
                # Recursive call with the relational as new model and add the field name to the prefix
                self._parse_import_data_recursive(field['relation'],
                                                  name + '/', data,
                                                  import_fields, options)
            elif field['type'] in ('float',
                                   'monetary') and name in import_fields:
                # Parse floats: values from the file sometimes carry a currency symbol
                # or use () to denote a negative value; we should handle both cases
                index = import_fields.index(name)
                self._parse_float_from_data(data, index, name, options)
            elif field['type'] == 'binary' and field.get('attachment') and any(
                    f in name for f in IMAGE_FIELDS) and name in import_fields:
                index = import_fields.index(name)

                with requests.Session() as session:
                    session.stream = True

                    for num, line in enumerate(data):
                        if re.match(
                                config.get("import_image_regex",
                                           DEFAULT_IMAGE_REGEX), line[index]):
                            if not self.env.user._can_import_remote_urls():
                                raise AccessError(
                                    _("You can not import images via URL, check with your administrator or support for the reason."
                                      ))

                            line[index] = self._import_image_by_url(
                                line[index], session, name, num)
                        else:
                            try:
                                base64.b64decode(line[index], validate=True)
                            except binascii.Error:
                                raise ValueError(
                                    _("Found invalid image data, images should be imported as either URLs or base64-encoded data."
                                      ))

        return data
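
    # Recursion sketch (hypothetical columns): importing
    # ['name', 'line_ids/product_id/name', 'line_ids/price'] parses top-level
    # date/float columns first, then recurses into the o2m model with prefix
    # 'line_ids/' so the same parsing applies at every relational level.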

    def _parse_date_from_data(self, data, index, name, field_type, options):
        dt = datetime.datetime
        fmt = fields.Date.to_string if field_type == 'date' else fields.Datetime.to_string
        d_fmt = options.get('date_format')
        dt_fmt = options.get('datetime_format')
        for num, line in enumerate(data):
            if not line[index]:
                continue

            v = line[index].strip()
            try:
                # first try parsing as a datetime if it's one
                if dt_fmt and field_type == 'datetime':
                    try:
                        line[index] = fmt(dt.strptime(v, dt_fmt))
                        continue
                    except ValueError:
                        pass
                # otherwise try parsing as a date whether it's a date
                # or datetime
                line[index] = fmt(dt.strptime(v, d_fmt))
            except ValueError as e:
                raise ValueError(
                    _("Column %s contains incorrect values. Error in line %d: %s"
                      ) % (name, num + 1, e))
            except Exception as e:
                raise ValueError(
                    _("Error Parsing Date [%s:L%d]: %s") % (name, num + 1, e))

    def _import_image_by_url(self, url, session, field, line_number):
        """ Imports an image by URL

        :param str url: the original field value
        :param requests.Session session:
        :param str field: name of the field (for logging/debugging)
        :param int line_number: 0-indexed line number within the imported file (for logging/debugging)
        :return: the replacement value
        :rtype: bytes
        """
        maxsize = int(
            config.get("import_image_maxbytes", DEFAULT_IMAGE_MAXBYTES))
        _logger.debug(
            "Trying to import image from URL: %s into field %s, at line %s",
            url, field, line_number)
        try:
            response = session.get(url,
                                   timeout=int(
                                       config.get("import_image_timeout",
                                                  DEFAULT_IMAGE_TIMEOUT)))
            response.raise_for_status()

            if response.headers.get('Content-Length') and int(
                    response.headers['Content-Length']) > maxsize:
                raise ValueError(
                    _("File size exceeds configured maximum (%s bytes)") %
                    maxsize)

            content = bytearray()
            for chunk in response.iter_content(DEFAULT_IMAGE_CHUNK_SIZE):
                content += chunk
                if len(content) > maxsize:
                    raise ValueError(
                        _("File size exceeds configured maximum (%s bytes)") %
                        maxsize)

            image = Image.open(io.BytesIO(content))
            w, h = image.size
            if w * h > 42e6:  # Nokia Lumia 1020 photo resolution
                raise ValueError(
                    u"Image size excessive, imported images must be smaller "
                    u"than 42 million pixels")

            return base64.b64encode(content)
        except Exception as e:
            _logger.exception(e)
            raise ValueError(
                _("Could not retrieve URL: %(url)s [%(field_name)s: L%(line_number)d]: %(error)s"
                  ) % {
                      'url': url,
                      'field_name': field,
                      'line_number': line_number + 1,
                      'error': e
                  })

    def do(self, fields, columns, options, dryrun=False):
        """ Actual execution of the import

        :param fields: import mapping: maps each column to a field,
                       ``False`` for the columns to ignore
        :type fields: list(str|bool)
        :param columns: columns label
        :type columns: list(str|bool)
        :param dict options:
        :param bool dryrun: performs all import operations (and
                            validations) but rolls back the writes, which
                            allows collecting as many errors as possible
                            without the risk of clobbering the database.
        :returns: a dict with the ``ids`` of the created records and a
                  ``messages`` list of errors. If that list is empty the
                  import executed fully and correctly. If it is non-empty
                  it contains dicts with 3 keys: ``type``, the type of
                  message (``error|warning``); ``message``, the error
                  message associated with the error (a string); and
                  ``record``, the data which failed to import (or
                  ``false`` if that data isn't available or provided)
        :rtype: dict(ids: list(int), messages: list({type, message, record}))
        """
        self.ensure_one()
        self._cr.execute('SAVEPOINT import')

        try:
            data, import_fields = self._convert_import_data(fields, options)
            # Parse date and float fields
            data = self._parse_import_data(data, import_fields, options)
        except ValueError as error:
            return {
                'messages': [{
                    'type': 'error',
                    'message': str(error),
                    'record': False,
                }]
            }

        _logger.info('importing %d rows...', len(data))

        name_create_enabled_fields = options.pop('name_create_enabled_fields',
                                                 {})
        import_limit = options.pop('limit', None)
        model = self.env[self.res_model].with_context(
            import_file=True,
            name_create_enabled_fields=name_create_enabled_fields,
            _import_limit=import_limit)
        import_result = model.load(import_fields, data)
        _logger.info('done')

        # If the transaction was aborted, RELEASE SAVEPOINT is going to raise
        # an InternalError (a plain ROLLBACK should still work). Ignore that.
        # TODO: to handle multiple errors, create savepoint around
        #       write and release it in case of write error (after
        #       adding error to errors array) => can keep on trying to
        #       import stuff, and rollback at the end if there is any
        #       error in the results.
        try:
            if dryrun:
                self._cr.execute('ROLLBACK TO SAVEPOINT import')
                # cancel all changes done to the registry/ormcache
                self.pool.clear_caches()
                self.pool.reset_changes()
            else:
                self._cr.execute('RELEASE SAVEPOINT import')
        except psycopg2.InternalError:
            pass

        # Insert/update mapping columns when the import completes successfully
        if import_result['ids'] and options.get('headers'):
            BaseImportMapping = self.env['base_import.mapping']
            for index, column_name in enumerate(columns):
                if column_name:
                    # Update to latest selected field
                    exist_records = BaseImportMapping.search([
                        ('res_model', '=', self.res_model),
                        ('column_name', '=', column_name)
                    ])
                    if exist_records:
                        exist_records.write({'field_name': fields[index]})
                    else:
                        BaseImportMapping.create({
                            'res_model': self.res_model,
                            'column_name': column_name,
                            'field_name': fields[index]
                        })
        if 'name' in import_fields:
            index_of_name = import_fields.index('name')
            skipped = options.get('skip', 0)
            # pad front as data doesn't contain anything for skipped lines
            r = import_result['name'] = [''] * skipped
            # only add names for the window being imported
            r.extend(x[index_of_name] for x in data[:import_limit])
            # pad back (though that's probably not useful)
            r.extend([''] * (len(data) - (import_limit or 0)))
        else:
            import_result['name'] = []

        skip = options.get('skip', 0)
        # convert load's internal nextrow to the imported file's
        if import_result['nextrow']:  # don't update if nextrow = 0 (= no nextrow)
            import_result['nextrow'] += skip

        return import_result
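
# Hedged usage sketch: the model name, file fields and options below are
# assumptions for illustration, not taken from the code above. A dryrun
# collects validation errors without committing any writes.
def example_dry_run(env, csv_bytes):
    wizard = env['base_import.import'].create({
        'res_model': 'res.partner',
        'file': csv_bytes,
        'file_type': 'text/csv',
    })
    return wizard.do(
        ['name', 'phone'], ['Name', 'Phone'],
        {'quoting': '"', 'separator': ',', 'headers': True},
        dryrun=True,
    )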
Beispiel #18
0
class ResConfigSettings(models.TransientModel):
    _inherit = 'res.config.settings'

    def _default_website(self):
        return self.env['website'].search(
            [('company_id', '=', self.env.company.id)], limit=1)

    website_id = fields.Many2one('website',
                                 string="website",
                                 default=_default_website,
                                 ondelete='cascade')
    website_name = fields.Char('Website Name',
                               related='website_id.name',
                               readonly=False)
    website_domain = fields.Char('Website Domain',
                                 related='website_id.domain',
                                 readonly=False)
    website_country_group_ids = fields.Many2many(
        related='website_id.country_group_ids', readonly=False)
    website_company_id = fields.Many2one(related='website_id.company_id',
                                         string='Website Company',
                                         readonly=False)
    website_logo = fields.Binary(related='website_id.logo', readonly=False)
    website_wholesale = fields.Boolean(related='website_id.wholesale',
                                       readonly=False,
                                       string="Wholesale Website")
    language_ids = fields.Many2many(related='website_id.language_ids',
                                    relation='res.lang',
                                    readonly=False)
    website_language_count = fields.Integer(
        string='Number of languages',
        compute='_compute_website_language_count',
        readonly=True)
    website_default_lang_id = fields.Many2one(
        string='Default language',
        related='website_id.default_lang_id',
        readonly=False,
        relation='res.lang')
    website_default_lang_code = fields.Char(
        'Default language code',
        related='website_id.default_lang_id.code',
        readonly=False)
    specific_user_account = fields.Boolean(
        related='website_id.specific_user_account',
        readonly=False,
        help='Are newly created user accounts website specific')

    google_analytics_key = fields.Char(
        'Google Analytics Key',
        related='website_id.google_analytics_key',
        readonly=False)
    google_management_client_id = fields.Char(
        'Google Client ID',
        related='website_id.google_management_client_id',
        readonly=False)
    google_management_client_secret = fields.Char(
        'Google Client Secret',
        related='website_id.google_management_client_secret',
        readonly=False)

    cdn_activated = fields.Boolean(related='website_id.cdn_activated',
                                   readonly=False)
    cdn_url = fields.Char(related='website_id.cdn_url', readonly=False)
    cdn_filters = fields.Text(related='website_id.cdn_filters', readonly=False)
    module_website_version = fields.Boolean("A/B Testing")
    module_website_links = fields.Boolean("Link Trackers")
    auth_signup_uninvited = fields.Selection(compute="_compute_auth_signup",
                                             inverse="_set_auth_signup")

    social_twitter = fields.Char(related='website_id.social_twitter',
                                 readonly=False)
    social_facebook = fields.Char(related='website_id.social_facebook',
                                  readonly=False)
    social_github = fields.Char(related='website_id.social_github',
                                readonly=False)
    social_linkedin = fields.Char(related='website_id.social_linkedin',
                                  readonly=False)
    social_youtube = fields.Char(related='website_id.social_youtube',
                                 readonly=False)
    social_instagram = fields.Char(related='website_id.social_instagram',
                                   readonly=False)

    @api.depends('website_id', 'social_twitter', 'social_facebook',
                 'social_github', 'social_linkedin', 'social_youtube',
                 'social_instagram')
    def has_social_network(self):
        self.has_social_network = self.social_twitter or self.social_facebook or self.social_github \
            or self.social_linkedin or self.social_youtube or self.social_instagram

    def inverse_has_social_network(self):
        if not self.has_social_network:
            self.social_twitter = ''
            self.social_facebook = ''
            self.social_github = ''
            self.social_linkedin = ''
            self.social_youtube = ''
            self.social_instagram = ''

    has_social_network = fields.Boolean("Configure Social Network",
                                        compute=has_social_network,
                                        inverse=inverse_has_social_network)

    favicon = fields.Binary('Favicon',
                            related='website_id.favicon',
                            readonly=False)
    social_default_image = fields.Binary(
        'Default Social Share Image',
        related='website_id.social_default_image',
        readonly=False)

    google_maps_api_key = fields.Char(related='website_id.google_maps_api_key',
                                      readonly=False)
    group_multi_website = fields.Boolean(
        "Multi-website", implied_group="website.group_multi_website")

    @api.depends('website_id.auth_signup_uninvited')
    def _compute_auth_signup(self):
        for config in self:
            config.auth_signup_uninvited = config.website_id.auth_signup_uninvited

    def _set_auth_signup(self):
        for config in self:
            config.website_id.auth_signup_uninvited = config.auth_signup_uninvited

    @api.depends('website_id')
    def has_google_analytics(self):
        self.has_google_analytics = bool(self.google_analytics_key)

    @api.depends('website_id')
    def has_google_analytics_dashboard(self):
        self.has_google_analytics_dashboard = bool(
            self.google_management_client_id)

    @api.depends('website_id')
    def has_google_maps(self):
        self.has_google_maps = bool(self.google_maps_api_key)

    def inverse_has_google_analytics(self):
        if not self.has_google_analytics:
            self.has_google_analytics_dashboard = False
            self.google_analytics_key = False

    def inverse_has_google_maps(self):
        if not self.has_google_maps:
            self.google_maps_api_key = False

    def inverse_has_google_analytics_dashboard(self):
        if not self.has_google_analytics_dashboard:
            self.google_management_client_id = False
            self.google_management_client_secret = False

    has_google_analytics = fields.Boolean("Google Analytics",
                                          compute=has_google_analytics,
                                          inverse=inverse_has_google_analytics)
    has_google_analytics_dashboard = fields.Boolean(
        "Google Analytics Dashboard",
        compute=has_google_analytics_dashboard,
        inverse=inverse_has_google_analytics_dashboard)
    has_google_maps = fields.Boolean("Google Maps",
                                     compute=has_google_maps,
                                     inverse=inverse_has_google_maps)

    @api.onchange('language_ids')
    def _onchange_language_ids(self):
        # If current default language is removed from language_ids
        # update the website_default_lang_id
        language_ids = self.language_ids._origin
        if not language_ids:
            self.website_default_lang_id = False
        elif self.website_default_lang_id not in language_ids:
            self.website_default_lang_id = language_ids[0]

    @api.depends('language_ids')
    def _compute_website_language_count(self):
        for config in self:
            config.website_language_count = len(self.language_ids)

    def set_values(self):
        super(ResConfigSettings, self).set_values()

    def open_template_user(self):
        action = self.env.ref('base.action_res_users').read()[0]
        action['res_id'] = literal_eval(
            self.env['ir.config_parameter'].sudo().get_param(
                'base.template_portal_user_id', 'False'))
        action['views'] = [[self.env.ref('base.view_users_form').id, 'form']]
        return action

    def website_go_to(self):
        self.website_id._force()
        return {
            'type': 'ir.actions.act_url',
            'url': '/',
            'target': 'self',
        }

    def action_website_create_new(self):
        return {
            'view_mode': 'form',
            'view_id': self.env.ref('website.view_website_form').id,
            'res_model': 'website',
            'type': 'ir.actions.act_window',
            'target': 'new',
            'res_id': False,
        }
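
# Minimal sketch of the compute/inverse toggle pattern used above (model and
# field names here are illustrative, not part of the settings class): a
# Boolean that reports whether a Char field is set and clears it when
# unchecked, with the compute/inverse functions referenced by object, as in
# the class above.
class ExampleToggleSettings(models.TransientModel):
    _inherit = 'res.config.settings'

    example_key = fields.Char()

    def _compute_has_example(self):
        for record in self:
            record.has_example = bool(record.example_key)

    def _inverse_has_example(self):
        for record in self:
            if not record.has_example:
                record.example_key = False

    has_example = fields.Boolean(compute=_compute_has_example,
                                 inverse=_inverse_has_example)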
Beispiel #19
0
class Module(models.Model):
    _name = "ir.module.module"
    _rec_name = "shortdesc"
    _description = "Module"
    _order = 'sequence,name'

    @api.model
    def fields_view_get(self,
                        view_id=None,
                        view_type='form',
                        toolbar=False,
                        submenu=False):
        res = super(Module, self).fields_view_get(view_id,
                                                  view_type,
                                                  toolbar=toolbar,
                                                  submenu=False)
        if view_type == 'form' and res.get('toolbar', False):
            install_id = self.env.ref(
                'base.action_server_module_immediate_install').id
            action = [
                rec for rec in res['toolbar']['action']
                if rec.get('id', False) != install_id
            ]
            res['toolbar'] = {'action': action}
        return res

    @classmethod
    def get_module_info(cls, name):
        try:
            return modules.load_information_from_description_file(name)
        except Exception:
            _logger.debug(
                'Error when trying to fetch information for module %s',
                name,
                exc_info=True)
            return {}

    @api.depends('name', 'description')
    def _get_desc(self):
        for module in self:
            path = modules.get_module_resource(
                module.name, 'static/description/index.html')
            if path:
                with tools.file_open(path, 'rb') as desc_file:
                    doc = desc_file.read()
                    html = lxml.html.document_fromstring(doc)
                    for element, attribute, link, pos in html.iterlinks():
                        src = element.get('src')
                        if src and '//' not in src and 'static/' not in src:
                            element.set(
                                'src', "/%s/static/description/%s" %
                                (module.name, src))
                    module.description_html = tools.html_sanitize(
                        lxml.html.tostring(html))
            else:
                overrides = {
                    'embed_stylesheet': False,
                    'doctitle_xform': False,
                    'output_encoding': 'unicode',
                    'xml_declaration': False,
                    'file_insertion_enabled': False,
                }
                output = publish_string(
                    source=module.description
                    if not module.application and module.description else '',
                    settings_overrides=overrides,
                    writer=MyWriter())
                module.description_html = tools.html_sanitize(output)

    @api.depends('name')
    def _get_latest_version(self):
        default_version = modules.adapt_version('1.0')
        for module in self:
            module.installed_version = self.get_module_info(module.name).get(
                'version', default_version)

    @api.depends('name', 'state')
    def _get_views(self):
        IrModelData = self.env['ir.model.data'].with_context(active_test=True)
        dmodels = ['ir.ui.view', 'ir.actions.report', 'ir.ui.menu']

        for module in self:
            # Skip uninstalled modules below, no data to find anyway.
            if module.state not in ('installed', 'to upgrade', 'to remove'):
                module.views_by_module = ""
                module.reports_by_module = ""
                module.menus_by_module = ""
                continue

            # then, search and group ir.model.data records
            imd_models = defaultdict(list)
            imd_domain = [('module', '=', module.name),
                          ('model', 'in', tuple(dmodels))]
            for data in IrModelData.sudo().search(imd_domain):
                imd_models[data.model].append(data.res_id)

            def browse(model):
                # as this method is called before the module update, some
                # xmlids may be invalid at this stage; explicitly filter
                # records before reading them
                return self.env[model].browse(imd_models[model]).exists()

            def format_view(v):
                return '%s%s (%s)' % (v.inherit_id and '* INHERIT '
                                      or '', v.name, v.type)

            module.views_by_module = "\n".join(
                sorted(format_view(v) for v in browse('ir.ui.view')))
            module.reports_by_module = "\n".join(
                sorted(r.name for r in browse('ir.actions.report')))
            module.menus_by_module = "\n".join(
                sorted(m.complete_name for m in browse('ir.ui.menu')))

    @api.depends('icon')
    def _get_icon_image(self):
        for module in self:
            module.icon_image = ''
            if module.icon:
                path_parts = module.icon.split('/')
                path = modules.get_module_resource(path_parts[1],
                                                   *path_parts[2:])
            else:
                path = modules.module.get_module_icon(module.name)
            if path:
                with tools.file_open(path, 'rb') as image_file:
                    module.icon_image = base64.b64encode(image_file.read())

    name = fields.Char('Technical Name',
                       readonly=True,
                       required=True,
                       index=True)
    category_id = fields.Many2one('ir.module.category',
                                  string='Category',
                                  readonly=True,
                                  index=True)
    shortdesc = fields.Char('Module Name', readonly=True, translate=True)
    summary = fields.Char('Summary', readonly=True, translate=True)
    description = fields.Text('Description', readonly=True, translate=True)
    description_html = fields.Html('Description HTML', compute='_get_desc')
    author = fields.Char("Author", readonly=True)
    maintainer = fields.Char('Maintainer', readonly=True)
    contributors = fields.Text('Contributors', readonly=True)
    website = fields.Char("Website", readonly=True)

    # Attention: incorrect field names!
    #   installed_version refers to the latest version (the one on disk)
    #   latest_version refers to the installed version (the one in database)
    #   published_version refers to the version available on the repository
    installed_version = fields.Char('Latest Version',
                                    compute='_get_latest_version')
    latest_version = fields.Char('Installed Version', readonly=True)
    published_version = fields.Char('Published Version', readonly=True)

    url = fields.Char('URL', readonly=True)
    sequence = fields.Integer('Sequence', default=100)
    dependencies_id = fields.One2many('ir.module.module.dependency',
                                      'module_id',
                                      string='Dependencies',
                                      readonly=True)
    exclusion_ids = fields.One2many('ir.module.module.exclusion',
                                    'module_id',
                                    string='Exclusions',
                                    readonly=True)
    auto_install = fields.Boolean(
        'Automatic Installation',
        help='An auto-installable module is automatically installed by the '
        'system when all its dependencies are satisfied. '
        'If the module has no dependency, it is always installed.')
    state = fields.Selection(STATES,
                             string='Status',
                             default='uninstallable',
                             readonly=True,
                             index=True)
    demo = fields.Boolean('Demo Data', default=False, readonly=True)
    license = fields.Selection(
        [('GPL-2', 'GPL Version 2'),
         ('GPL-2 or any later version', 'GPL-2 or later version'),
         ('GPL-3', 'GPL Version 3'),
         ('GPL-3 or any later version', 'GPL-3 or later version'),
         ('AGPL-3', 'Affero GPL-3'), ('LGPL-3', 'LGPL Version 3'),
         ('Other OSI approved licence', 'Other OSI Approved License'),
         ('OEEL-1', 'Harpiya Enterprise Edition License v1.0'),
         ('OPL-1', 'Harpiya Proprietary License v1.0'),
         ('Other proprietary', 'Other Proprietary')],
        string='License',
        default='LGPL-3',
        readonly=True)
    menus_by_module = fields.Text(string='Menus',
                                  compute='_get_views',
                                  store=True)
    reports_by_module = fields.Text(string='Reports',
                                    compute='_get_views',
                                    store=True)
    views_by_module = fields.Text(string='Views',
                                  compute='_get_views',
                                  store=True)
    application = fields.Boolean('Application', readonly=True)
    icon = fields.Char('Icon URL')
    icon_image = fields.Binary(string='Icon', compute='_get_icon_image')
    to_buy = fields.Boolean('Harpiya Enterprise Module', default=False)

    _sql_constraints = [
        ('name_uniq', 'UNIQUE (name)',
         'The name of the module must be unique!'),
    ]

    def unlink(self):
        if not self:
            return True
        for module in self:
            if module.state in ('installed', 'to upgrade', 'to remove',
                                'to install'):
                raise UserError(
                    _('You are trying to remove a module that is installed or will be installed.'
                      ))
        self.clear_caches()
        return super(Module, self).unlink()

    @staticmethod
    def _check_python_external_dependency(pydep):
        try:
            pkg_resources.get_distribution(pydep)
        except pkg_resources.DistributionNotFound as e:
            try:
                importlib.import_module(pydep)
                _logger.info(
                    "python external dependency on '%s' does not appear to be a valid PyPI package. Using a PyPI package name is recommended.",
                    pydep)
            except ImportError:
                # backward compatibility attempt failed
                _logger.warning("DistributionNotFound: %s", e)
                raise Exception('Python library not installed: %s' % (pydep, ))
        except pkg_resources.VersionConflict as e:
            _logger.warning("VersionConflict: %s", e)
            raise Exception('Python library version conflict: %s' % (pydep, ))
        except Exception as e:
            _logger.warning("get_distribution(%s) failed: %s", pydep, e)
            raise Exception('Error finding python library %s' % (pydep, ))

    @staticmethod
    def _check_external_dependencies(terp):
        depends = terp.get('external_dependencies')
        if not depends:
            return
        for pydep in depends.get('python', []):
            Module._check_python_external_dependency(pydep)

        for binary in depends.get('bin', []):
            try:
                tools.find_in_path(binary)
            except IOError:
                raise Exception('Unable to find %r in path' % (binary, ))

    @classmethod
    def check_external_dependencies(cls, module_name, newstate='to install'):
        terp = cls.get_module_info(module_name)
        try:
            cls._check_external_dependencies(terp)
        except Exception as e:
            if newstate == 'to install':
                msg = _(
                    'Unable to install module "%s" because an external dependency is not met: %s'
                )
            elif newstate == 'to upgrade':
                msg = _(
                    'Unable to upgrade module "%s" because an external dependency is not met: %s'
                )
            else:
                msg = _(
                    'Unable to process module "%s" because an external dependency is not met: %s'
                )
            raise UserError(msg % (module_name, e.args[0]))

    def _state_update(self, newstate, states_to_update, level=100):
        if level < 1:
            raise UserError(_('Recursion error in module dependencies!'))

        # whether some modules are installed with demo data
        demo = False

        for module in self:
            # determine dependency modules to update/others
            update_mods, ready_mods = self.browse(), self.browse()
            for dep in module.dependencies_id:
                if dep.state == 'unknown':
                    raise UserError(
                        _("You are trying to install module '%s', which depends on module '%s'.\nBut the latter module is not available on your system."
                          ) % (
                              module.name,
                              dep.name,
                          ))
                if dep.depend_id.state == newstate:
                    ready_mods += dep.depend_id
                else:
                    update_mods += dep.depend_id

            # update dependency modules that require it, and determine demo for module
            update_demo = update_mods._state_update(newstate,
                                                    states_to_update,
                                                    level=level - 1)
            module_demo = module.demo or update_demo or any(
                mod.demo for mod in ready_mods)
            demo = demo or module_demo

            # check dependencies and update module itself
            self.check_external_dependencies(module.name, newstate)
            if module.state in states_to_update:
                module.write({'state': newstate, 'demo': module_demo})

        return demo

    @assert_log_admin_access
    def button_install(self):
        # domain to select auto-installable (but not yet installed) modules
        auto_domain = [('state', '=', 'uninstalled'),
                       ('auto_install', '=', True)]

        # determine whether an auto-install module must be installed:
        #  - all its dependencies are installed or to be installed,
        #  - at least one dependency is 'to install'
        install_states = frozenset(('installed', 'to install', 'to upgrade'))

        def must_install(module):
            states = {
                dep.state
                for dep in module.dependencies_id if dep.auto_install_required
            }
            return states <= install_states and 'to install' in states

        modules = self
        while modules:
            # Mark the given modules and their dependencies to be installed.
            modules._state_update('to install', ['uninstalled'])

            # Determine which auto-installable modules must be installed.
            modules = self.search(auto_domain).filtered(must_install)

        # the modules that are installed/to install/to upgrade
        install_mods = self.search([('state', 'in', list(install_states))])

        # check individual exclusions
        install_names = {module.name for module in install_mods}
        for module in install_mods:
            for exclusion in module.exclusion_ids:
                if exclusion.name in install_names:
                    msg = _('Modules "%s" and "%s" are incompatible.')
                    raise UserError(
                        msg %
                        (module.shortdesc, exclusion.exclusion_id.shortdesc))

        # check category exclusions
        def closure(module):
            todo = result = module
            while todo:
                result |= todo
                todo = todo.dependencies_id.depend_id
            return result

        exclusives = self.env['ir.module.category'].search([('exclusive', '=',
                                                             True)])
        for category in exclusives:
            # retrieve installed modules in category and sub-categories
            categories = category.search([('id', 'child_of', category.ids)])
            modules = install_mods.filtered(
                lambda mod: mod.category_id in categories)
            # the installation is valid if all installed modules in categories
            # belong to the transitive dependencies of one of them
            if modules and not any(modules <= closure(module)
                                   for module in modules):
                msg = _(
                    'You are trying to install incompatible modules in category "%s":'
                )
                labels = dict(self.fields_get(['state'])['state']['selection'])
                raise UserError("\n".join([msg % category.name] + [
                    "- %s (%s)" % (module.shortdesc, labels[module.state])
                    for module in modules
                ]))

        return dict(ACTION_DICT, name=_('Install'))

    @assert_log_admin_access
    def button_immediate_install(self):
        """ Installs the selected module(s) immediately and fully,
        returns the next res.config action to execute

        :returns: next res.config item to execute
        :rtype: dict[str, object]
        """
        _logger.info('User #%d triggered module installation', self.env.uid)
        # We use here the request object (which is thread-local) as a kind of
        # "global" env because the env is not usable in the following use case.
        # When installing a Chart of Account, I would like to send the
        # allowed companies to configure it on the correct company.
        # Otherwise, the SUPERUSER won't be aware of that and will try to
        # configure the CoA on his own company, which makes no sense.
        if request:
            request.allowed_company_ids = self.env.companies.ids
        return self._button_immediate_function(type(self).button_install)

    @assert_log_admin_access
    def button_install_cancel(self):
        self.write({'state': 'uninstalled', 'demo': False})
        return True

    @assert_log_admin_access
    def module_uninstall(self):
        """ Perform the various steps required to uninstall a module completely
        including the deletion of all database structures created by the module:
        tables, columns, constraints, etc.
        """
        modules_to_remove = self.mapped('name')
        self.env['ir.model.data']._module_data_uninstall(modules_to_remove)
        # we deactivate prefetching to not try to read a column that has been deleted
        self.with_context(prefetch_fields=False).write({
            'state': 'uninstalled',
            'latest_version': False
        })
        return True

    def _remove_copied_views(self):
        """ Remove the copies of the views installed by the modules in `self`.

        Those copies do not have an external id so they will not be cleaned by
        `_module_data_uninstall`. This is why we rely on `key` instead.

        It is important to remove these copies because using them will crash if
        they rely on data that don't exist anymore if the module is removed.
        """
        domain = expression.OR([[('key', '=like', m.name + '.%')]
                                for m in self])
        orphans = self.env['ir.ui.view'].with_context(
            **{
                'active_test': False,
                MODULE_UNINSTALL_FLAG: True
            }).search(domain)
        orphans.unlink()

    @api.returns('self')
    def downstream_dependencies(self,
                                known_deps=None,
                                exclude_states=('uninstalled', 'uninstallable',
                                                'to remove')):
        """ Return the modules that directly or indirectly depend on the modules
        in `self`, and that satisfy the `exclude_states` filter.
        """
        if not self:
            return self
        known_deps = known_deps or self.browse()
        query = """ SELECT DISTINCT m.id
                    FROM ir_module_module_dependency d
                    JOIN ir_module_module m ON (d.module_id=m.id)
                    WHERE
                        d.name IN (SELECT name from ir_module_module where id in %s) AND
                        m.state NOT IN %s AND
                        m.id NOT IN %s """
        self._cr.execute(query, (tuple(self.ids), tuple(exclude_states),
                                 tuple(known_deps.ids or self.ids)))
        new_deps = self.browse([row[0] for row in self._cr.fetchall()])
        missing_mods = new_deps - known_deps
        known_deps |= new_deps
        if missing_mods:
            known_deps |= missing_mods.downstream_dependencies(
                known_deps, exclude_states)
        return known_deps

    @api.returns('self')
    def upstream_dependencies(self,
                              known_deps=None,
                              exclude_states=('installed', 'uninstallable',
                                              'to remove')):
        """ Return the dependency tree of modules of the modules in `self`, and
        that satisfy the `exclude_states` filter.
        """
        if not self:
            return self
        known_deps = known_deps or self.browse()
        query = """ SELECT DISTINCT m.id
                    FROM ir_module_module_dependency d
                    JOIN ir_module_module m ON (d.module_id=m.id)
                    WHERE
                        m.name IN (SELECT name from ir_module_module_dependency where module_id in %s) AND
                        m.state NOT IN %s AND
                        m.id NOT IN %s """
        self._cr.execute(query, (tuple(self.ids), tuple(exclude_states),
                                 tuple(known_deps.ids or self.ids)))
        new_deps = self.browse([row[0] for row in self._cr.fetchall()])
        missing_mods = new_deps - known_deps
        known_deps |= new_deps
        if missing_mods:
            known_deps |= missing_mods.upstream_dependencies(
                known_deps, exclude_states)
        return known_deps
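
    def _affected_by_uninstall_sketch(self):
        """ Hedged sketch (not part of the original model): the recordset of
        modules that would be marked 'to remove' together with ``self``,
        mirroring the write performed in ``button_uninstall`` below. """
        return self + self.downstream_dependencies()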

    def next(self):
        """
        Return the action linked to an ir.actions.todo is there exists one that
        should be executed. Otherwise, redirect to /web
        """
        Todos = self.env['ir.actions.todo']
        _logger.info('getting next %s', Todos)
        active_todo = Todos.search([('state', '=', 'open')], limit=1)
        if active_todo:
            _logger.info('next action is "%s"', active_todo.name)
            return active_todo.action_launch()
        return {
            'type': 'ir.actions.act_url',
            'target': 'self',
            'url': '/web',
        }

    def _button_immediate_function(self, function):
        try:
            # This is done because the installation/uninstallation/upgrade can modify a currently
            # running cron job and prevent it from finishing, and since the ir_cron table is locked
            # during execution, the lock won't be released until timeout.
            self._cr.execute("SELECT * FROM ir_cron FOR UPDATE NOWAIT")
        except psycopg2.OperationalError:
            raise UserError(
                _("The server is busy right now, module operations are not possible at"
                  " this time, please try again later."))
        function(self)

        self._cr.commit()
        api.Environment.reset()
        modules.registry.Registry.new(self._cr.dbname, update_module=True)

        self._cr.commit()
        env = api.Environment(self._cr, self._uid, self._context)
        # pylint: disable=next-method-called
        config = env['ir.module.module'].next() or {}
        if config.get('type') not in ('ir.actions.act_window_close', ):
            return config

        # reload the client; open the first available root menu
        menu = env['ir.ui.menu'].search([('parent_id', '=', False)])[:1]
        return {
            'type': 'ir.actions.client',
            'tag': 'reload',
            'params': {
                'menu_id': menu.id
            },
        }

    @assert_log_admin_access
    def button_immediate_uninstall(self):
        """
        Uninstall the selected module(s) immediately and fully,
        returns the next res.config action to execute
        """
        _logger.info('User #%d triggered module uninstallation', self.env.uid)
        return self._button_immediate_function(type(self).button_uninstall)

    @assert_log_admin_access
    def button_uninstall(self):
        if 'base' in self.mapped('name'):
            raise UserError(_("The `base` module cannot be uninstalled"))
        if not all(state in ('installed', 'to upgrade')
                   for state in self.mapped('state')):
            raise UserError(
                _("One or more of the selected modules have already been uninstalled, if you "
                  "believe this to be an error, you may try again later or contact support."
                  ))
        deps = self.downstream_dependencies()
        (self + deps).write({'state': 'to remove'})
        return dict(ACTION_DICT, name=_('Uninstall'))

    @assert_log_admin_access
    def button_uninstall_wizard(self):
        """ Launch the wizard to uninstall the given module. """
        return {
            'type': 'ir.actions.act_window',
            'target': 'new',
            'name': _('Uninstall module'),
            'view_mode': 'form',
            'res_model': 'base.module.uninstall',
            'context': {
                'default_module_id': self.id
            },
        }

    def button_uninstall_cancel(self):
        self.write({'state': 'installed'})
        return True

    @assert_log_admin_access
    def button_immediate_upgrade(self):
        """
        Upgrade the selected module(s) immediately and fully,
        return the next res.config action to execute
        """
        return self._button_immediate_function(type(self).button_upgrade)

    @assert_log_admin_access
    def button_upgrade(self):
        Dependency = self.env['ir.module.module.dependency']
        self.update_list()

        todo = list(self)
        i = 0
        while i < len(todo):
            module = todo[i]
            i += 1
            if module.state not in ('installed', 'to upgrade'):
                raise UserError(
                    _("Cannot upgrade module '%s'. It is not installed.") %
                    (module.name, ))
            self.check_external_dependencies(module.name, 'to upgrade')
            for dep in Dependency.search([('name', '=', module.name)]):
                if dep.module_id.state == 'installed' and dep.module_id not in todo:
                    todo.append(dep.module_id)

        self.browse(module.id
                    for module in todo).write({'state': 'to upgrade'})

        to_install = []
        for module in todo:
            for dep in module.dependencies_id:
                if dep.state == 'unknown':
                    raise UserError(
                        _("You are trying to upgrade module '%s', which depends on module '%s'.\nBut the latter module is not available on your system."
                          ) % (
                              module.name,
                              dep.name,
                          ))
                if dep.state == 'uninstalled':
                    to_install += self.search([('name', '=', dep.name)]).ids

        self.browse(to_install).button_install()
        return dict(ACTION_DICT, name=_('Apply Scheduled Upgrade'))

    @assert_log_admin_access
    def button_upgrade_cancel(self):
        self.write({'state': 'installed'})
        return True

    @staticmethod
    def get_values_from_terp(terp):
        return {
            'description': terp.get('description', ''),
            'shortdesc': terp.get('name', ''),
            'author': terp.get('author', 'Unknown'),
            'maintainer': terp.get('maintainer', False),
            'contributors': ', '.join(terp.get('contributors', [])) or False,
            'website': terp.get('website', ''),
            'license': terp.get('license', 'LGPL-3'),
            'sequence': terp.get('sequence', 100),
            'application': terp.get('application', False),
            'auto_install': terp.get('auto_install', False) is not False,
            'icon': terp.get('icon', False),
            'summary': terp.get('summary', ''),
            'url': terp.get('url') or terp.get('live_test_url', ''),
            'to_buy': False
        }

    @api.model
    def create(self, vals):
        new = super(Module, self).create(vals)
        module_metadata = {
            'name': 'module_%s' % vals['name'],
            'model': 'ir.module.module',
            'module': 'base',
            'res_id': new.id,
            'noupdate': True,
        }
        self.env['ir.model.data'].create(module_metadata)
        return new

    # update the list of available packages
    @assert_log_admin_access
    @api.model
    def update_list(self):
        res = [0, 0]  # [update, add]

        default_version = modules.adapt_version('1.0')
        known_mods = self.with_context(lang=None).search([])
        known_mods_names = {mod.name: mod for mod in known_mods}

        # iterate through detected modules and update/create them in db
        for mod_name in modules.get_modules():
            mod = known_mods_names.get(mod_name)
            terp = self.get_module_info(mod_name)
            values = self.get_values_from_terp(terp)

            if mod:
                updated_values = {}
                for key in values:
                    old = getattr(mod, key)
                    if (old or values[key]) and values[key] != old:
                        updated_values[key] = values[key]
                if terp.get('installable',
                            True) and mod.state == 'uninstallable':
                    updated_values['state'] = 'uninstalled'
                if parse_version(terp.get(
                        'version', default_version)) > parse_version(
                            mod.latest_version or default_version):
                    res[0] += 1
                if updated_values:
                    mod.write(updated_values)
            else:
                mod_path = modules.get_module_path(mod_name)
                if not mod_path or not terp:
                    continue
                state = "uninstalled" if terp.get('installable',
                                                  True) else "uninstallable"
                mod = self.create(dict(name=mod_name, state=state, **values))
                res[1] += 1

            mod._update_dependencies(terp.get('depends', []),
                                     terp.get('auto_install'))
            mod._update_exclusions(terp.get('excludes', []))
            mod._update_category(terp.get('category', 'Uncategorized'))

        return res

    @assert_log_admin_access
    def download(self, download=True):
        return []

    @assert_log_admin_access
    @api.model
    def install_from_urls(self, urls):
        if not self.env.user.has_group('base.group_system'):
            raise AccessDenied()

        # One-click install is opt-in - cfr Issue #15225
        ad_dir = tools.config.addons_data_dir
        if not os.access(ad_dir, os.W_OK):
            msg = (_(
                "Automatic install of downloaded Apps is currently disabled."
            ) + "\n\n" + _(
                "To enable it, make sure this directory exists and is writable on the server:"
            ) + "\n%s" % ad_dir)
            _logger.warning(msg)
            raise UserError(msg)

        apps_server = urls.url_parse(self.get_apps_server())

        OPENERP = harpiya.release.product_name.lower()
        tmp = tempfile.mkdtemp()
        _logger.debug('Install from url: %r', urls)
        try:
            # 1. Download & unzip missing modules
            for module_name, url in urls.items():
                if not url:
                    continue  # nothing to download, the local version is already the latest one

                up = urls.url_parse(url)
                if up.scheme != apps_server.scheme or up.netloc != apps_server.netloc:
                    raise AccessDenied()

                try:
                    _logger.info('Downloading module `%s` from OpenERP Apps',
                                 module_name)
                    response = requests.get(url)
                    response.raise_for_status()
                    content = response.content
                except Exception:
                    _logger.exception('Failed to fetch module %s', module_name)
                    raise UserError(
                        _('The `%s` module appears to be unavailable at the moment, please try again later.'
                          ) % module_name)
                else:
                    zipfile.ZipFile(io.BytesIO(content)).extractall(tmp)
                    assert os.path.isdir(os.path.join(tmp, module_name))

            # 2a. Copy/Replace module source in addons path
            for module_name, url in urls.items():
                if module_name == OPENERP or not url:
                    continue  # OPENERP is a special case, handled below; no URL means a local module
                module_path = modules.get_module_path(module_name,
                                                      downloaded=True,
                                                      display_warning=False)
                bck = backup(module_path, False)
                _logger.info('Copy downloaded module `%s` to `%s`',
                             module_name, module_path)
                shutil.move(os.path.join(tmp, module_name), module_path)
                if bck:
                    shutil.rmtree(bck)

            # 2b.  Copy/Replace server+base module source if downloaded
            if urls.get(OPENERP):
                # Special case: it contains the server and the base module,
                # so the extraction path is not the same.
                base_path = os.path.dirname(modules.get_module_path('base'))

                # copy all modules in the SERVER/harpiya/addons directory to the new "harpiya" module (except base itself)
                for d in os.listdir(base_path):
                    if d != 'base' and os.path.isdir(os.path.join(
                            base_path, d)):
                        destdir = os.path.join(
                            tmp, OPENERP, 'addons',
                            d)  # XXX 'harpiya' subdirectory ?
                        shutil.copytree(os.path.join(base_path, d), destdir)

                # then replace the server by the new "base" module
                server_dir = tools.config['root_path']  # XXX or dirname()
                bck = backup(server_dir)
                _logger.info('Copy downloaded module `harpiya` to `%s`',
                             server_dir)
                shutil.move(os.path.join(tmp, OPENERP), server_dir)
                #if bck:
                #    shutil.rmtree(bck)

            self.update_list()

            with_urls = [
                module_name for module_name, url in urls.items() if url
            ]
            downloaded = self.search([('name', 'in', with_urls)])
            installed = self.search([('id', 'in', downloaded.ids),
                                     ('state', '=', 'installed')])

            to_install = self.search([('name', 'in', list(urls)),
                                      ('state', '=', 'uninstalled')])
            post_install_action = to_install.button_immediate_install()

            if installed or to_install:
                # in this case, force server restart to reload python code...
                self._cr.commit()
                harpiya.service.server.restart()
                return {
                    'type': 'ir.actions.client',
                    'tag': 'home',
                    'params': {
                        'wait': True
                    },
                }
            return post_install_action

        finally:
            shutil.rmtree(tmp)

    @api.model
    def get_apps_server(self):
        return tools.config.get('apps_server', 'https://apps.harpiya.com/apps')

    def _update_dependencies(self, depends=None, auto_install_requirements=()):
        existing = set(dep.name for dep in self.dependencies_id)
        needed = set(depends or [])
        for dep in (needed - existing):
            self._cr.execute(
                'INSERT INTO ir_module_module_dependency (module_id, name) values (%s, %s)',
                (self.id, dep))
        for dep in (existing - needed):
            self._cr.execute(
                'DELETE FROM ir_module_module_dependency WHERE module_id = %s and name = %s',
                (self.id, dep))
        self._cr.execute(
            'UPDATE ir_module_module_dependency SET auto_install_required = (name = any(%s)) WHERE module_id = %s',
            (list(auto_install_requirements or ()), self.id))
        self.invalidate_cache(['dependencies_id'], self.ids)

    def _update_exclusions(self, excludes=None):
        existing = set(excl.name for excl in self.exclusion_ids)
        needed = set(excludes or [])
        for name in (needed - existing):
            self._cr.execute(
                'INSERT INTO ir_module_module_exclusion (module_id, name) VALUES (%s, %s)',
                (self.id, name))
        for name in (existing - needed):
            self._cr.execute(
                'DELETE FROM ir_module_module_exclusion WHERE module_id=%s AND name=%s',
                (self.id, name))
        self.invalidate_cache(['exclusion_ids'], self.ids)
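
    # Hedged note (illustrative helper, not part of the original model): both
    # sync methods above follow the same set-difference pattern; reduced to
    # plain Python it is simply:
    @staticmethod
    def _sync_names_sketch(existing, needed):
        existing, needed = set(existing), set(needed or [])
        return needed - existing, existing - needed  # (to insert, to delete)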

    def _update_category(self, category='Uncategorized'):
        current_category = self.category_id
        current_category_path = []
        while current_category:
            current_category_path.insert(0, current_category.name)
            current_category = current_category.parent_id

        categs = category.split('/')
        if categs != current_category_path:
            cat_id = modules.db.create_categories(self._cr, categs)
            self.write({'category_id': cat_id})

    def _update_translations(self, filter_lang=None):
        if not filter_lang:
            langs = self.env['res.lang'].get_installed()
            filter_lang = [code for code, _ in langs]
        elif not isinstance(filter_lang, (list, tuple)):
            filter_lang = [filter_lang]

        update_mods = self.filtered(lambda r: r.state in
                                    ('installed', 'to install', 'to upgrade'))
        mod_dict = {
            mod.name: mod.dependencies_id.mapped('name')
            for mod in update_mods
        }
        mod_names = topological_sort(mod_dict)
        self.env['ir.translation']._load_module_terms(mod_names, filter_lang)

    def _check(self):
        for module in self:
            if not module.description_html:
                _logger.warning('module %s: description is empty !',
                                module.name)

    @api.model
    @tools.ormcache()
    def _installed(self):
        """ Return the set of installed modules as a dictionary {name: id} """
        return {
            module.name: module.id
            for module in self.sudo().search([('state', '=', 'installed')])
        }
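
# Hedged usage sketch (assumes an Odoo shell with `env` bound; the module
# name is an illustration, not taken from the code above): refresh the module
# list from disk, then trigger an immediate install.
def example_install(env, name='web_editor'):
    Module = env['ir.module.module']
    Module.update_list()
    module = Module.search([('name', '=', name)], limit=1)
    if module and module.state == 'uninstalled':
        return module.button_immediate_install()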
Beispiel #20
0
class IrUiMenu(models.Model):
    _name = 'ir.ui.menu'
    _description = 'Menu'
    _order = "sequence,id"
    _parent_store = True

    def __init__(self, *args, **kwargs):
        super(IrUiMenu, self).__init__(*args, **kwargs)
        self.pool['ir.model.access'].register_cache_clearing_method(self._name, 'clear_caches')

    name = fields.Char(string='Menu', required=True, translate=True)
    active = fields.Boolean(default=True)
    sequence = fields.Integer(default=10)
    child_id = fields.One2many('ir.ui.menu', 'parent_id', string='Child IDs')
    parent_id = fields.Many2one('ir.ui.menu', string='Parent Menu', index=True, ondelete="restrict")
    parent_path = fields.Char(index=True)
    groups_id = fields.Many2many('res.groups', 'ir_ui_menu_group_rel',
                                 'menu_id', 'gid', string='Groups',
                                 help="If you have groups, the visibility of this menu will be based on these groups. "\
                                      "If this field is empty, Harpiya will compute visibility based on the related object's read access.")
    complete_name = fields.Char(compute='_compute_complete_name', string='Full Path')
    web_icon = fields.Char(string='Web Icon File')
    action = fields.Reference(selection=[('ir.actions.report', 'ir.actions.report'),
                                         ('ir.actions.act_window', 'ir.actions.act_window'),
                                         ('ir.actions.act_url', 'ir.actions.act_url'),
                                         ('ir.actions.server', 'ir.actions.server'),
                                         ('ir.actions.client', 'ir.actions.client')])

    web_icon_data = fields.Binary(string='Web Icon Image', attachment=True)

    @api.depends('name', 'parent_id.complete_name')
    def _compute_complete_name(self):
        for menu in self:
            menu.complete_name = menu._get_full_name()

    def _get_full_name(self, level=6):
        """ Return the full name of ``self`` (up to a certain level). """
        if level <= 0:
            return '...'
        if self.parent_id:
            return self.parent_id._get_full_name(level - 1) + MENU_ITEM_SEPARATOR + (self.name or "")
        else:
            return self.name

    def read_image(self, path):
        if not path:
            return False
        path_info = path.split(',')
        icon_path = get_module_resource(path_info[0], path_info[1])
        icon_image = False
        if icon_path:
            with tools.file_open(icon_path, 'rb') as icon_file:
                icon_image = base64.encodebytes(icon_file.read())  # encodestring was removed in Python 3.9
        return icon_image

    @api.constrains('parent_id')
    def _check_parent_id(self):
        if not self._check_recursion():
            raise ValidationError(_('Error! You cannot create recursive menus.'))

    @api.model
    @tools.ormcache('frozenset(self.env.user.groups_id.ids)', 'debug')
    def _visible_menu_ids(self, debug=False):
        """ Return the ids of the menu items visible to the user. """
        # retrieve all menus, and determine which ones are visible
        context = {'ir.ui.menu.full_list': True}
        menus = self.with_context(context).search([])

        groups = self.env.user.groups_id
        if not debug:
            groups = groups - self.env.ref('base.group_no_one')
        # first discard all menus with groups the user does not have
        menus = menus.filtered(
            lambda menu: not menu.groups_id or menu.groups_id & groups)

        # take apart menus that have an action
        action_menus = menus.filtered(lambda m: m.action and m.action.exists())
        folder_menus = menus - action_menus
        visible = self.browse()

        # process action menus, check whether their action is allowed
        access = self.env['ir.model.access']
        MODEL_GETTER = {
            'ir.actions.act_window': lambda action: action.res_model,
            'ir.actions.report': lambda action: action.model,
            'ir.actions.server': lambda action: action.model_id.model,
        }
        for menu in action_menus:
            get_model = MODEL_GETTER.get(menu.action._name)
            if not get_model or not get_model(menu.action) or \
                    access.check(get_model(menu.action), 'read', False):
                # make menu visible, and its folder ancestors, too
                visible += menu
                menu = menu.parent_id
                while menu and menu in folder_menus and menu not in visible:
                    visible += menu
                    menu = menu.parent_id

        return set(visible.ids)

    @api.returns('self')
    def _filter_visible_menus(self):
        """ Filter `self` to only keep the menu items that should be visible in
            the menu hierarchy of the current user.
            Uses a cache for speeding up the computation.
        """
        visible_ids = self._visible_menu_ids(request.session.debug if request else False)
        return self.filtered(lambda menu: menu.id in visible_ids)

    @api.model
    def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):
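        # deliberately fetch the unpaginated result (offset=0, limit=None), so
        # that visibility filtering below happens before offset/limit are applied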
        menu_ids = super(IrUiMenu, self)._search(args, offset=0, limit=None, order=order, count=False, access_rights_uid=access_rights_uid)
        menus = self.browse(menu_ids)
        if menus:
            # menu filtering is done only on main menu tree, not other menu lists
            if not self._context.get('ir.ui.menu.full_list'):
                menus = menus._filter_visible_menus()
            if offset:
                menus = menus[offset:]
            if limit:
                menus = menus[:limit]
        return len(menus) if count else menus.ids

    def name_get(self):
        return [(menu.id, menu._get_full_name()) for menu in self]

    @api.model_create_multi
    def create(self, vals_list):
        self.clear_caches()
        for values in vals_list:
            if 'web_icon' in values:
                values['web_icon_data'] = self._compute_web_icon_data(values.get('web_icon'))
        return super(IrUiMenu, self).create(vals_list)

    def write(self, values):
        self.clear_caches()
        if 'web_icon' in values:
            values['web_icon_data'] = self._compute_web_icon_data(values.get('web_icon'))
        return super(IrUiMenu, self).write(values)

    def _compute_web_icon_data(self, web_icon):
        """ Returns the image associated to `web_icon`.
            `web_icon` can either be:
              - an image icon [module, path]
              - a built icon [icon_class, icon_color, background_color]
            `read_image` only needs to be called for an image icon.
        """
        if web_icon and len(web_icon.split(',')) == 2:
            return self.read_image(web_icon)
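    # Hypothetical examples of the two `web_icon` formats handled above:
    #   'base,static/description/icon.png'  -> image icon, loaded via read_image
    #   'fa fa-home,#FFFFFF,#875A7B'        -> built icon, no image data to load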

    def unlink(self):
        # Detach children and promote them to top-level, because it would be unwise to
        # cascade-delete submenus blindly. We also can't use ondelete=set null because
        # that is not supported when _parent_store is used (would silently corrupt it).
        # TODO: ideally we should move them under a generic "Orphans" menu somewhere?
        extra = {'ir.ui.menu.full_list': True,
                 'active_test': False}
        direct_children = self.with_context(**extra).search([('parent_id', 'in', self.ids)])
        direct_children.write({'parent_id': False})

        self.clear_caches()
        return super(IrUiMenu, self).unlink()

    def copy(self, default=None):
        record = super(IrUiMenu, self).copy(default=default)
        match = NUMBER_PARENS.search(record.name)
        if match:
            next_num = int(match.group(1)) + 1
            record.name = NUMBER_PARENS.sub('(%d)' % next_num, record.name)
        else:
            record.name = record.name + '(1)'
        return record

    @api.model
    @api.returns('self')
    def get_user_roots(self):
        """ Return all root menu ids visible for the user.

        :return: the root menu ids
        :rtype: list(int)
        """
        return self.search([('parent_id', '=', False)])

    @api.model
    @tools.ormcache_context('self._uid', keys=('lang',))
    def load_menus_root(self):
        fields = ['name', 'sequence', 'parent_id', 'action', 'web_icon_data']
        menu_roots = self.get_user_roots()
        menu_roots_data = menu_roots.read(fields) if menu_roots else []

        menu_root = {
            'id': False,
            'name': 'root',
            'parent_id': [-1, ''],
            'children': menu_roots_data,
            'all_menu_ids': menu_roots.ids,
        }

        menu_roots._set_menuitems_xmlids(menu_root)

        return menu_root

    @api.model
    @tools.ormcache_context('self._uid', 'debug', keys=('lang',))
    def load_menus(self, debug):
        """ Loads all menu items (all applications and their sub-menus).

        :return: the menu root
        :rtype: dict('children': menu_nodes)
        """
        fields = ['name', 'sequence', 'parent_id', 'action', 'web_icon', 'web_icon_data']
        menu_roots = self.get_user_roots()
        menu_roots_data = menu_roots.read(fields) if menu_roots else []
        menu_root = {
            'id': False,
            'name': 'root',
            'parent_id': [-1, ''],
            'children': menu_roots_data,
            'all_menu_ids': menu_roots.ids,
        }

        if not menu_roots_data:
            return menu_root

        # menus are loaded fully, unlike a regular tree view, because there is
        # a limited number of items (752 when all 6.1 addons are installed)
        menus = self.search([('id', 'child_of', menu_roots.ids)])
        menu_items = menus.read(fields)

        # add roots at the end of the sequence, so that they will overwrite
        # equivalent menu items from full menu read when put into id:item
        # mapping, resulting in children being correctly set on the roots.
        menu_items.extend(menu_roots_data)
        menu_root['all_menu_ids'] = menus.ids  # includes menu_roots!

        # make a tree using parent_id
        menu_items_map = {menu_item["id"]: menu_item for menu_item in menu_items}
        for menu_item in menu_items:
            parent = menu_item['parent_id'] and menu_item['parent_id'][0]
            if parent in menu_items_map:
                menu_items_map[parent].setdefault(
                    'children', []).append(menu_item)

        # sort by sequence a tree using parent_id
        for menu_item in menu_items:
            menu_item.setdefault('children', []).sort(key=operator.itemgetter('sequence'))

        (menu_roots + menus)._set_menuitems_xmlids(menu_root)

        return menu_root

    def _set_menuitems_xmlids(self, menu_root):
        menuitems = self.env['ir.model.data'].sudo().search([
                ('res_id', 'in', self.ids),
                ('model', '=', 'ir.ui.menu')
            ])

        xmlids = {
            menu.res_id: menu.complete_name
            for menu in menuitems
        }

        def _set_xmlids(tree, xmlids):
            tree['xmlid'] = xmlids.get(tree['id'], '')
            if 'children' in tree:
                for child in tree['children']:
                    _set_xmlids(child, xmlids)

        _set_xmlids(menu_root, xmlids)
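
# A minimal, standalone sketch (plain Python, hypothetical menu rows) of the
# tree-building step used by load_menus above: map items by id, attach each
# item to its parent's 'children' list, then sort every list by 'sequence'.
import operator

menu_rows = [
    {'id': 1, 'parent_id': False, 'sequence': 10},
    {'id': 2, 'parent_id': (1, 'Root'), 'sequence': 20},
    {'id': 3, 'parent_id': (1, 'Root'), 'sequence': 5},
]
rows_map = {row['id']: row for row in menu_rows}
for row in menu_rows:
    parent = row['parent_id'] and row['parent_id'][0]
    if parent in rows_map:
        rows_map[parent].setdefault('children', []).append(row)
for row in menu_rows:
    row.setdefault('children', []).sort(key=operator.itemgetter('sequence'))

assert [c['id'] for c in rows_map[1]['children']] == [3, 2]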
Example #21
class ProductBrandEpt(models.Model):
    _name = 'product.brand.ept'
    _inherit = ['website.published.multi.mixin']
    _order = 'name'
    _description = 'Product Brand'

    name = fields.Char('Brand Name', required=True, translate=True)
    description = fields.Text('Description', translate=True)
    website_id = fields.Many2one("website", string="Website")
    logo = fields.Binary('Logo File')

    def get_products(self):
        for brand in self:
            brand.product_ids = self.env['product.template'].search([
                ('product_brand_ept_id', '=', brand.id)
            ]).ids

    product_ids = fields.Many2many('product.template',
                                   string='Brand Products',
                                   help="Add products for this brand",
                                   compute=get_products,
                                   readonly=False)
    products_count = fields.Integer(
        string='Number of products',
        compute='_get_products_count',
        help='Number of products linked to this brand',
        store=True)
    is_brand_page = fields.Boolean(
        string='Is Brand Page',
        help="It will set the separate landing page for this brand")
    brand_page = fields.Many2one(
        "website.page",
        string="Brand Page",
        help="Select the brand page which you want to set for this brand.")

    def write(self, values):
        vals = values.get('product_ids')
        if vals:
            # detach the previously linked products; product_brand_ept_id is
            # a many2one on product.template, so it is cleared by writing False
            old_values = self.product_ids.ids
            self.env['product.template'].browse(old_values).write(
                {'product_brand_ept_id': False})
        result = super(ProductBrandEpt, self).write(values)
        if vals and self.website_published:
            if self.product_ids:
                self.product_ids.write({'product_brand_ept_id': self.id})
            else:
                self.env['product.template'].browse(old_values).write(
                    {'product_brand_ept_id': False})
        return result

    @api.depends('product_ids')
    def _get_products_count(self):
        """
        Compute The product count of brand
        :return:
        """
        for brand in self:
            brand.products_count = len(brand.product_ids)
Example #22
class IrAttachment(models.Model):
    """Attachments are used to link binary files or url to any openerp document.

    External attachment storage
    ---------------------------

    The computed field ``datas`` is implemented using ``_file_read``,
    ``_file_write`` and ``_file_delete``, which can be overridden to implement
    other storage engines. Such methods should check for other location pseudo
    uri (example: hdfs://hadoopserver).

    The default implementation is the file:dirname location that stores files
    on the local filesystem using name based on their sha1 hash
    """
    _name = 'ir.attachment'
    _description = 'Attachment'
    _order = 'id desc'

    def _compute_res_name(self):
        for attachment in self:
            if attachment.res_model and attachment.res_id:
                record = self.env[attachment.res_model].browse(
                    attachment.res_id)
                attachment.res_name = record.display_name
            else:
                attachment.res_name = False

    @api.model
    def _storage(self):
        return self.env['ir.config_parameter'].sudo().get_param(
            'ir_attachment.location', 'file')

    @api.model
    def _filestore(self):
        return config.filestore(self._cr.dbname)

    @api.model
    def force_storage(self):
        """Force all attachments to be stored in the currently configured storage"""
        if not self.env.is_admin():
            raise AccessError(
                _('Only administrators can execute this action.'))

        # domain to retrieve the attachments to migrate
        domain = {
            'db': [('store_fname', '!=', False)],
            'file': [('db_datas', '!=', False)],
        }[self._storage()]

        for attach in self.search(domain):
            attach.write({'datas': attach.datas})
        return True

    @api.model
    def _full_path(self, path):
        # sanitize path
        path = re.sub('[.]', '', path)
        path = path.strip('/\\')
        return os.path.join(self._filestore(), path)

    @api.model
    def _get_path(self, bin_data, sha):
        # retro compatibility
        fname = sha[:3] + '/' + sha
        full_path = self._full_path(fname)
        if os.path.isfile(full_path):
            return fname, full_path  # keep existing path

        # scatter files across 256 dirs
        # we use '/' in the db (even on windows)
        fname = sha[:2] + '/' + sha
        full_path = self._full_path(fname)
        dirname = os.path.dirname(full_path)
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        return fname, full_path
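    # e.g. a (hypothetical) sha '3f78...' yields fname '3f/3f78...', spreading
    # the filestore over at most 256 two-hex-digit directories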

    @api.model
    def _file_read(self, fname, bin_size=False):
        full_path = self._full_path(fname)
        r = ''
        try:
            if bin_size:
                r = human_size(os.path.getsize(full_path))
            else:
                with open(full_path, 'rb') as fd:
                    r = base64.b64encode(fd.read())
        except (IOError, OSError):
            _logger.info("_read_file reading %s", full_path, exc_info=True)
        return r

    @api.model
    def _file_write(self, value, checksum):
        bin_value = base64.b64decode(value)
        fname, full_path = self._get_path(bin_value, checksum)
        if not os.path.exists(full_path):
            try:
                with open(full_path, 'wb') as fp:
                    fp.write(bin_value)
                # add fname to checklist, in case the transaction aborts
                self._mark_for_gc(fname)
            except IOError:
                _logger.info("_file_write writing %s",
                             full_path,
                             exc_info=True)
        return fname

    @api.model
    def _file_delete(self, fname):
        # simply add fname to checklist, it will be garbage-collected later
        self._mark_for_gc(fname)

    def _mark_for_gc(self, fname):
        """ Add ``fname`` in a checklist for the filestore garbage collection. """
        # we use a spooldir: add an empty file in the subdirectory 'checklist'
        full_path = os.path.join(self._full_path('checklist'), fname)
        if not os.path.exists(full_path):
            dirname = os.path.dirname(full_path)
            if not os.path.isdir(dirname):
                with tools.ignore(OSError):
                    os.makedirs(dirname)
            open(full_path, 'ab').close()

    @api.model
    def _file_gc(self):
        """ Perform the garbage collection of the filestore. """
        if self._storage() != 'file':
            return

        # Continue in a new transaction. The LOCK statement below must be the
        # first one in the current transaction, otherwise the database snapshot
        # used by it may not contain the most recent changes made to the table
        # ir_attachment! Indeed, if concurrent transactions create attachments,
        # the LOCK statement will wait until those concurrent transactions end.
        # But this transaction will not see the new attachments if it has done
        # other requests before the LOCK (like the method _storage() above).
        cr = self._cr
        cr.commit()

        # prevent all concurrent updates on ir_attachment while collecting,
        # but only attempt to grab the lock for a little bit, otherwise it'd
        # start blocking other transactions. (will be retried later anyway)
        cr.execute("SET LOCAL lock_timeout TO '10s'")
        cr.execute("LOCK ir_attachment IN SHARE MODE")

        # retrieve the file names from the checklist
        checklist = {}
        for dirpath, _, filenames in os.walk(self._full_path('checklist')):
            dirname = os.path.basename(dirpath)
            for filename in filenames:
                fname = "%s/%s" % (dirname, filename)
                checklist[fname] = os.path.join(dirpath, filename)

        # determine which files to keep among the checklist
        whitelist = set()
        for names in cr.split_for_in_conditions(checklist):
            cr.execute(
                "SELECT store_fname FROM ir_attachment WHERE store_fname IN %s",
                [names])
            whitelist.update(row[0] for row in cr.fetchall())

        # remove garbage files, and clean up checklist
        removed = 0
        for fname, filepath in checklist.items():
            if fname not in whitelist:
                try:
                    os.unlink(self._full_path(fname))
                    removed += 1
                except (OSError, IOError):
                    _logger.info("_file_gc could not unlink %s",
                                 self._full_path(fname),
                                 exc_info=True)
            with tools.ignore(OSError):
                os.unlink(filepath)

        # commit to release the lock
        cr.commit()
        _logger.info("filestore gc %d checked, %d removed", len(checklist),
                     removed)

    @api.depends('store_fname', 'db_datas')
    def _compute_datas(self):
        bin_size = self._context.get('bin_size')
        for attach in self:
            if attach.store_fname:
                attach.datas = self._file_read(attach.store_fname, bin_size)
            else:
                attach.datas = attach.db_datas

    def _inverse_datas(self):
        for attach in self:
            vals = self._get_datas_related_values(attach.datas,
                                                  attach.mimetype)
            # take current location in filestore to possibly garbage-collect it
            fname = attach.store_fname
            # write as superuser, as user probably does not have write access
            super(IrAttachment, attach.sudo()).write(vals)
            if fname:
                self._file_delete(fname)

    def _get_datas_related_values(self, data, mimetype):
        # compute the fields that depend on datas
        bin_data = base64.b64decode(data) if data else b''
        values = {
            'file_size': len(bin_data),
            'checksum': self._compute_checksum(bin_data),
            'index_content': self._index(bin_data, mimetype),
            'store_fname': False,
            'db_datas': data,
        }
        if data and self._storage() != 'db':
            values['store_fname'] = self._file_write(data, values['checksum'])
            values['db_datas'] = False
        return values

    def _compute_checksum(self, bin_data):
        """ compute the checksum for the given datas
            :param bin_data : datas in its binary form
        """
        # an empty file has a checksum too (for caching)
        return hashlib.sha1(bin_data or b'').hexdigest()
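    # e.g. _compute_checksum(b'') == 'da39a3ee5e6b4b0d3255bfef95601890afd80709',
    # the SHA1 of the empty string, so even empty files get a cache key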

    def _compute_mimetype(self, values):
        """ compute the mimetype of the given values
            :param values : dict of values to create or write an ir_attachment
            :return mime : string indicating the mimetype, or application/octet-stream by default
        """
        mimetype = None
        if values.get('mimetype'):
            mimetype = values['mimetype']
        if not mimetype and values.get('name'):
            mimetype = mimetypes.guess_type(values['name'])[0]
        if not mimetype and values.get('url'):
            mimetype = mimetypes.guess_type(values['url'])[0]
        if values.get('datas') and (not mimetype
                                    or mimetype == 'application/octet-stream'):
            mimetype = guess_mimetype(base64.b64decode(values['datas']))
        return mimetype or 'application/octet-stream'

    def _check_contents(self, values):
        mimetype = values['mimetype'] = self._compute_mimetype(values)
        xml_like = 'ht' in mimetype or 'xml' in mimetype  # hta, html, xhtml, etc.
        user = self.env.context.get('binary_field_real_user', self.env.user)
        force_text = (xml_like and
                      (not user._is_system()
                       or self.env.context.get('attachments_mime_plainxml')))
        if force_text:
            values['mimetype'] = 'text/plain'
        return values

    @api.model
    def _index(self, bin_data, file_type):
        """ compute the index content of the given binary data.
            This is a python implementation of the unix command 'strings'.
            :param bin_data : datas in binary form
            :return index_content : string containing all the printable characters of the binary data
        """
        index_content = False
        if file_type:
            index_content = file_type.split('/')[0]
            if index_content == 'text':  # compute index_content only for text type
                words = re.findall(b"[\x20-\x7E]{4,}", bin_data)
                index_content = b"\n".join(words).decode('ascii')
        return index_content
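    # e.g. (hypothetical) _index(b'\x00\x01hello world\x00', 'text/plain')
    # returns 'hello world'; for non-text types only the main type is kept,
    # e.g. _index(b'...', 'image/png') returns 'image'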

    @api.model
    def get_serving_groups(self):
        """ An ir.attachment record may be used as a fallback in the
        http dispatch if its type field is set to "binary" and its url
        field is set as the request's url. Only the groups returned by
        this method are allowed to create and write on such records.
        """
        return ['base.group_system']

    name = fields.Char('Name', required=True)
    description = fields.Text('Description')
    res_name = fields.Char('Resource Name', compute='_compute_res_name')
    res_model = fields.Char(
        'Resource Model',
        readonly=True,
        help="The database object this attachment will be attached to.")
    res_field = fields.Char('Resource Field', readonly=True)
    res_id = fields.Many2oneReference(
        'Resource ID',
        model_field='res_model',
        readonly=True,
        help="The record id this is attached to.")
    company_id = fields.Many2one('res.company',
                                 string='Company',
                                 change_default=True,
                                 default=lambda self: self.env.company)
    type = fields.Selection(
        [('url', 'URL'), ('binary', 'File')],
        string='Type',
        required=True,
        default='binary',
        change_default=True,
        help=
        "You can either upload a file from your computer or copy/paste an internet link to your file."
    )
    url = fields.Char('Url', index=True, size=1024)
    public = fields.Boolean('Is public document')

    # for external access
    access_token = fields.Char('Access Token', groups="base.group_user")

    # the field 'datas' is computed and may use the other fields below
    datas = fields.Binary(string='File Content',
                          compute='_compute_datas',
                          inverse='_inverse_datas')
    db_datas = fields.Binary('Database Data', attachment=False)
    store_fname = fields.Char('Stored Filename')
    file_size = fields.Integer('File Size', readonly=True)
    checksum = fields.Char("Checksum/SHA1", size=40, index=True, readonly=True)
    mimetype = fields.Char('Mime Type', readonly=True)
    index_content = fields.Text('Indexed Content',
                                readonly=True,
                                prefetch=False)

    def _auto_init(self):
        res = super(IrAttachment, self)._auto_init()
        tools.create_index(self._cr, 'ir_attachment_res_idx', self._table,
                           ['res_model', 'res_id'])
        return res

    @api.constrains('type', 'url')
    def _check_serving_attachments(self):
        if self.env.is_admin():
            return
        for attachment in self:
            # restrict writing on attachments that could be served by the
            # ir.http's dispatch exception handling
            # XDO note: this should be done in check(write), constraints for access rights?
            # XDO note: if read on sudo, read twice, one for constraints, one for _inverse_datas as user
            if attachment.type == 'binary' and attachment.url:
                has_group = self.env.user.has_group
                if not any(
                    [has_group(g) for g in attachment.get_serving_groups()]):
                    raise ValidationError(
                        "Sorry, you are not allowed to write on this document")

    @api.model
    def check(self, mode, values=None):
        """Restricts the access to an ir.attachment, according to referred model
        In the 'document' module, it is overridden to relax this hard rule, since
        more complex ones apply there.
        """
        if self.env.is_superuser():
            return True
        # collect the records to check (by model)
        model_ids = defaultdict(set)  # {model_name: set(ids)}
        require_employee = False
        if self:
            # DLE P173: `test_01_portal_attachment`
            self.env['ir.attachment'].flush(
                ['res_model', 'res_id', 'create_uid', 'public', 'res_field'])
            self._cr.execute(
                'SELECT res_model, res_id, create_uid, public, res_field FROM ir_attachment WHERE id IN %s',
                [tuple(self.ids)])
            for res_model, res_id, create_uid, public, res_field in self._cr.fetchall(
            ):
                if not self.env.is_system() and res_field:
                    raise AccessError(
                        _("Sorry, you are not allowed to access this document."
                          ))
                if public and mode == 'read':
                    continue
                if not (res_model and res_id):
                    if create_uid != self._uid:
                        require_employee = True
                    continue
                model_ids[res_model].add(res_id)
        if values and values.get('res_model') and values.get('res_id'):
            model_ids[values['res_model']].add(values['res_id'])

        # check access rights on the records
        for res_model, res_ids in model_ids.items():
            # ignore attachments that are not attached to a resource anymore
            # when checking access rights (resource was deleted but attachment
            # was not)
            if res_model not in self.env:
                require_employee = True
                continue
            elif res_model == 'res.users' and len(
                    res_ids) == 1 and self._uid == list(res_ids)[0]:
                # by default a user cannot write on itself, despite the list of writeable fields
                # e.g. in the case of a user inserting an image into his image signature
                # we need to bypass this check which would needlessly throw us away
                continue
            records = self.env[res_model].browse(res_ids).exists()
            if len(records) < len(res_ids):
                require_employee = True
            # For related models, check if we can write to the model, as unlinking
            # and creating attachments can be seen as an update to the model
            access_mode = 'write' if mode in ('create', 'unlink') else mode
            records.check_access_rights(access_mode)
            records.check_access_rule(access_mode)

        if require_employee:
            if not (self.env.is_admin()
                    or self.env.user.has_group('base.group_user')):
                raise AccessError(
                    _("Sorry, you are not allowed to access this document."))

    def _read_group_allowed_fields(self):
        return [
            'type', 'company_id', 'res_id', 'create_date', 'create_uid',
            'name', 'mimetype', 'id', 'url', 'res_field', 'res_model'
        ]

    @api.model
    def read_group(self,
                   domain,
                   fields,
                   groupby,
                   offset=0,
                   limit=None,
                   orderby=False,
                   lazy=True):
        """Override read_group to add res_field=False in domain if not present."""
        if not fields:
            raise AccessError(
                _("Sorry, you must provide fields to read on attachments"))
        if any('(' in field for field in fields + groupby):
            raise AccessError(
                _("Sorry, the syntax 'name:agg(field)' is not available for attachments"
                  ))
        if not any(item[0] in ('id', 'res_field') for item in domain):
            domain.insert(0, ('res_field', '=', False))
        groupby = [groupby] if isinstance(groupby, str) else groupby
        allowed_fields = self._read_group_allowed_fields()
        fields_set = set(field.split(':')[0] for field in fields + groupby)
        if not self.env.is_system() and (
                not fields or fields_set.difference(allowed_fields)):
            raise AccessError(
                _("Sorry, you are not allowed to access these fields on attachments."
                  ))
        return super().read_group(domain,
                                  fields,
                                  groupby,
                                  offset=offset,
                                  limit=limit,
                                  orderby=orderby,
                                  lazy=lazy)

    @api.model
    def _search(self,
                args,
                offset=0,
                limit=None,
                order=None,
                count=False,
                access_rights_uid=None):
        # add res_field=False in domain if not present; the arg[0] trick below
        # works for domain items and '&'/'|'/'!' operators too
        if not any(arg[0] in ('id', 'res_field') for arg in args):
            args.insert(0, ('res_field', '=', False))

        ids = super(IrAttachment,
                    self)._search(args,
                                  offset=offset,
                                  limit=limit,
                                  order=order,
                                  count=False,
                                  access_rights_uid=access_rights_uid)

        if self.env.is_superuser():
            # rules do not apply for the superuser
            return len(ids) if count else ids

        if not ids:
            return 0 if count else []

        # Work with a set, as list.remove() is prohibitive for large lists of documents
        # (takes 20+ seconds on a db with 100k docs during search_count()!)
        orig_ids = ids
        ids = set(ids)

        # For attachments, the permissions of the document they are attached to
        # apply, so we must remove attachments for which the user cannot access
        # the linked document.
        # Use pure SQL rather than read() as it is about 50% faster for large dbs (100k+ docs),
        # and the permissions are checked in super() and below anyway.
        model_attachments = defaultdict(
            lambda: defaultdict(set))  # {res_model: {res_id: set(ids)}}
        binary_fields_attachments = set()
        self._cr.execute(
            """SELECT id, res_model, res_id, public, res_field FROM ir_attachment WHERE id IN %s""",
            [tuple(ids)])
        for row in self._cr.dictfetchall():
            if not row['res_model'] or row['public']:
                continue
            # model_attachments = {res_model: {res_id: set(ids)}}
            model_attachments[row['res_model']][row['res_id']].add(row['id'])
            # Should not retrieve binary fields attachments
            if row['res_field']:
                binary_fields_attachments.add(row['id'])

        if binary_fields_attachments:
            ids.difference_update(binary_fields_attachments)

        # To avoid multiple queries for each attachment found, checks are
        # performed in batch as much as possible.
        for res_model, targets in model_attachments.items():
            if res_model not in self.env:
                continue
            if not self.env[res_model].check_access_rights('read', False):
                # remove all corresponding attachment ids
                ids.difference_update(itertools.chain(*targets.values()))
                continue
            # filter ids according to what access rules permit
            target_ids = list(targets)
            allowed = self.env[res_model].with_context(
                active_test=False).search([('id', 'in', target_ids)])
            for res_id in set(target_ids).difference(allowed.ids):
                ids.difference_update(targets[res_id])

        # sort result according to the original sort ordering
        result = [id for id in orig_ids if id in ids]

        # If the original search reached the limit, it is important that the
        # filtered record set does so too. When a JS view receives a record
        # set whose length is below the limit, it assumes it has reached the
        # last page.
        if len(orig_ids) == limit and len(result) < len(orig_ids):
            result.extend(
                self._search(args,
                             offset=offset + len(orig_ids),
                             limit=limit,
                             order=order,
                             count=count,
                             access_rights_uid=access_rights_uid)[:limit -
                                                                  len(result)])

        return len(result) if count else list(result)

    def _read(self, fields):
        self.check('read')
        return super(IrAttachment, self)._read(fields)

    def write(self, vals):
        self.check('write', values=vals)
        # remove computed field depending of datas
        for field in ('file_size', 'checksum'):
            vals.pop(field, False)
        if 'mimetype' in vals or 'datas' in vals:
            vals = self._check_contents(vals)
        return super(IrAttachment, self).write(vals)

    def copy(self, default=None):
        self.check('write')
        return super(IrAttachment, self).copy(default)

    def unlink(self):
        if not self:
            return True
        self.check('unlink')

        # First delete in the database, *then* in the filesystem if the
        # database allowed it. Helps avoid errors when concurrent transactions
        # are deleting the same file, and some of the transactions are
        # rolled back by PostgreSQL (due to concurrent updates detection).
        to_delete = set(attach.store_fname for attach in self
                        if attach.store_fname)
        res = super(IrAttachment, self).unlink()
        for file_path in to_delete:
            self._file_delete(file_path)

        return res

    @api.model_create_multi
    def create(self, vals_list):
        record_tuple_set = set()
        for values in vals_list:
            # remove computed field depending of datas
            for field in ('file_size', 'checksum'):
                values.pop(field, False)
            values = self._check_contents(values)
            if 'datas' in values:
                values.update(
                    self._get_datas_related_values(values.pop('datas'),
                                                   values['mimetype']))
            # 'check()' only uses res_model and res_id from values, and makes an exists() check.
            # We can group the values by model, res_id to make only one query when
            # creating multiple attachments on a single record.
            record_tuple = (values.get('res_model'), values.get('res_id'))
            record_tuple_set.add(record_tuple)
        for record_tuple in record_tuple_set:
            (res_model, res_id) = record_tuple
            self.check('create',
                       values={
                           'res_model': res_model,
                           'res_id': res_id
                       })
        return super(IrAttachment, self).create(vals_list)

    def _post_add_create(self):
        pass

    def generate_access_token(self):
        tokens = []
        for attachment in self:
            if attachment.access_token:
                tokens.append(attachment.access_token)
                continue
            access_token = self._generate_access_token()
            attachment.write({'access_token': access_token})
            tokens.append(access_token)
        return tokens

    def _generate_access_token(self):
        return str(uuid.uuid4())

    @api.model
    def action_get(self):
        return self.env['ir.actions.act_window'].for_xml_id(
            'base', 'action_attachment')

    @api.model
    def get_serve_attachment(self,
                             url,
                             extra_domain=None,
                             extra_fields=None,
                             order=None):
        domain = [('type', '=', 'binary'),
                  ('url', '=', url)] + (extra_domain or [])
        fieldNames = ['__last_update', 'datas', 'mimetype'] + (extra_fields
                                                               or [])
        return self.search_read(domain, fieldNames, order=order, limit=1)

    @api.model
    def get_attachment_by_key(self, key, extra_domain=None, order=None):
        domain = [('key', '=', key)] + (extra_domain or [])
        return self.search(domain, order=order, limit=1)
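
# A hedged sketch of the override hook described in the class docstring above:
# a hypothetical storage engine keyed by an 's3://' pseudo-URI. `s3_client` is
# a placeholder, not a real API; only the override points are what matters.
class IrAttachmentS3(models.Model):
    _inherit = 'ir.attachment'

    @api.model
    def _file_read(self, fname, bin_size=False):
        if fname.startswith('s3://'):
            data = s3_client.get(fname)  # hypothetical client call
            return human_size(len(data)) if bin_size else base64.b64encode(data)
        return super(IrAttachmentS3, self)._file_read(fname, bin_size=bin_size)

    @api.model
    def _file_write(self, value, checksum):
        # e.g. ir_attachment.location = 's3' (assumed configuration value)
        if self._storage() == 's3':
            fname = 's3://%s' % checksum
            s3_client.put(fname, base64.b64decode(value))  # hypothetical
            return fname
        return super(IrAttachmentS3, self)._file_write(value, checksum)

    @api.model
    def _file_delete(self, fname):
        if fname.startswith('s3://'):
            s3_client.delete(fname)  # hypothetical
            return
        super(IrAttachmentS3, self)._file_delete(fname)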
Example #23
class IrActionsActClient(models.Model):
    _name = 'ir.actions.client'
    _description = 'Client Action'
    _inherit = 'ir.actions.actions'
    _table = 'ir_act_client'
    _sequence = 'ir_actions_id_seq'
    _order = 'name'

    name = fields.Char(string='Action Name', translate=True)
    type = fields.Char(default='ir.actions.client')

    tag = fields.Char(string='Client action tag',
                      required=True,
                      help="An arbitrary string, interpreted by the client"
                      " according to its own needs and wishes. There "
                      "is no central tag repository across clients.")
    target = fields.Selection([('current', 'Current Window'),
                               ('new', 'New Window'),
                               ('fullscreen', 'Full Screen'),
                               ('main', 'Main action of Current Window')],
                              default="current",
                              string='Target Window')
    res_model = fields.Char(
        string='Destination Model',
        help="Optional model, mostly used for needactions.")
    context = fields.Char(
        string='Context Value',
        default="{}",
        required=True,
        help=
        "Context dictionary as Python expression, empty by default (Default: {})"
    )
    params = fields.Binary(compute='_compute_params',
                           inverse='_inverse_params',
                           string='Supplementary arguments',
                           help="Arguments sent to the client along with "
                           "the view tag")
    params_store = fields.Binary(string='Params storage',
                                 readonly=True,
                                 attachment=False)

    @api.depends('params_store')
    def _compute_params(self):
        self_bin = self.with_context(bin_size=False,
                                     bin_size_params_store=False)
        for record, record_bin in zip(self, self_bin):
            record.params = record_bin.params_store and safe_eval(
                record_bin.params_store, {'uid': self._uid})

    def _inverse_params(self):
        for record in self:
            params = record.params
            record.params_store = repr(params) if isinstance(params,
                                                             dict) else params
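    # Hypothetical round trip: assigning record.params = {'menu_id': 42} stores
    # the string "{'menu_id': 42}" in params_store; _compute_params later
    # safe_eval()s it back into a dict.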

    def _get_default_form_view(self):
        doc = super(IrActionsActClient, self)._get_default_form_view()
        params = doc.find(".//field[@name='params']")
        params.getparent().remove(params)
        params_store = doc.find(".//field[@name='params_store']")
        params_store.getparent().remove(params_store)
        return doc
Example #24
class Country(models.Model):
    _name = 'res.country'
    _description = 'Country'
    _order = 'name'

    name = fields.Char(string='Country Name',
                       required=True,
                       translate=True,
                       help='The full name of the country.')
    code = fields.Char(
        string='Country Code',
        size=2,
        help=
        'The ISO country code in two chars. \nYou can use this field for quick search.'
    )
    address_format = fields.Text(
        string="Layout in Reports",
        help="Display format to use for addresses belonging to this country.\n\n"
        "You can use python-style string pattern with all the fields of the address "
        "(for example, use '%(street)s' to display the field 'street') plus"
        "\n%(state_name)s: the name of the state"
        "\n%(state_code)s: the code of the state"
        "\n%(country_name)s: the name of the country"
        "\n%(country_code)s: the code of the country",
        default=
        '%(street)s\n%(street2)s\n%(city)s %(state_code)s %(zip)s\n%(country_name)s'
    )
    address_view_id = fields.Many2one(
        comodel_name='ir.ui.view',
        string="Input View",
        domain=[('model', '=', 'res.partner'), ('type', '=', 'form')],
        help=
        "Use this field if you want to replace the usual way to encode a complete address. "
        "Note that the address_format field is used to modify the way to display addresses "
        "(in reports for example), while this field is used to modify the input form for "
        "addresses.")
    currency_id = fields.Many2one('res.currency', string='Currency')
    image = fields.Binary(attachment=True)
    phone_code = fields.Integer(string='Country Calling Code')
    country_group_ids = fields.Many2many('res.country.group',
                                         'res_country_res_country_group_rel',
                                         'res_country_id',
                                         'res_country_group_id',
                                         string='Country Groups')
    state_ids = fields.One2many('res.country.state',
                                'country_id',
                                string='States')
    name_position = fields.Selection(
        [
            ('before', 'Before Address'),
            ('after', 'After Address'),
        ],
        string="Customer Name Position",
        default="before",
        help=
        "Determines where the customer/company name should be placed, i.e. after or before the address."
    )
    vat_label = fields.Char(
        string='Vat Label',
        translate=True,
        help="Use this field if you want to change vat label.")

    _sql_constraints = [('name_uniq', 'unique (name)',
                         'The name of the country must be unique !'),
                        ('code_uniq', 'unique (code)',
                         'The code of the country must be unique !')]

    name_search = location_name_search

    @api.model_create_multi
    def create(self, vals_list):
        for vals in vals_list:
            if vals.get('code'):
                vals['code'] = vals['code'].upper()
        return super(Country, self).create(vals_list)

    def write(self, vals):
        if vals.get('code'):
            vals['code'] = vals['code'].upper()
        return super(Country, self).write(vals)

    def get_address_fields(self):
        self.ensure_one()
        return re.findall(r'\((.+?)\)', self.address_format)
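
# With the default address_format defined above, get_address_fields() returns:
#   ['street', 'street2', 'city', 'state_code', 'zip', 'country_name']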
Example #25
class Property(models.Model):
    _name = 'ir.property'
    _description = 'Company Property'

    name = fields.Char(index=True)
    res_id = fields.Char(
        string='Resource',
        index=True,
        help="If not set, acts as a default value for new resources",
    )
    company_id = fields.Many2one('res.company', string='Company', index=True)
    fields_id = fields.Many2one('ir.model.fields',
                                string='Field',
                                ondelete='cascade',
                                required=True,
                                index=True)
    value_float = fields.Float()
    value_integer = fields.Integer()
    value_text = fields.Text()  # will contain (char, text)
    value_binary = fields.Binary(attachment=False)
    value_reference = fields.Char()
    value_datetime = fields.Datetime()
    type = fields.Selection([
        ('char', 'Char'),
        ('float', 'Float'),
        ('boolean', 'Boolean'),
        ('integer', 'Integer'),
        ('text', 'Text'),
        ('binary', 'Binary'),
        ('many2one', 'Many2One'),
        ('date', 'Date'),
        ('datetime', 'DateTime'),
        ('selection', 'Selection'),
    ],
                            required=True,
                            default='many2one',
                            index=True)

    def _update_values(self, values):
        if 'value' not in values:
            return values
        value = values.pop('value')

        prop = None
        type_ = values.get('type')
        if not type_:
            if self:
                prop = self[0]
                type_ = prop.type
            else:
                type_ = self._fields['type'].default(self)

        field = TYPE2FIELD.get(type_)
        if not field:
            raise UserError(_('Invalid type'))

        if field == 'value_reference':
            if not value:
                value = False
            elif isinstance(value, models.BaseModel):
                value = '%s,%d' % (value._name, value.id)
            elif isinstance(value, int):
                field_id = values.get('fields_id')
                if not field_id:
                    if not prop:
                        raise ValueError()
                    field_id = prop.fields_id
                else:
                    field_id = self.env['ir.model.fields'].browse(field_id)

                value = '%s,%d' % (field_id.sudo().relation, value)

        values[field] = value
        return values

    def write(self, values):
        # if any of the records we're writing on has a res_id=False *or*
        # we're writing a res_id=False on any record
        default_set = False
        if self._ids:
            self.env.cr.execute(
                'SELECT EXISTS (SELECT 1 FROM ir_property WHERE id in %s AND res_id IS NULL)',
                [self._ids])
            default_set = self.env.cr.rowcount == 1 or any(
                v.get('res_id') is False for v in values)
        r = super(Property, self).write(self._update_values(values))
        if default_set:
            # DLE P44: test `test_27_company_dependent`
            # Easy solution, need to flush write when changing a property.
            # Maybe it would be better to be able to compute all impacted cache value and update those instead
            # Then clear_caches must be removed as well.
            self.flush()
            self.clear_caches()
        return r

    @api.model_create_multi
    def create(self, vals_list):
        vals_list = [self._update_values(vals) for vals in vals_list]
        created_default = any(not v.get('res_id') for v in vals_list)
        r = super(Property, self).create(vals_list)
        if created_default:
            # DLE P44: test `test_27_company_dependent`
            self.flush()
            self.clear_caches()
        return r

    def unlink(self):
        default_deleted = False
        if self._ids:
            self.env.cr.execute(
                'SELECT EXISTS (SELECT 1 FROM ir_property WHERE id in %s)',
                [self._ids])
            default_deleted = self.env.cr.rowcount == 1
        r = super().unlink()
        if default_deleted:
            self.clear_caches()
        return r

    def get_by_record(self):
        self.ensure_one()
        if self.type in ('char', 'text', 'selection'):
            return self.value_text
        elif self.type == 'float':
            return self.value_float
        elif self.type == 'boolean':
            return bool(self.value_integer)
        elif self.type == 'integer':
            return self.value_integer
        elif self.type == 'binary':
            return self.value_binary
        elif self.type == 'many2one':
            if not self.value_reference:
                return False
            model, resource_id = self.value_reference.split(',')
            return self.env[model].browse(int(resource_id)).exists()
        elif self.type == 'datetime':
            return self.value_datetime
        elif self.type == 'date':
            if not self.value_datetime:
                return False
            return fields.Date.to_string(
                fields.Datetime.from_string(self.value_datetime))
        return False

    @api.model
    def get(self, name, model, res_id=False):
        if not res_id:
            t, v = self._get_default_property(name, model)
            if not v or t != 'many2one':
                return v
            return self.env[v[0]].browse(v[1])

        p = self._get_property(name, model, res_id=res_id)
        if p:
            return p.get_by_record()
        return False

    # only cache Property.get(res_id=False); caching per-record lookups would
    # be sub-optimal
    COMPANY_KEY = "self.env.context.get('force_company') or self.env.company.id"

    @ormcache(COMPANY_KEY, 'name', 'model')
    def _get_default_property(self, name, model):
        prop = self._get_property(name, model, res_id=False)
        if not prop:
            return None, False
        v = prop.get_by_record()
        if prop.type != 'many2one':
            return prop.type, v
        return 'many2one', v and (v._name, v.id)

    def _get_property(self, name, model, res_id):
        domain = self._get_domain(name, model)
        if domain is not None:
            domain = [('res_id', '=', res_id)] + domain
            # search with company_id ascending, to make sure that properties
            # specific to a company are returned first
            return self.search(domain, limit=1, order='company_id')
        return self.browse(())

    def _get_domain(self, prop_name, model):
        field_id = self.env['ir.model.fields']._get_id(model, prop_name)
        if not field_id:
            return None
        company_id = self._context.get('force_company') or self.env.company.id
        return [('fields_id', '=', field_id),
                ('company_id', 'in', [company_id, False])]

    @api.model
    def get_multi(self, name, model, ids):
        """ Read the property field `name` for the records of model `model` with
            the given `ids`, and return a dictionary mapping `ids` to their
            corresponding value.
        """
        if not ids:
            return {}

        field = self.env[model]._fields[name]
        field_id = self.env['ir.model.fields']._get_id(model, name)
        company_id = (self._context.get('force_company')
                      or self.env.company.id)

        if field.type == 'many2one':
            comodel = self.env[field.comodel_name]
            model_pos = len(model) + 2
            value_pos = len(comodel._name) + 2
            # retrieve values: both p.res_id and p.value_reference are formatted
            # as "<rec._name>,<rec.id>"; the purpose of the LEFT JOIN is to
            # return the value id if it exists, NULL otherwise
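            # e.g. (hypothetical) model='res.partner' gives model_pos = 13, so
            # substr('res.partner,42', 13)::integer yields 42 (substr is 1-based)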
            query = """
                SELECT substr(p.res_id, %s)::integer, r.id
                FROM ir_property p
                LEFT JOIN {} r ON substr(p.value_reference, %s)::integer=r.id
                WHERE p.fields_id=%s
                    AND (p.company_id=%s OR p.company_id IS NULL)
                    AND (p.res_id IN %s OR p.res_id IS NULL)
                ORDER BY p.company_id NULLS FIRST
            """.format(comodel._table)
            params = [model_pos, value_pos, field_id, company_id]
            clean = comodel.browse

        elif field.type in TYPE2FIELD:
            model_pos = len(model) + 2
            # retrieve values: p.res_id is formatted as "<rec._name>,<rec.id>"
            query = """
                SELECT substr(p.res_id, %s)::integer, p.{}
                FROM ir_property p
                WHERE p.fields_id=%s
                    AND (p.company_id=%s OR p.company_id IS NULL)
                    AND (p.res_id IN %s OR p.res_id IS NULL)
                ORDER BY p.company_id NULLS FIRST
            """.format(TYPE2FIELD[field.type])
            params = [model_pos, field_id, company_id]
            clean = TYPE2CLEAN[field.type]

        else:
            return dict.fromkeys(ids, False)

        # retrieve values
        cr = self.env.cr
        result = {}
        refs = {"%s,%s" % (model, id) for id in ids}
        for sub_refs in cr.split_for_in_conditions(refs):
            cr.execute(query, params + [sub_refs])
            result.update(cr.fetchall())

        # determine all values and format them
        default = result.get(None, None)
        return {id: clean(result.get(id, default)) for id in ids}

    @api.model
    def set_multi(self, name, model, values, default_value=None):
        """ Assign the property field `name` for the records of model `model`
            with `values` (dictionary mapping record ids to their value).
            If the value for a given record is the same as the default
            value, the property entry will not be stored, to avoid bloating
            the database.
            If `default_value` is provided, that value will be used instead
            of the computed default value, to determine whether the value
            for a record should be stored or not.
        """
        def clean(value):
            return value.id if isinstance(value, models.BaseModel) else value

        if not values:
            return

        if default_value is None:
            domain = self._get_domain(name, model)
            if domain is None:
                raise Exception()
            # retrieve the default value for the field
            default_value = clean(self.get(name, model))

        # retrieve the properties corresponding to the given record ids
        field_id = self.env['ir.model.fields']._get_id(model, name)
        company_id = self.env.context.get(
            'force_company') or self.env.company.id
        refs = {('%s,%s' % (model, id)): id for id in values}
        props = self.search([
            ('fields_id', '=', field_id),
            ('company_id', '=', company_id),
            ('res_id', 'in', list(refs)),
        ])

        # modify existing properties
        for prop in props:
            id = refs.pop(prop.res_id)
            value = clean(values[id])
            if value == default_value:
                # avoid prop.unlink(), as it clears the record cache that can
                # contain the value of other properties to set on record!
                prop.check_access_rights('unlink')
                prop.check_access_rule('unlink')
                self._cr.execute("DELETE FROM ir_property WHERE id=%s",
                                 [prop.id])
            elif value != clean(prop.get_by_record()):
                prop.write({'value': value})

        # create new properties for records that do not have one yet
        vals_list = []
        for ref, id in refs.items():
            value = clean(values[id])
            if value != default_value:
                vals_list.append({
                    'fields_id': field_id,
                    'company_id': company_id,
                    'res_id': ref,
                    'name': name,
                    'value': value,
                    'type': self.env[model]._fields[name].type,
                })
        self.create(vals_list)

    @api.model
    def search_multi(self, name, model, operator, value):
        """ Return a domain for the records that match the given condition. """
        default_matches = False
        include_zero = False

        field = self.env[model]._fields[name]
        if field.type == 'many2one':
            comodel = field.comodel_name

            def makeref(value):
                return value and '%s,%s' % (comodel, value)

            if operator == "=":
                value = makeref(value)
                # if searching properties not set, search those not in those set
                if value is False:
                    default_matches = True
            elif operator in ('!=', '<=', '<', '>', '>='):
                value = makeref(value)
            elif operator in ('in', 'not in'):
                value = [makeref(v) for v in value]
            elif operator in ('=like', '=ilike', 'like', 'not like', 'ilike',
                              'not ilike'):
                # most probably inefficient... but correct
                target = self.env[comodel]
                target_names = target.name_search(value,
                                                  operator=operator,
                                                  limit=None)
                target_ids = [n[0] for n in target_names]
                operator, value = 'in', [makeref(v) for v in target_ids]
        elif field.type in ('integer', 'float'):
            # No ir.property record is created when an integer/float value is
            # equal to 0 (the default). To match records whose property equals 0,
            # negate the operator, search for the records whose property does NOT
            # match, and return the opposite domain ('not in' those records).
            if value == 0 and operator == '=':
                operator = '!='
                include_zero = True
            elif value <= 0 and operator == '>=':
                operator = '<'
                include_zero = True
            elif value < 0 and operator == '>':
                operator = '<='
                include_zero = True
            elif value >= 0 and operator == '<=':
                operator = '>'
                include_zero = True
            elif value > 0 and operator == '<':
                operator = '>='
                include_zero = True

        # retrieve the properties that match the condition
        domain = self._get_domain(name, model)
        if domain is None:
            raise Exception()
        props = self.search(domain +
                            [(TYPE2FIELD[field.type], operator, value)])

        # retrieve the records corresponding to the properties that match
        good_ids = []
        for prop in props:
            if prop.res_id:
                res_model, res_id = prop.res_id.split(',')
                good_ids.append(int(res_id))
            else:
                default_matches = True

        if include_zero:
            return [('id', 'not in', good_ids)]
        elif default_matches:
            # exclude all records with a property that does not match
            all_ids = []
            props = self.search(domain + [('res_id', '!=', False)])
            for prop in props:
                res_model, res_id = prop.res_id.split(',')
                all_ids.append(int(res_id))
            bad_ids = list(set(all_ids) - set(good_ids))
            return [('id', 'not in', bad_ids)]
        else:
            return [('id', 'in', good_ids)]
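
A minimal usage sketch of the two methods above (hedged: property_note is a
hypothetical property-backed field registered on res.partner):

    # store one value per record; values equal to the default are not stored
    env['ir.property'].set_multi(
        'property_note', 'res.partner',
        {partner.id: 'VIP' for partner in partners},
    )
    # translate a property condition into a plain domain on res.partner
    domain = env['ir.property'].search_multi('property_note', 'res.partner', '=', 'VIP')
    vip_partners = env['res.partner'].search(domain)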
Example #26
0
class BaseDocumentLayout(models.TransientModel):
    """
    Customise the company document layout and display a live preview
    """

    _name = 'base.document.layout'
    _description = 'Company Document Layout'

    company_id = fields.Many2one('res.company',
                                 default=lambda self: self.env.company,
                                 required=True)

    logo = fields.Binary(related='company_id.logo', readonly=False)
    preview_logo = fields.Binary(related='logo', string="Preview logo")
    report_header = fields.Text(related='company_id.report_header',
                                readonly=False)
    report_footer = fields.Text(related='company_id.report_footer',
                                readonly=False)
    paperformat_id = fields.Many2one(related='company_id.paperformat_id',
                                     readonly=False)
    external_report_layout_id = fields.Many2one(
        related='company_id.external_report_layout_id', readonly=False)

    font = fields.Selection(related='company_id.font', readonly=False)
    primary_color = fields.Char(related='company_id.primary_color',
                                readonly=False)
    secondary_color = fields.Char(related='company_id.secondary_color',
                                  readonly=False)

    custom_colors = fields.Boolean(compute="_compute_custom_colors",
                                   readonly=False)
    logo_primary_color = fields.Char(compute="_compute_logo_colors")
    logo_secondary_color = fields.Char(compute="_compute_logo_colors")

    report_layout_id = fields.Many2one('report.layout')
    preview = fields.Html(compute='_compute_preview')

    @api.depends('logo_primary_color', 'logo_secondary_color', 'primary_color',
                 'secondary_color')
    def _compute_custom_colors(self):
        for wizard in self:
            logo_primary = wizard.logo_primary_color or ''
            logo_secondary = wizard.logo_secondary_color or ''
            # Force lower case on color to ensure that FF01AA == ff01aa
            wizard.custom_colors = (
                wizard.logo and wizard.primary_color and wizard.secondary_color
                and
                not (wizard.primary_color.lower() == logo_primary.lower() and
                     wizard.secondary_color.lower() == logo_secondary.lower()))

    @api.depends('logo')
    def _compute_logo_colors(self):
        for wizard in self:
            if wizard._context.get('bin_size'):
                wizard_for_image = wizard.with_context(bin_size=False)
            else:
                wizard_for_image = wizard
            wizard.logo_primary_color, wizard.logo_secondary_color = wizard_for_image._parse_logo_colors(
            )

    @api.depends('report_layout_id', 'logo', 'font', 'primary_color',
                 'secondary_color')
    def _compute_preview(self):
        """ compute a qweb based preview to display on the wizard """
        for wizard in self:
            if wizard.report_layout_id:
                ir_qweb = wizard.env['ir.qweb']
                wizard.preview = ir_qweb.render('base.layout_preview',
                                                {'company': wizard})
            else:
                wizard.preview = False

    @api.onchange('company_id')
    def _onchange_company_id(self):
        for wizard in self:
            wizard.logo = wizard.company_id.logo
            wizard.report_header = wizard.company_id.report_header
            wizard.report_footer = wizard.company_id.report_footer
            wizard.paperformat_id = wizard.company_id.paperformat_id
            wizard.external_report_layout_id = wizard.company_id.external_report_layout_id
            wizard.font = wizard.company_id.font
            wizard.primary_color = wizard.company_id.primary_color
            wizard.secondary_color = wizard.company_id.secondary_color
            wizard_layout = wizard.env["report.layout"].search([
                ('view_id.key', '=',
                 wizard.company_id.external_report_layout_id.key)
            ])
            wizard.report_layout_id = wizard_layout or wizard_layout.search(
                [], limit=1)

            if not wizard.primary_color:
                wizard.primary_color = wizard.logo_primary_color or DEFAULT_PRIMARY
            if not wizard.secondary_color:
                wizard.secondary_color = wizard.logo_secondary_color or DEFAULT_SECONDARY

    @api.onchange('custom_colors')
    def _onchange_custom_colors(self):
        for wizard in self:
            if wizard.logo and not wizard.custom_colors:
                wizard.primary_color = wizard.logo_primary_color or DEFAULT_PRIMARY
                wizard.secondary_color = wizard.logo_secondary_color or DEFAULT_SECONDARY

    @api.onchange('report_layout_id')
    def _onchange_report_layout_id(self):
        for wizard in self:
            wizard.external_report_layout_id = wizard.report_layout_id.view_id

    @api.onchange('logo')
    def _onchange_logo(self):
        for wizard in self:
            # If the user puts the original image back, keep the existing colors
            company = wizard.company_id
            # at that point wizard.logo has been assigned the value present in DB
            if wizard.logo == company.logo and company.primary_color and company.secondary_color:
                continue

            if wizard.logo_primary_color:
                wizard.primary_color = wizard.logo_primary_color
            if wizard.logo_secondary_color:
                wizard.secondary_color = wizard.logo_secondary_color

    def _parse_logo_colors(self, logo=None, white_threshold=225):
        """
        Identifies dominant colors

        First resizes the original image to improve performance, then discards
        transparent colors and white-ish colors, then calls the averaging
        method twice to evaluate both primary and secondary colors.

        :param logo: alternate logo to process
        :param white_threshold: maximum per-channel RGB value above which a
            color is considered white-ish and discarded

        :return: hex values of the primary and secondary colors
        """
        self.ensure_one()
        logo = logo or self.logo
        if not logo:
            return False, False

        # The "===" gives different base64 encoding a correct padding
        logo += b'===' if type(logo) == bytes else '==='
        try:
            # Catches exceptions caused by logo not being an image
            image = tools.image_fix_orientation(tools.base64_to_image(logo))
        except Exception:
            return False, False

        base_w, base_h = image.size
        w = int(50 * base_w / base_h)
        h = 50

        # Converts to RGBA if no alpha detected
        image_converted = image.convert(
            'RGBA') if 'A' not in image.getbands() else image
        image_resized = image_converted.resize((w, h))

        colors = []
        for color in image_resized.getcolors(w * h):
            if not (color[1][0] > white_threshold
                    and color[1][1] > white_threshold
                    and color[1][2] > white_threshold) and color[1][3] > 0:
                colors.append(color)

        if not colors:  # May happen when the whole image is white
            return False, False
        primary, remaining = tools.average_dominant_color(colors)
        secondary = tools.average_dominant_color(
            remaining)[0] if len(remaining) > 0 else primary

        # Lightness and saturation are calculated here.
        # - If both colors have a similar lightness, the most colorful becomes primary
        # - When the difference in lightness is too great, the brightest color becomes primary
        l_primary = tools.get_lightness(primary)
        l_secondary = tools.get_lightness(secondary)
        if (l_primary < 0.2 and l_secondary < 0.2) or (l_primary >= 0.2
                                                       and l_secondary >= 0.2):
            s_primary = tools.get_saturation(primary)
            s_secondary = tools.get_saturation(secondary)
            if s_primary < s_secondary:
                primary, secondary = secondary, primary
        elif l_secondary > l_primary:
            primary, secondary = secondary, primary

        return tools.rgb_to_hex(primary), tools.rgb_to_hex(secondary)

    @api.model
    def action_open_base_document_layout(self, action_ref=None):
        if not action_ref:
            action_ref = 'base.action_base_document_layout_configurator'
        return self.env.ref(action_ref).read()[0]

    def document_layout_save(self):
        # meant to be overridden
        return self.env.context.get('report_action') or {
            'type': 'ir.actions.act_window_close'
        }
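
The color-extraction logic of _parse_logo_colors can be exercised outside the
wizard. A self-contained sketch (assuming Pillow is installed; picking the most
frequent color stands in for the averaging helpers in odoo.tools):

    from PIL import Image

    def dominant_color(path, white_threshold=225):
        image = Image.open(path)
        if 'A' not in image.getbands():
            image = image.convert('RGBA')
        w, h = int(50 * image.size[0] / image.size[1]), 50
        image = image.resize((w, h))
        # keep only opaque, non white-ish colors, as in the wizard method
        colors = [
            (count, rgba) for count, rgba in image.getcolors(w * h)
            if rgba[3] > 0 and not all(c > white_threshold for c in rgba[:3])
        ]
        if not colors:
            return None  # fully white or transparent logo
        _count, (r, g, b, _a) = max(colors)
        return '#%02x%02x%02x' % (r, g, b)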
Example #27
0
class WebsiteVisitor(models.Model):
    _name = 'website.visitor'
    _description = 'Website Visitor'
    _order = 'last_connection_datetime DESC'

    name = fields.Char('Name')
    access_token = fields.Char(required=True,
                               default=lambda x: uuid.uuid4().hex,
                               index=True,
                               copy=False,
                               groups='base.group_website_publisher')
    active = fields.Boolean('Active', default=True)
    website_id = fields.Many2one('website', "Website", readonly=True)
    partner_id = fields.Many2one('res.partner',
                                 string="Linked Partner",
                                 help="Partner of the last logged in user.")
    partner_image = fields.Binary(related='partner_id.image_1920')

    # localisation and info
    country_id = fields.Many2one('res.country', 'Country', readonly=True)
    country_flag = fields.Binary(related="country_id.image",
                                 string="Country Flag")
    lang_id = fields.Many2one(
        'res.lang',
        string='Language',
        help="Language from the website when visitor has been created")
    timezone = fields.Selection(_tz_get, string='Timezone')
    email = fields.Char(string='Email', compute='_compute_email_phone')
    mobile = fields.Char(string='Mobile Phone', compute='_compute_email_phone')

    # Visit fields
    visit_count = fields.Integer(
        'Number of visits',
        default=1,
        readonly=True,
        help=
        "A new visit is considered if last connection was more than 8 hours ago."
    )
    website_track_ids = fields.One2many('website.track',
                                        'visitor_id',
                                        string='Visited Pages History',
                                        readonly=True)
    visitor_page_count = fields.Integer(
        'Page Views',
        compute="_compute_page_statistics",
        help="Total number of visits on tracked pages")
    page_ids = fields.Many2many('website.page',
                                string="Visited Pages",
                                compute="_compute_page_statistics")
    page_count = fields.Integer('# Visited Pages',
                                compute="_compute_page_statistics",
                                help="Total number of tracked page visited")
    last_visited_page_id = fields.Many2one(
        'website.page',
        string="Last Visited Page",
        compute="_compute_last_visited_page_id")

    # Time fields
    create_date = fields.Datetime('First connection date', readonly=True)
    last_connection_datetime = fields.Datetime('Last Connection',
                                               default=fields.Datetime.now,
                                               help="Last page view date",
                                               readonly=True)
    time_since_last_action = fields.Char(
        'Last action',
        compute="_compute_time_statistics",
        help='Time since last page view. E.g.: 2 minutes ago')
    is_connected = fields.Boolean(
        'Is connected?',
        compute='_compute_time_statistics',
        help=
        'A visitor is considered connected if their last page view was within the last 5 minutes.'
    )

    _sql_constraints = [
        ('access_token_unique', 'unique(access_token)',
         'Access token should be unique.'),
        ('partner_uniq', 'unique(partner_id)',
         'A partner is linked to only one visitor.'),
    ]

    @api.depends('name')
    def name_get(self):
        return [(record.id, (record.name
                             or _('Website Visitor #%s') % record.id))
                for record in self]

    @api.depends('partner_id.email_normalized', 'partner_id.mobile',
                 'partner_id.phone')
    def _compute_email_phone(self):
        results = self.env['res.partner'].search_read(
            [('id', 'in', self.partner_id.ids)],
            ['id', 'email_normalized', 'mobile', 'phone'],
        )
        mapped_data = {
            result['id']: {
                'email_normalized': result['email_normalized'],
                'mobile':
                result['mobile'] if result['mobile'] else result['phone']
            }
            for result in results
        }

        for visitor in self:
            visitor.email = mapped_data.get(visitor.partner_id.id,
                                            {}).get('email_normalized')
            visitor.mobile = mapped_data.get(visitor.partner_id.id,
                                             {}).get('mobile')

    @api.depends('website_track_ids')
    def _compute_page_statistics(self):
        results = self.env['website.track'].read_group(
            [('visitor_id', 'in', self.ids), ('url', '!=', False)],
            ['visitor_id', 'page_id', 'url'], ['visitor_id', 'page_id', 'url'],
            lazy=False)
        mapped_data = {}
        for result in results:
            visitor_info = mapped_data.get(result['visitor_id'][0], {
                'page_count': 0,
                'visitor_page_count': 0,
                'page_ids': set()
            })
            visitor_info['visitor_page_count'] += result['__count']
            visitor_info['page_count'] += 1
            if result['page_id']:
                visitor_info['page_ids'].add(result['page_id'][0])
            mapped_data[result['visitor_id'][0]] = visitor_info

        for visitor in self:
            visitor_info = mapped_data.get(visitor.id, {
                'page_count': 0,
                'visitor_page_count': 0,
                'page_ids': set()
            })
            visitor.page_ids = [(6, 0, visitor_info['page_ids'])]
            visitor.visitor_page_count = visitor_info['visitor_page_count']
            visitor.page_count = visitor_info['page_count']

    @api.depends('website_track_ids.page_id')
    def _compute_last_visited_page_id(self):
        results = self.env['website.track'].read_group(
            [('visitor_id', 'in', self.ids)],
            ['visitor_id', 'page_id', 'visit_datetime:max'],
            ['visitor_id', 'page_id'],
            lazy=False)
        mapped_data = {
            result['visitor_id'][0]: result['page_id'][0]
            for result in results if result['page_id']
        }
        for visitor in self:
            visitor.last_visited_page_id = mapped_data.get(visitor.id, False)

    @api.depends('last_connection_datetime')
    def _compute_time_statistics(self):
        for visitor in self:
            visitor.time_since_last_action = _format_time_ago(
                self.env, (datetime.now() - visitor.last_connection_datetime))
            visitor.is_connected = (
                datetime.now() -
                visitor.last_connection_datetime) < timedelta(minutes=5)

    def _prepare_visitor_send_mail_values(self):
        if self.partner_id.email:
            return {
                'res_model': 'res.partner',
                'res_id': self.partner_id.id,
                'partner_ids': [self.partner_id.id],
            }
        return {}

    def action_send_mail(self):
        self.ensure_one()
        visitor_mail_values = self._prepare_visitor_send_mail_values()
        if not visitor_mail_values:
            raise UserError(_("There is no email linked this visitor."))
        compose_form = self.env.ref('mail.email_compose_message_wizard_form',
                                    False)
        ctx = dict(
            default_model=visitor_mail_values.get('res_model'),
            default_res_id=visitor_mail_values.get('res_id'),
            default_use_template=False,
            default_partner_ids=[(6, 0, visitor_mail_values.get('partner_ids'))
                                 ],
            default_composition_mode='comment',
            default_reply_to=self.env.user.partner_id.email,
        )
        return {
            'name': _('Compose Email'),
            'type': 'ir.actions.act_window',
            'view_mode': 'form',
            'res_model': 'mail.compose.message',
            'views': [(compose_form.id, 'form')],
            'view_id': compose_form.id,
            'target': 'new',
            'context': ctx,
        }

    def _get_visitor_from_request(self, force_create=False):
        """ Return the visitor as sudo from the request if there is a visitor_uuid cookie.
            It is possible that the partner has changed or has disconnected.
            In that case the cookie is still referencing the old visitor and need to be replaced
            with the one of the visitor returned !!!. """

        # This method can be called over JSON by the mobile app; in that case
        # no uid is set on the jsonRequest env. With multiple databases, _env
        # is None on the request and request.env is unbound.
        if not request:
            return None
        Visitor = self.env['website.visitor'].sudo()
        visitor = Visitor
        access_token = request.httprequest.cookies.get('visitor_uuid')
        if access_token:
            visitor = Visitor.with_context(active_test=False).search([
                ('access_token', '=', access_token)
            ])
            # Prefetch access_token and other fields. access_token has a
            # restricted group, so if a non-restricted field (partner_id) were
            # read first, access_token would not be prefetched and would
            # require an additional query.
            visitor.access_token

        if not self.env.user._is_public():
            partner_id = self.env.user.partner_id
            if not visitor or visitor.partner_id and visitor.partner_id != partner_id:
                # Partner and no cookie or wrong cookie
                visitor = Visitor.with_context(active_test=False).search([
                    ('partner_id', '=', partner_id.id)
                ])
        elif visitor and visitor.partner_id:
            # Cookie associated to a Partner
            visitor = Visitor

        if force_create and not visitor:
            visitor = self._create_visitor()

        return visitor

    def _handle_webpage_dispatch(self, response, website_page):
        # get visitor. Done here to avoid having to do it multiple times in case of override.
        visitor_sudo = self._get_visitor_from_request(force_create=True)
        if request.httprequest.cookies.get('visitor_uuid',
                                           '') != visitor_sudo.access_token:
            expiration_date = datetime.now() + timedelta(days=365)
            response.set_cookie('visitor_uuid',
                                visitor_sudo.access_token,
                                expires=expiration_date)
        self._handle_website_page_visit(response, website_page, visitor_sudo)

    def _handle_website_page_visit(self, response, website_page, visitor_sudo):
        """ Called on dispatch. This will create a website.visitor if the http request object
        is a tracked website page or a tracked view. Only on tracked elements to avoid having
        too much operations done on every page or other http requests.
        Note: The side effect is that the last_connection_datetime is updated ONLY on tracked elements."""
        url = request.httprequest.url
        website_track_values = {
            'url': url,
            'visit_datetime': datetime.now(),
        }
        if website_page:
            website_track_values['page_id'] = website_page.id
            domain = [('page_id', '=', website_page.id)]
        else:
            domain = [('url', '=', url)]
        visitor_sudo._add_tracking(domain, website_track_values)
        if visitor_sudo.lang_id.id != request.lang.id:
            visitor_sudo.write({'lang_id': request.lang.id})

    def _add_tracking(self, domain, website_track_values):
        """ Add the track and update the visitor"""
        domain = expression.AND([domain, [('visitor_id', '=', self.id)]])
        last_view = self.env['website.track'].sudo().search(domain, limit=1)
        if not last_view or last_view.visit_datetime < datetime.now(
        ) - timedelta(minutes=30):
            website_track_values['visitor_id'] = self.id
            self.env['website.track'].create(website_track_values)
        self._update_visitor_last_visit()

    def _create_visitor(self, website_track_values=None):
        """ Create a visitor and add a track to it if website_track_values is set."""
        country_code = request.session.get('geoip',
                                           {}).get('country_code', False)
        country_id = request.env['res.country'].sudo().search(
            [('code', '=',
              country_code)], limit=1).id if country_code else False
        vals = {
            'lang_id': request.lang.id,
            'country_id': country_id,
            'website_id': request.website.id,
        }
        if not self.env.user._is_public():
            vals['partner_id'] = self.env.user.partner_id.id
            vals['name'] = self.env.user.partner_id.name
        if website_track_values:
            vals['website_track_ids'] = [(0, 0, website_track_values)]
        return self.sudo().create(vals)

    def _cron_archive_visitors(self):
        one_week_ago = datetime.now() - timedelta(days=7)
        visitors_to_archive = self.env['website.visitor'].sudo().search([
            ('last_connection_datetime', '<', one_week_ago)
        ])
        visitors_to_archive.write({'active': False})

    def _update_visitor_last_visit(self):
        """ We need to do this part here to avoid concurrent updates error. """
        try:
            with self.env.cr.savepoint():
                query_lock = "SELECT * FROM website_visitor where id = %s FOR NO KEY UPDATE NOWAIT"
                self.env.cr.execute(query_lock, (self.id, ),
                                    log_exceptions=False)

                date_now = datetime.now()
                query = "UPDATE website_visitor SET "
                if self.last_connection_datetime < (date_now -
                                                    timedelta(hours=8)):
                    query += "visit_count = visit_count + 1,"
                query += """
                    active = True,
                    last_connection_datetime = %s
                    WHERE id = %s
                """
                self.env.cr.execute(query, (date_now, self.id),
                                    log_exceptions=False)
        except Exception:
            pass
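
The locking pattern above can be reproduced with plain psycopg2. A hedged
sketch (assuming an open connection to the same database; FOR NO KEY UPDATE
NOWAIT makes the statement fail fast if another transaction holds the row,
and the savepoint confines the failure):

    import psycopg2

    def touch_visitor(conn, visitor_id):
        with conn.cursor() as cr:
            cr.execute("SAVEPOINT visitor_lock")
            try:
                # fail immediately instead of blocking if the row is locked
                cr.execute(
                    "SELECT id FROM website_visitor WHERE id = %s FOR NO KEY UPDATE NOWAIT",
                    (visitor_id,))
                cr.execute(
                    "UPDATE website_visitor SET last_connection_datetime = now() WHERE id = %s",
                    (visitor_id,))
            except psycopg2.errors.LockNotAvailable:
                # another transaction holds the row: give up silently
                cr.execute("ROLLBACK TO SAVEPOINT visitor_lock")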
Example #28
0
class SnailmailLetter(models.Model):
    _name = 'snailmail.letter'
    _description = 'Snailmail Letter'

    user_id = fields.Many2one('res.users', 'Sent by')
    model = fields.Char('Model', required=True)
    res_id = fields.Integer('Document ID', required=True)
    partner_id = fields.Many2one('res.partner',
                                 string='Recipient',
                                 required=True)
    company_id = fields.Many2one('res.company',
                                 string='Company',
                                 required=True,
                                 readonly=True,
                                 default=lambda self: self.env.company.id)
    report_template = fields.Many2one('ir.actions.report',
                                      'Optional report to print and attach')

    attachment_id = fields.Many2one('ir.attachment',
                                    string='Attachment',
                                    ondelete='cascade')
    attachment_datas = fields.Binary('Document', related='attachment_id.datas')
    attachment_fname = fields.Char('Attachment Filename',
                                   related='attachment_id.name')
    color = fields.Boolean(
        string='Color', default=lambda self: self.env.company.snailmail_color)
    cover = fields.Boolean(
        string='Cover Page',
        default=lambda self: self.env.company.snailmail_cover)
    duplex = fields.Boolean(
        string='Both side',
        default=lambda self: self.env.company.snailmail_duplex)
    state = fields.Selection(
        [('pending', 'In Queue'), ('sent', 'Sent'), ('error', 'Error'),
         ('canceled', 'Canceled')],
        'Status',
        readonly=True,
        copy=False,
        default='pending',
        required=True,
        help="When a letter is created, the status is 'Pending'.\n"
        "If the letter is correctly sent, the status goes in 'Sent',\n"
        "If not, it will got in state 'Error' and the error message will be displayed in the field 'Error Message'."
    )
    error_code = fields.Selection([(err_code, err_code)
                                   for err_code in ERROR_CODES],
                                  string="Error")
    info_msg = fields.Char('Information')
    display_name = fields.Char('Display Name', compute="_compute_display_name")

    reference = fields.Char(string='Related Record',
                            compute='_compute_reference',
                            readonly=True,
                            store=False)

    message_id = fields.Many2one('mail.message',
                                 string="Snailmail Status Message")

    street = fields.Char('Street')
    street2 = fields.Char('Street2')
    zip = fields.Char('Zip')
    city = fields.Char('City')
    state_id = fields.Many2one("res.country.state", string='State')
    country_id = fields.Many2one('res.country', string='Country')

    @api.depends('reference', 'partner_id')
    def _compute_display_name(self):
        for letter in self:
            if letter.attachment_id:
                letter.display_name = "%s - %s" % (letter.attachment_id.name,
                                                   letter.partner_id.name)
            else:
                letter.display_name = letter.partner_id.name

    @api.depends('model', 'res_id')
    def _compute_reference(self):
        for res in self:
            res.reference = "%s,%s" % (res.model, res.res_id)

    @api.model
    def create(self, vals):
        msg_id = self.env[vals['model']].browse(vals['res_id']).message_post(
            body=_("Letter sent by post with Snailmail"),
            message_type='snailmail')
        partner_id = self.env['res.partner'].browse(vals['partner_id'])
        vals.update({
            'message_id': msg_id.id,
            'street': partner_id.street,
            'street2': partner_id.street2,
            'zip': partner_id.zip,
            'city': partner_id.city,
            'state_id': partner_id.state_id.id,
            'country_id': partner_id.country_id.id,
        })
        return super(SnailmailLetter, self).create(vals)

    def _fetch_attachment(self):
        """
        This method checks whether an attachment matching the model
        and res_id already exists, and creates it if not.
        """
        self.ensure_one()
        obj = self.env[self.model].browse(self.res_id)
        if not self.attachment_id:
            report = self.report_template
            if not report:
                report_name = self.env.context.get('report_name')
                report = self.env['ir.actions.report']._get_report_from_name(
                    report_name)
                if not report:
                    return False
                else:
                    self.write({'report_template': report.id})
                # report = self.env.ref('account.account_invoices')
            if report.print_report_name:
                report_name = safe_eval(report.print_report_name,
                                        {'object': obj})
            elif report.attachment:
                report_name = safe_eval(report.attachment, {'object': obj})
            else:
                report_name = 'Document'
            filename = "%s.%s" % (report_name, "pdf")
            pdf_bin, _ = report.with_context(
                snailmail_layout=not self.cover).render_qweb_pdf(self.res_id)
            attachment = self.env['ir.attachment'].create({
                'name':
                filename,
                'datas':
                base64.b64encode(pdf_bin),
                'res_model':
                'snailmail.letter',
                'res_id':
                self.id,
                'type':
                'binary',  # override default_type from context, possibly meant for another model!
            })
            self.write({'attachment_id': attachment.id})

        return self.attachment_id

    def _count_pages_pdf(self, bin_pdf):
        """ Count the number of pages of the given pdf file.
            :param bin_pdf : binary content of the pdf file
        """
        pages = 0
        for match in re.compile(b"/Count\s+(\d+)").finditer(bin_pdf):
            pages = int(match.group(1))
        return pages

    def _snailmail_create(self, route):
        """
        Create a dictionary to send to the snailmail server.

        :return: Dict in the form:
        {
            account_token: string,    //IAP Account token of the user
            documents: [{
                pages: int,
                pdf_bin: pdf file
                res_id: int (client-side res_id),
                res_model: char (client-side res_model),
                address: {
                    name: char,
                    street: char,
                    street2: char (OPTIONAL),
                    zip: int,
                    city: char,
                    state: char (state code (OPTIONAL)),
                    country_code: char (country code)
                }
                return_address: {
                    name: char,
                    street: char,
                    street2: char (OPTIONAL),
                    zip: int,
                    city: char,
                    state: char (state code (OPTIONAL)),
                    country_code: char (country code)
                }
            }],
            options: {
                color: boolean (true if color, false if black-white),
                duplex: boolean (true if duplex, false otherwise),
                currency_name: char
            }
        }
        """
        account_token = self.env['iap.account'].get('snailmail').account_token
        dbuuid = self.env['ir.config_parameter'].sudo().get_param(
            'database.uuid')
        documents = []

        batch = len(self) > 1
        for letter in self:
            document = {
                # generic information to send
                'letter_id':
                letter.id,
                'res_model':
                letter.model,
                'res_id':
                letter.res_id,
                'contact_address':
                letter.partner_id.with_context(
                    snailmail_layout=True, show_address=True).name_get()[0][1],
                'address': {
                    'name':
                    letter.partner_id.name,
                    'street':
                    letter.partner_id.street,
                    'street2':
                    letter.partner_id.street2,
                    'zip':
                    letter.partner_id.zip,
                    'state':
                    letter.partner_id.state_id.code
                    if letter.partner_id.state_id else False,
                    'city':
                    letter.partner_id.city,
                    'country_code':
                    letter.partner_id.country_id.code
                },
                'return_address': {
                    'name':
                    letter.company_id.partner_id.name,
                    'street':
                    letter.company_id.partner_id.street,
                    'street2':
                    letter.company_id.partner_id.street2,
                    'zip':
                    letter.company_id.partner_id.zip,
                    'state':
                    letter.company_id.partner_id.state_id.code
                    if letter.company_id.partner_id.state_id else False,
                    'city':
                    letter.company_id.partner_id.city,
                    'country_code':
                    letter.company_id.partner_id.country_id.code,
                }
            }
            # Specific to each case:
            # If we are estimating the price: 1 object = 1 page
            # If we are printing -> attach the pdf
            if route == 'estimate':
                document.update(pages=1)
            else:
                # add the company web logo for possible future customization
                document.update({
                    'company_logo':
                    letter.company_id.logo_web
                    and letter.company_id.logo_web.decode('utf-8') or False,
                })
                attachment = letter._fetch_attachment()
                if attachment:
                    document.update({
                        'pdf_bin':
                        route == 'print' and attachment.datas.decode('utf-8'),
                        'pages':
                        route == 'estimate' and self._count_pages_pdf(
                            base64.b64decode(attachment.datas)),
                    })
                else:
                    letter.write({
                        'info_msg': 'The attachment could not be generated.',
                        'state': 'error',
                        'error_code': 'ATTACHMENT_ERROR'
                    })
                    continue
                if letter.company_id.external_report_layout_id == self.env.ref(
                        'l10n_de.external_layout_din5008', False):
                    document.update({
                        'rightaddress': 0,
                    })
            documents.append(document)

        return {
            'account_token': account_token,
            'dbuuid': dbuuid,
            'documents': documents,
            'options': {
                'color': self and self[0].color,
                'cover': self and self[0].cover,
                'duplex': self and self[0].duplex,
                'currency_name': 'EUR',
            },
            # this will not raise the InsufficientCreditError which is the behaviour we want for now
            'batch': True,
        }

    def _get_error_message(self, error):
        if error == 'CREDIT_ERROR':
            link = self.env['iap.account'].get_credits_url(
                service_name='snailmail')
            return _(
                'You don\'t have enough credits to perform this operation.<br>Please go to your <a href=%s target="new">iap account</a>.'
            ) % link
        if error == 'TRIAL_ERROR':
            link = self.env['iap.account'].get_credits_url(
                service_name='snailmail', trial=True)
            return _(
                'You don\'t have an IAP account registered for this service.<br>Please go to <a href=%s target="new">iap.harpiya.com</a> to claim your free credits.'
            ) % link
        if error == 'NO_PRICE_AVAILABLE':
            return _('The country of the partner is not covered by Snailmail.')
        if error == 'MISSING_REQUIRED_FIELDS':
            return _('One or more required fields are empty.')
        if error == 'FORMAT_ERROR':
            return _(
                'The attachment of the letter could not be sent. Please check its content and contact the support if the problem persists.'
            )
        return _('An unknown error happened. Please contact the support.')

    def _snailmail_print(self, immediate=True):
        valid_address_letters = self.filtered(lambda l: l._is_valid_address(l))
        invalid_address_letters = self - valid_address_letters
        invalid_address_letters._snailmail_print_invalid_address()
        if valid_address_letters and immediate:
            valid_address_letters._snailmail_print_valid_address()
        self.env.cr.commit()

    def _snailmail_print_invalid_address(self):
        for letter in self:
            letter.write({
                'state':
                'error',
                'error_code':
                'MISSING_REQUIRED_FIELDS',
                'info_msg':
                _('The address of the recipient is not complete')
            })
        self.send_snailmail_update()

    def _snailmail_print_valid_address(self):
        """
        get response
        {
            'request_code': RESPONSE_OK, # because we receive 200 if good or fail
            'total_cost': total_cost,
            'credit_error': credit_error,
            'request': {
                'documents': documents,
                'options': options
                }
            }
        }
        """
        endpoint = self.env['ir.config_parameter'].sudo().get_param(
            'snailmail.endpoint', DEFAULT_ENDPOINT)
        params = self._snailmail_create('print')
        response = jsonrpc(endpoint + PRINT_ENDPOINT, params=params)
        for doc in response['request']['documents']:
            if doc.get('sent') and response['request_code'] == 200:
                note = _(
                    'The document was correctly sent by post.<br>The tracking id is %s'
                ) % doc['send_id']
                letter_data = {
                    'info_msg': note,
                    'state': 'sent',
                    'error_code': False
                }
            else:
                error = doc['error'] if response[
                    'request_code'] == 200 else response['reason']

                note = _(
                    'An error occurred when sending the document by post.<br>Error: %s'
                ) % self._get_error_message(error)
                letter_data = {
                    'info_msg':
                    note,
                    'state':
                    'error',
                    'error_code':
                    error if error in ERROR_CODES else 'UNKNOWN_ERROR'
                }

            letter = self.browse(doc['letter_id'])
            letter.write(letter_data)
        self.send_snailmail_update()

    def send_snailmail_update(self):
        notifications = []
        for letter in self:
            notifications.append([(self._cr.dbname, 'res.partner',
                                   letter.user_id.partner_id.id), {
                                       'type':
                                       'snailmail_update',
                                       'elements':
                                       letter._format_snailmail_failures()
                                   }])
        self.env['bus.bus'].sendmany(notifications)

    def snailmail_print(self):
        self._snailmail_print()

    def cancel(self):
        self.write({'state': 'canceled', 'error_code': False})
        self.send_snailmail_update()

    @api.model
    def _snailmail_cron(self):
        letters_send = self.search([
            '|', ('state', '=', 'pending'), '&', ('state', '=', 'error'),
            ('error_code', 'in', [
                'TRIAL_ERROR', 'CREDIT_ERROR', 'ATTACHMENT_ERROR',
                'MISSING_REQUIRED_FIELDS'
            ])
        ])
        letters_send._snailmail_print()

    @api.model
    def fetch_failed_letters(self):
        failed_letters = self.search([('state', '=', 'error'),
                                      ('user_id.id', '=', self.env.user.id),
                                      ('res_id', '!=', 0),
                                      ('model', '!=', False)])
        return failed_letters._format_snailmail_failures()

    @api.model
    def _is_valid_address(self, record):
        record.ensure_one()
        required_keys = ['street', 'city', 'zip', 'country_id']
        return all(record[key] for key in required_keys)

    def _format_snailmail_failures(self):
        """
        A short summary of each letter, used to notify a failure update
        """
        failures_infos = []
        for letter in self:
            info = {
                'message_id':
                letter.message_id.id,
                'record_name':
                letter.message_id.record_name,
                'model_name':
                self.env['ir.model']._get(letter.model).display_name,
                'uuid':
                letter.message_id.message_id,
                'res_id':
                letter.res_id,
                'model':
                letter.model,
                'last_message_date':
                letter.message_id.date,
                'module_icon':
                '/snailmail/static/img/snailmail_failure.png',
                'snailmail_status':
                letter.error_code if letter.state == 'error' else '',
                'snailmail_error':
                letter.state == 'error',
                'failure_type':
                'snailmail',
            }
            failures_infos.append(info)
        return failures_infos
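
The /Count heuristic used by _count_pages_pdf above can be tested standalone.
A minimal sketch (note: nested page trees may contain several /Count entries,
so this is an approximation, as in the model method):

    import re

    def count_pages(pdf_bytes):
        pages = 0
        # keep the last /Count entry found in the raw PDF bytes
        for match in re.finditer(rb"/Count\s+(\d+)", pdf_bytes):
            pages = int(match.group(1))
        return pages

    # e.g.: with open('letter.pdf', 'rb') as f: print(count_pages(f.read()))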
Example #29
0
class Website(models.Model):

    _name = "website"
    _description = "Website"

    @api.model
    def website_domain(self, website_id=False):
        return [('website_id', 'in', (False, website_id or self.id))]

    def _active_languages(self):
        return self.env['res.lang'].search([]).ids

    def _default_language(self):
        lang_code = self.env['ir.default'].get('res.partner', 'lang')
        def_lang_id = self.env['res.lang']._lang_get_id(lang_code)
        return def_lang_id or self._active_languages()[0]

    name = fields.Char('Website Name', required=True)
    domain = fields.Char(
        'Website Domain',
        help=
        'Will be prefixed by http in canonical URLs if no scheme is specified')
    country_group_ids = fields.Many2many(
        'res.country.group',
        'website_country_group_rel',
        'website_id',
        'country_group_id',
        string='Country Groups',
        help='Used when multiple websites have the same domain.')
    company_id = fields.Many2one('res.company',
                                 string="Company",
                                 default=lambda self: self.env.company,
                                 required=True)
    language_ids = fields.Many2many('res.lang',
                                    'website_lang_rel',
                                    'website_id',
                                    'lang_id',
                                    'Languages',
                                    default=_active_languages)
    default_lang_id = fields.Many2one('res.lang',
                                      string="Default Language",
                                      default=_default_language,
                                      required=True)
    auto_redirect_lang = fields.Boolean(
        'Autoredirect Language',
        default=True,
        help="Should users be redirected to their browser's language")

    def _default_social_facebook(self):
        return self.env.ref('base.main_company').social_facebook

    def _default_social_github(self):
        return self.env.ref('base.main_company').social_github

    def _default_social_linkedin(self):
        return self.env.ref('base.main_company').social_linkedin

    def _default_social_youtube(self):
        return self.env.ref('base.main_company').social_youtube

    def _default_social_instagram(self):
        return self.env.ref('base.main_company').social_instagram

    def _default_social_twitter(self):
        return self.env.ref('base.main_company').social_twitter

    def _default_logo(self):
        image_path = get_resource_path('website', 'static/src/img',
                                       'website_logo.png')
        with tools.file_open(image_path, 'rb') as f:
            return base64.b64encode(f.read())

    logo = fields.Binary('Website Logo',
                         default=_default_logo,
                         help="Display this logo on the website.")
    wholesale = fields.Boolean("Wholesale Website")
    social_twitter = fields.Char('Twitter Account',
                                 default=_default_social_twitter)
    social_facebook = fields.Char('Facebook Account',
                                  default=_default_social_facebook)
    social_github = fields.Char('GitHub Account',
                                default=_default_social_github)
    social_linkedin = fields.Char('LinkedIn Account',
                                  default=_default_social_linkedin)
    social_youtube = fields.Char('Youtube Account',
                                 default=_default_social_youtube)
    social_instagram = fields.Char('Instagram Account',
                                   default=_default_social_instagram)
    social_default_image = fields.Binary(
        string="Default Social Share Image",
        help=
        "If set, replaces the company logo as the default social share image.")

    google_analytics_key = fields.Char('Google Analytics Key')
    google_management_client_id = fields.Char('Google Client ID')
    google_management_client_secret = fields.Char('Google Client Secret')

    google_maps_api_key = fields.Char('Google Maps API Key')

    user_id = fields.Many2one('res.users', string='Public User', required=True)
    cdn_activated = fields.Boolean('Content Delivery Network (CDN)')
    cdn_url = fields.Char('CDN Base URL', default='')
    cdn_filters = fields.Text(
        'CDN Filters',
        default=lambda s: '\n'.join(DEFAULT_CDN_FILTERS),
        help=
        "URL matching those filters will be rewritten using the CDN Base URL")
    partner_id = fields.Many2one(related='user_id.partner_id',
                                 relation='res.partner',
                                 string='Public Partner',
                                 readonly=False)
    menu_id = fields.Many2one('website.menu',
                              compute='_compute_menu',
                              string='Main Menu')
    homepage_id = fields.Many2one('website.page', string='Homepage')

    def _default_favicon(self):
        img_path = get_resource_path('web', 'static/src/img/favicon.png')
        with tools.file_open(img_path, 'rb') as f:
            return base64.b64encode(f.read())

    favicon = fields.Binary(
        string="Website Favicon",
        help=
        "This field holds the image used to display a favicon on the website.",
        default=_default_favicon)
    theme_id = fields.Many2one('ir.module.module', help='Installed theme')

    specific_user_account = fields.Boolean(
        'Specific User Account',
        help='If True, new accounts will be associated to the current website')
    auth_signup_uninvited = fields.Selection([
        ('b2b', 'On invitation'),
        ('b2c', 'Free sign up'),
    ],
                                             string='Customer Account',
                                             default='b2b')

    @api.onchange('language_ids')
    def _onchange_language_ids(self):
        language_ids = self.language_ids._origin
        if language_ids and self.default_lang_id not in language_ids:
            self.default_lang_id = language_ids[0]

    def _compute_menu(self):
        for website in self:
            menus = self.env['website.menu'].browse(website._get_menu_ids())

            # use field parent_id (1 query) to determine field child_id (2 queries per level)
            for menu in menus:
                menu._cache['child_id'] = ()
            for menu in menus:
                # don't add child menu if parent is forbidden
                if menu.parent_id and menu.parent_id in menus:
                    menu.parent_id._cache['child_id'] += (menu.id, )

            website.menu_id = menus and menus.filtered(
                lambda m: not m.parent_id)[0].id or False

    # self.env.uid for ir.rule groups on menu
    @tools.ormcache('self.env.uid', 'self.id')
    def _get_menu_ids(self):
        return self.env['website.menu'].search([('website_id', '=', self.id)
                                                ]).ids

    @api.model
    def create(self, vals):
        self._handle_favicon(vals)

        if 'user_id' not in vals:
            company = self.env['res.company'].browse(vals.get('company_id'))
            vals['user_id'] = company._get_public_user(
            ).id if company else self.env.ref('base.public_user').id

        res = super(Website, self).create(vals)
        res._bootstrap_homepage()

        if not self.env.user.has_group(
                'website.group_multi_website') and self.search_count([]) > 1:
            all_user_groups = 'base.group_portal,base.group_user,base.group_public'
            groups = self.env['res.groups'].concat(
                *(self.env.ref(it) for it in all_user_groups.split(',')))
            groups.write({
                'implied_ids':
                [(4, self.env.ref('website.group_multi_website').id)]
            })

        return res

    def write(self, values):
        public_user_to_change_websites = self.env['website']
        self._handle_favicon(values)

        self.clear_caches()

        if 'company_id' in values and 'user_id' not in values:
            public_user_to_change_websites = self.filtered(lambda w: w.sudo(
            ).user_id.company_id.id != values['company_id'])
            if public_user_to_change_websites:
                company = self.env['res.company'].browse(values['company_id'])
                super(Website, public_user_to_change_websites).write(
                    dict(values,
                         user_id=company and company._get_public_user().id))

        result = super(Website,
                       self - public_user_to_change_websites).write(values)
        if 'cdn_activated' in values or 'cdn_url' in values or 'cdn_filters' in values:
            # invalidate the caches from static node at compile time
            self.env['ir.qweb'].clear_caches()
        return result

    @api.model
    def _handle_favicon(self, vals):
        if 'favicon' in vals:
            vals['favicon'] = tools.image_process(vals['favicon'],
                                                  size=(256, 256),
                                                  crop='center',
                                                  output_format='ICO')
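
    # Illustration (sketch, hypothetical value): given create/write vals that
    # contain a base64 favicon, _handle_favicon normalizes it in place:
    #   vals = {'name': 'My Website', 'favicon': some_b64_image}
    #   self._handle_favicon(vals)  # vals['favicon'] becomes a 256x256 ICO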

    def unlink(self):
        # Do not delete invoices, delete what's strictly necessary
        attachments_to_unlink = self.env['ir.attachment'].search([
            ('website_id', 'in', self.ids),
            '|',
            '|',
            ('key', '!=', False),  # theme attachment
            ('url', 'ilike', '.custom.'),  # customized theme attachment
            ('url', 'ilike', '.assets\\_'),
        ])
        attachments_to_unlink.unlink()
        return super(Website, self).unlink()

    # ----------------------------------------------------------
    # Page Management
    # ----------------------------------------------------------
    def _bootstrap_homepage(self):
        Page = self.env['website.page']
        standard_homepage = self.env.ref('website.homepage',
                                         raise_if_not_found=False)
        if not standard_homepage:
            return

        new_homepage_view = '''<t name="Homepage" t-name="website.homepage%s">
        <t t-call="website.layout">
            <t t-set="pageName" t-value="'homepage'"/>
            <div id="wrap" class="oe_structure oe_empty"/>
            </t>
        </t>''' % (self.id)
        standard_homepage.with_context(
            website_id=self.id).arch_db = new_homepage_view

        homepage_page = Page.search([
            ('website_id', '=', self.id),
            ('key', '=', standard_homepage.key),
        ],
                                    limit=1)
        if not homepage_page:
            homepage_page = Page.create({
                'website_published':
                True,
                'url':
                '/',
                'view_id':
                self.with_context(
                    website_id=self.id).viewref('website.homepage').id,
            })
        # prevent /-1 as homepage URL
        homepage_page.url = '/'
        self.homepage_id = homepage_page

        # Bootstrap default menu hierarchy, create a new minimalist one if no default
        default_menu = self.env.ref('website.main_menu')
        self.copy_menu_hierarchy(default_menu)

    def copy_menu_hierarchy(self, top_menu):
        def copy_menu(menu, t_menu):
            new_menu = menu.copy({
                'parent_id': t_menu.id,
                'website_id': self.id,
            })
            for submenu in menu.child_id:
                copy_menu(submenu, new_menu)

        for website in self:
            new_top_menu = top_menu.copy({
                'name':
                _('Top Menu for Website %s') % website.id,
                'website_id':
                website.id,
            })
            for submenu in top_menu.child_id:
                copy_menu(submenu, new_top_menu)

    @api.model
    def new_page(self,
                 name=False,
                 add_menu=False,
                 template='website.default_page',
                 ispage=True,
                 namespace=None):
        """ Create a new website page, and assign it a xmlid based on the given one
            :param name : the name of the page
            :param template : potential xml_id of the page to create
            :param namespace : module part of the xml_id if none, the template module name is used
        """
        if namespace:
            template_module = namespace
        else:
            template_module = template.split('.')[0]  # avoid unpacking into '_', which would shadow the translation helper

        if not name:
            # default the name before slugifying, so an unnamed page gets
            # '/home' as its URL instead of a slug of the boolean False
            name = 'Home'

        page_url = '/' + slugify(name, max_length=1024, path=True)
        page_url = self.get_unique_path(page_url)
        page_key = slugify(name)
        result = {'url': page_url, 'view_id': False}

        template_record = self.env.ref(template)
        website_id = self._context.get('website_id')
        key = self.get_unique_key(page_key, template_module)
        view = template_record.copy({'website_id': website_id, 'key': key})

        view.with_context(lang=None).write({
            'arch': template_record.arch.replace(template, key),
            'name': name,
        })

        if view.arch_fs:
            view.arch_fs = False

        website = self.get_current_website()
        page = False
        if ispage:
            page = self.env['website.page'].create({
                'url': page_url,
                'website_id': website.id,  # remove it if only one website or not?
                'view_id': view.id,
            })
            result['view_id'] = view.id
        if add_menu:
            self.env['website.menu'].create({
                'name': name,
                'url': page_url,
                'parent_id': website.menu_id.id,
                # `page` is False when ispage=False; guard against a NameError
                'page_id': page and page.id or None,
                'website_id': website.id,
            })
        return result
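
    # Usage sketch (illustrative only; values are hypothetical):
    #
    #   result = env['website'].with_context(website_id=1).new_page(
    #       name='About Us', add_menu=True)
    #   # result => {'url': '/about-us', 'view_id': <id of the copied view>}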

    @api.model
    def guess_mimetype(self):
        return _guess_mimetype()

    def get_unique_path(self, page_url):
        """ Given a URL, return that URL suffixed by a counter if it already exists.
            :param page_url : the URL to be checked for uniqueness
        """
        inc = 0
        # We only want the path to be unique per website: /url must be able to
        # exist both for website=False and for website=1. In case of a
        # duplicate, the page manager lets you handle it.
        domain_static = [('website_id', '=', self.get_current_website().id)]
        page_temp = page_url
        while self.env['website.page'].with_context(active_test=False).sudo().search(
                [('url', '=', page_temp)] + domain_static):
            inc += 1
            page_temp = page_url + (inc and "-%s" % inc or "")
        return page_temp
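
    # Illustrative sketch of the suffixing behaviour, assuming '/contact'
    # already exists on the current website:
    #
    #   website.get_unique_path('/contact')   # => '/contact-1'
    #   # a further conflict would yield '/contact-2', and so on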

    def get_unique_key(self, string, template_module=False):
        """ Given a string, return a unique key including the module prefix.
            It will be suffixed by a counter if needed to guarantee uniqueness.
            :param string : the key to be checked for uniqueness; it may already carry the 'website.' prefix
            :param template_module : the module to prefix the key with; if not set, 'website' is used
        """
        if template_module:
            string = template_module + '.' + string
        elif not string.startswith('website.'):
            string = 'website.' + string

        # Look for a unique key
        key_copy = string
        inc = 0
        domain_static = self.get_current_website().website_domain()
        while self.env['website.page'].with_context(active_test=False).sudo().search(
                [('key', '=', key_copy)] + domain_static):
            inc += 1
            key_copy = string + (inc and "-%s" % inc or "")
        return key_copy
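
    # Illustrative sketch, assuming the key 'website.contact' is already taken:
    #
    #   website.get_unique_key('contact')               # => 'website.contact-1'
    #   website.get_unique_key('contact', 'my_module')  # => 'my_module.contact'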

    @api.model
    def page_search_dependencies(self, page_id=False):
        """ Search dependencies just for information. It will not catch 100%
            of dependencies and False positive is more than possible
            Each module could add dependences in this dict
            :returns a dictionnary where key is the 'categorie' of object related to the given
                view, and the value is the list of text and link to the resource using given page
        """
        dependencies = {}
        if not page_id:
            return dependencies

        page = self.env['website.page'].browse(int(page_id))
        website = self.env['website'].browse(self._context.get('website_id'))
        url = page.url

        # search for website_page with link
        website_page_search_dom = [('view_id.arch_db', 'ilike', url)] + website.website_domain()
        pages = self.env['website.page'].search(website_page_search_dom)
        page_key = _('Page')
        if len(pages) > 1:
            page_key = _('Pages')
        page_view_ids = []
        for p in pages:  # don't shadow the outer `page`
            dependencies.setdefault(page_key, [])
            dependencies[page_key].append({
                'text': _('Page <b>%s</b> contains a link to this page') % p.url,
                'item': p.name,
                'link': p.url,
            })
            page_view_ids.append(p.view_id.id)

        # search for ir_ui_view (not from a website_page) with link
        page_search_dom = [
            ('arch_db', 'ilike', url),
            ('id', 'not in', page_view_ids),
        ] + website.website_domain()
        views = self.env['ir.ui.view'].search(page_search_dom)
        view_key = _('Template')
        if len(views) > 1:
            view_key = _('Templates')
        for view in views:
            dependencies.setdefault(view_key, [])
            dependencies[view_key].append({
                'text': _('Template <b>%s (id:%s)</b> contains a link to this page') % (view.key or view.name, view.id),
                'link': '/web#id=%s&view_type=form&model=ir.ui.view' % view.id,
                'item': _('%s (id:%s)') % (view.key or view.name, view.id),
            })
        # search for menu with link
        menu_search_dom = [('url', 'ilike', '%s' % url)] + website.website_domain()

        menus = self.env['website.menu'].search(menu_search_dom)
        menu_key = _('Menu')
        if len(menus) > 1:
            menu_key = _('Menus')
        for menu in menus:
            dependencies.setdefault(menu_key, []).append({
                'text': _('This page is in the menu <b>%s</b>') % menu.name,
                'link': '/web#id=%s&view_type=form&model=website.menu' % menu.id,
                'item': menu.name,
            })

        return dependencies
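
    # Illustrative shape of the returned mapping (values are hypothetical):
    #
    #   {'Pages': [{'text': 'Page <b>/about</b> contains a link to this page',
    #               'item': 'About', 'link': '/about'}],
    #    'Menu':  [{'text': 'This page is in the menu <b>Top</b>',
    #               'link': '/web#id=5&view_type=form&model=website.menu',
    #               'item': 'Top'}]}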

    @api.model
    def page_search_key_dependencies(self, page_id=False):
        """ Search dependencies just for information. It will not catch 100%
            of dependencies and False positive is more than possible
            Each module could add dependences in this dict
            :returns a dictionnary where key is the 'categorie' of object related to the given
                view, and the value is the list of text and link to the resource using given page
        """
        dependencies = {}
        if not page_id:
            return dependencies

        page = self.env['website.page'].browse(int(page_id))
        website = self.env['website'].browse(self._context.get('website_id'))
        key = page.key

        # search for website_page with link
        website_page_search_dom = [
            ('view_id.arch_db', 'ilike', key),
            ('id', '!=', page.id),
        ] + website.website_domain()
        pages = self.env['website.page'].search(website_page_search_dom)
        page_key = _('Page')
        if len(pages) > 1:
            page_key = _('Pages')
        page_view_ids = []
        for p in pages:
            dependencies.setdefault(page_key, [])
            dependencies[page_key].append({
                'text': _('Page <b>%s</b> is calling this file') % p.url,
                'item': p.name,
                'link': p.url,
            })
            page_view_ids.append(p.view_id.id)

        # search for ir_ui_view (not from a website_page) with link
        page_search_dom = [
            ('arch_db', 'ilike', key),
            ('id', 'not in', page_view_ids),
            ('id', '!=', page.view_id.id),
        ] + website.website_domain()
        views = self.env['ir.ui.view'].search(page_search_dom)
        view_key = _('Template')
        if len(views) > 1:
            view_key = _('Templates')
        for view in views:
            dependencies.setdefault(view_key, [])
            dependencies[view_key].append({
                'text': _('Template <b>%s (id:%s)</b> is calling this file') % (view.key or view.name, view.id),
                'item': _('%s (id:%s)') % (view.key or view.name, view.id),
                'link': '/web#id=%s&view_type=form&model=ir.ui.view' % view.id,
            })

        return dependencies

    # ----------------------------------------------------------
    # Languages
    # ----------------------------------------------------------

    def _get_alternate_languages(self, canonical_params):
        self.ensure_one()

        if not self._is_canonical_url(canonical_params=canonical_params):
            # no hreflang on non-canonical pages
            return []

        languages = self.language_ids
        if len(languages) <= 1:
            # no hreflang if no alternate language
            return []

        langs = []
        shorts = []

        for lg in languages:
            lg_codes = lg.code.split('_')
            short = lg_codes[0]
            shorts.append(short)
            langs.append({
                'hreflang': ('-'.join(lg_codes)).lower(),
                'short': short,
                'href': self._get_canonical_url_localized(lang=lg, canonical_params=canonical_params),
            })

        # if there is only one region for a language, use only the language code
        for lang in langs:
            if shorts.count(lang['short']) == 1:
                lang['hreflang'] = lang['short']

        # add the default
        langs.append({
            'hreflang': 'x-default',
            'href': self._get_canonical_url_localized(lang=self.default_lang_id, canonical_params=canonical_params),
        })

        return langs
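
    # Illustrative output, assuming en_US (default), fr_FR and fr_BE are
    # installed (URLs are hypothetical):
    #
    #   [{'hreflang': 'en', 'short': 'en', 'href': 'https://example.com/page'},
    #    {'hreflang': 'fr-fr', 'short': 'fr', 'href': 'https://example.com/fr/page'},
    #    {'hreflang': 'fr-be', 'short': 'fr', 'href': 'https://example.com/fr_BE/page'},
    #    {'hreflang': 'x-default', 'href': 'https://example.com/page'}]
    #
    # 'en' is reduced to the bare language code because it has a single
    # region, while the two 'fr' entries keep their language-region codes.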

    # ----------------------------------------------------------
    # Utilities
    # ----------------------------------------------------------

    @api.model
    def get_current_website(self, fallback=True):
        if request and request.session.get('force_website_id'):
            website_id = self.browse(request.session['force_website_id']).exists()
            if not website_id:
                # Don't crash if the session's website was deleted
                request.session.pop('force_website_id')
            else:
                return website_id

        website_id = self.env.context.get('website_id')
        if website_id:
            return self.browse(website_id)

        # The format of `httprequest.host` is `domain:port`
        domain_name = request and request.httprequest.host or ''

        country = request.session.geoip.get('country_code') if request and request.session.geoip else False
        country_id = False
        if country:
            country_id = self.env['res.country'].search([('code', '=', country)], limit=1).id

        website_id = self._get_current_website_id(domain_name, country_id, fallback=fallback)
        return self.browse(website_id)

    @tools.cache('domain_name', 'country_id', 'fallback')
    @api.model
    def _get_current_website_id(self, domain_name, country_id, fallback=True):
        """Get the current website id.

        First find all the websites for which the configured `domain` (after
        ignoring a potential scheme) is equal to the given
        `domain_name`. If there is only one result, return it immediately.

        If no website is found for the given `domain_name`, either fall back
        to the first website found (regardless of its `domain`) or return
        False, depending on the `fallback` parameter.

        If there are multiple websites for the same `domain_name`, filter
        them by country: return the first website matching the given
        `country_id` or, if none matches, the first website found for
        `domain_name` (regardless of its country).

        :param domain_name: the domain for which we want the website.
            In regard to the `url_parse` method, only the `netloc` part should
            be given here, no `scheme`.
        :type domain_name: string

        :param country_id: id of the country for which we want the website
        :type country_id: int

        :param fallback: if True and no website is found for the specified
            `domain_name`, return the first website (without filtering)
        :type fallback: bool

        :return: id of the found website, or False if no website is found and
            `fallback` is False (or if no website exists at all)
        :rtype: int or False
        """
        def _remove_port(domain_name):
            return (domain_name or '').split(':')[0]

        def _filter_domain(website, domain_name, ignore_port=False):
            """Ignore `scheme` from the `domain`, just match the `netloc` which
            is host:port in the version of `url_parse` we use."""
            # Here we add http:// to the domain if it's not set because
            # `url_parse` expects it to be set to correctly return the `netloc`.
            website_domain = urls.url_parse(website._get_http_domain()).netloc
            if ignore_port:
                website_domain = _remove_port(website_domain)
                domain_name = _remove_port(domain_name)
            return website_domain.lower() == (domain_name or '').lower()

        # Sort on country_group_ids so that we fall back on a generic website:
        # websites with empty country_group_ids will be first.
        found_websites = self.search([
            ('domain', 'ilike', _remove_port(domain_name))
        ]).sorted('country_group_ids')
        # Filter for the exact domain (to filter out potential subdomains) due
        # to the use of ilike.
        websites = found_websites.filtered(
            lambda w: _filter_domain(w, domain_name))
        # If there is no domain matching for the given port, ignore the port.
        websites = websites or found_websites.filtered(
            lambda w: _filter_domain(w, domain_name, ignore_port=True))

        if not websites:
            if not fallback:
                return False
            return self.search([], limit=1).id
        elif len(websites) == 1:
            return websites.id
        else:  # > 1 website with the same domain
            country_specific_websites = websites.filtered(
                lambda website: country_id in website.country_group_ids.mapped('country_ids').ids)
            return country_specific_websites[0].id if country_specific_websites else websites[0].id
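
    # Resolution sketch (hypothetical data): with website A ('example.com')
    # and website B ('example.com:8069'), a request for 'example.com:8069'
    # matches B on the exact pass; a request for 'example.com:80' matches
    # nothing with its port, so the port is ignored and both A and B remain,
    # leaving country groups (then ordering) to break the tie.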

    def _force(self):
        self._force_website(self.id)

    def _force_website(self, website_id):
        if request:
            request.session['force_website_id'] = website_id and str(website_id).isdigit() and int(website_id)

    @api.model
    def is_publisher(self):
        return self.env['ir.model.access'].check('ir.ui.view', 'write', False)

    @api.model
    def is_user(self):
        return self.env['ir.model.access'].check('ir.ui.menu', 'read', False)

    @api.model
    def is_public_user(self):
        return request.env.user.id == request.website.user_id.id

    @api.model
    def viewref(self, view_id, raise_if_not_found=True):
        ''' Given an xml_id or a view_id, return the corresponding view record.
            In case of website context, return the most specific one.

            If no website_id is in the context, it will return the generic view,
            instead of a random one like `get_view_id`.

            Look also for archived views, no matter the context.

            :param view_id: either a string xml_id or an integer view_id
            :param raise_if_not_found: should the method raise an error if no view found
            :return: The view record or empty recordset
        '''
        View = self.env['ir.ui.view']
        view = View
        if isinstance(view_id, str):
            if 'website_id' in self._context:
                domain = [('key', '=', view_id)] + self.env['website'].website_domain(self._context.get('website_id'))
                order = 'website_id'
            else:
                domain = [('key', '=', view_id)]
                order = View._order
            views = View.with_context(active_test=False).search(domain, order=order)
            if views:
                view = views.filter_duplicate()
            else:
                # we handle the raise below
                view = self.env.ref(view_id, raise_if_not_found=False)
                # self.env.ref might return something else than an ir.ui.view (eg: a theme.ir.ui.view)
                if not view or view._name != 'ir.ui.view':
                    # make sure we always return a recordset
                    view = View
        elif isinstance(view_id, int):
            view = View.browse(view_id)
        else:
            raise ValueError('Expecting a string or an integer, not a %s.' % type(view_id))

        if not view and raise_if_not_found:
            raise ValueError('No record found for unique ID %s. It may have been deleted.' % view_id)
        return view
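
    # Usage sketch (hypothetical xml_id):
    #
    #   view = env['website'].with_context(website_id=1).viewref('website.footer_custom')
    #   # returns the website-1-specific copy if one exists,
    #   # the generic 'website.footer_custom' view otherwise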

    @api.model
    def get_template(self, template):
        View = self.env['ir.ui.view']
        if isinstance(template, int):
            view_id = template
        else:
            if '.' not in template:
                template = 'website.%s' % template
            view_id = View.get_view_id(template)
        if not view_id:
            raise NotFound
        return View.browse(view_id)

    @api.model
    def pager(self, url, total, page=1, step=30, scope=5, url_args=None):
        return pager(url, total, page=page, step=step, scope=scope, url_args=url_args)

    def rule_is_enumerable(self, rule):
        """ Checks that it is possible to generate sensible GET queries for
            a given rule (if the endpoint matches its own requirements)
            :type rule: werkzeug.routing.Rule
            :rtype: bool
        """
        endpoint = rule.endpoint
        methods = endpoint.routing.get('methods') or ['GET']

        converters = list(rule._converters.values())
        if not ('GET' in methods and endpoint.routing['type'] == 'http'
                and endpoint.routing['auth'] in ('none', 'public')
                and endpoint.routing.get('website', False) and all(
                    hasattr(converter, 'generate')
                    for converter in converters)):
            return False

        # don't list routes that have arguments without a default value or a converter
        spec = inspect.getfullargspec(endpoint.method.original_func)

        # remove self and arguments having a default value
        defaults_count = len(spec.defaults or [])
        args = spec.args[1:(-defaults_count or None)]

        # check that all args have a converter
        return all((arg in rule._converters) for arg in args)

    def enumerate_pages(self, query_string=None, force=False):
        """ Available pages in the website/CMS. This is mostly used for links
            generation and can be overridden by modules setting up new HTML
            controllers for dynamic pages (e.g. blog).
            By default, returns template views marked as pages.
            :param str query_string: a (user-provided) string, fetches pages
                                     matching the string
            :returns: a list of mappings with two keys: ``name`` is the displayable
                      name of the resource (page), ``url`` is the absolute URL
                      of the same.
            :rtype: list({name: str, url: str})
        """

        router = request.httprequest.app.get_db_router(request.db)
        # Track URLs already yielded to avoid duplicate entries
        url_set = set()

        sitemap_endpoint_done = set()

        for rule in router.iter_rules():
            if 'sitemap' in rule.endpoint.routing:
                if rule.endpoint in sitemap_endpoint_done:
                    continue
                sitemap_endpoint_done.add(rule.endpoint)

                func = rule.endpoint.routing['sitemap']
                if func is False:
                    continue
                for loc in func(self.env, rule, query_string):
                    yield loc
                continue

            if not self.rule_is_enumerable(rule):
                continue

            converters = rule._converters or {}
            if query_string and not converters and (
                    query_string not in rule.build({}, append_unknown=False)[1]):
                continue

            values = [{}]
            # converters with a domain are processed after the other ones
            convitems = sorted(
                converters.items(),
                key=lambda x: (hasattr(x[1], 'domain') and (x[1].domain != '[]'),
                               rule._trace.index((True, x[0]))))

            for (i, (name, converter)) in enumerate(convitems):
                newval = []
                for val in values:
                    query = i == len(convitems) - 1 and query_string
                    if query:
                        r = "".join([
                            x[1] for x in rule._trace[1:] if not x[0]
                        ])  # remove model converter from route
                        query = sitemap_qs2dom(
                            query, r, self.env[converter.model]._rec_name)
                        if query == FALSE_DOMAIN:
                            continue
                    for value_dict in converter.generate(uid=self.env.uid,
                                                         dom=query,
                                                         args=val):
                        newval.append(val.copy())
                        value_dict[name] = value_dict['loc']
                        del value_dict['loc']
                        newval[-1].update(value_dict)
                values = newval

            for value in values:
                domain_part, url = rule.build(value, append_unknown=False)
                if not query_string or query_string.lower() in url.lower():
                    if url in url_set:
                        continue
                    url_set.add(url)
                    yield {'loc': url}

        # '/' already has an http.route and is in the routing_map, so it will already have an entry in the xml
        domain = [('url', '!=', '/')]
        if not force:
            domain += [('website_indexed', '=', True)]
            # is_visible
            domain += [('website_published', '=', True), '|',
                       ('date_publish', '=', False),
                       ('date_publish', '<=', fields.Datetime.now())]

        if query_string:
            domain += [('url', 'like', query_string)]

        pages = self.get_website_pages(domain)

        for page in pages:
            record = {
                'loc': page['url'],
                'id': page['id'],
                'name': page['name']
            }
            if page.view_id and page.view_id.priority != 16:
                record['priority'] = min(
                    round(page.view_id.priority / 32.0, 1), 1)
            if page['write_date']:
                record['lastmod'] = page['write_date'].date()
            yield record
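
    # Usage sketch, e.g. for building a sitemap-like listing (illustrative):
    #
    #   for page in website.enumerate_pages(query_string='shop'):
    #       print(page['loc'])   # e.g. '/shop', '/shop/product-1', ...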

    def get_website_pages(self, domain=None, order='name', limit=None):
        # don't mutate the caller's list (or a mutable default argument)
        domain = (domain or []) + self.get_current_website().website_domain()
        pages = self.env['website.page'].search(domain, order=order, limit=limit)
        return pages

    def search_pages(self, needle=None, limit=None):
        name = slugify(needle, max_length=50, path=True)
        res = []
        for page in self.enumerate_pages(query_string=name, force=True):
            res.append(page)
            if len(res) == limit:
                break
        return res

    @api.model
    def image_url(self, record, field, size=None):
        """ Returns a local url that points to the image field of a given browse record. """
        sudo_record = record.sudo()
        sha = hashlib.sha1(
            str(getattr(sudo_record,
                        '__last_update')).encode('utf-8')).hexdigest()[0:7]
        size = '' if size is None else '/%s' % size
        return '/web/image/%s/%s/%s%s?unique=%s' % (record._name, record.id,
                                                    field, size, sha)
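
    # Illustrative result (record, hash and size are hypothetical):
    #
    #   website.image_url(partner, 'image_128', size='128x128')
    #   # => '/web/image/res.partner/7/image_128/128x128?unique=3f2a9c1'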

    def get_cdn_url(self, uri):
        self.ensure_one()
        if not uri:
            return ''
        cdn_url = self.cdn_url
        cdn_filters = (self.cdn_filters or '').splitlines()
        for flt in cdn_filters:
            if flt and re.match(flt, uri):
                return urls.url_join(cdn_url, uri)
        return uri
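
    # Illustrative sketch, assuming cdn_url is 'https://cdn.example.com/' and
    # cdn_filters contains the line '^/web/image/':
    #
    #   website.get_cdn_url('/web/image/12-abc/logo.png')
    #   # => 'https://cdn.example.com/web/image/12-abc/logo.png'
    #   website.get_cdn_url('/my/page')  # no filter matches => '/my/page'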

    @api.model
    def action_dashboard_redirect(self):
        if self.env.user.has_group('base.group_system') or self.env.user.has_group('website.group_website_designer'):
            return self.env.ref('website.backend_dashboard').read()[0]
        return self.env.ref('website.action_website').read()[0]

    def button_go_website(self):
        self._force()
        return {
            'type': 'ir.actions.act_url',
            'url': '/',
            'target': 'self',
        }

    def _get_http_domain(self):
        """Get the domain of the current website, prefixed by http if no
        scheme is specified.

        Empty string if no domain is specified on the website.
        """
        self.ensure_one()
        if not self.domain:
            return ''
        res = urls.url_parse(self.domain)
        return 'http://' + self.domain if not res.scheme else self.domain
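
    # Illustrative behaviour (domains are hypothetical):
    #
    #   domain = 'example.com'          # => 'http://example.com'
    #   domain = 'https://example.com'  # => 'https://example.com' (unchanged)
    #   domain = False                  # => ''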

    def get_base_url(self):
        self.ensure_one()
        return self._get_http_domain() or super(BaseModel, self).get_base_url()

    def _get_canonical_url_localized(self, lang, canonical_params):
        """Returns the canonical URL for the current request with translatable
        elements appropriately translated in `lang`.

        If `request.endpoint` is not set, the current `path` is returned instead.

        `url_quote_plus` is applied on the returned path.
        """
        self.ensure_one()
        if request.endpoint:
            router = request.httprequest.app.get_db_router(request.db).bind('')
            arguments = dict(request.endpoint_arguments)
            for key, val in list(arguments.items()):
                if isinstance(val, models.BaseModel):
                    if val.env.context.get('lang') != lang.url_code:
                        arguments[key] = val.with_context(lang=lang.url_code)
            path = router.build(request.endpoint, arguments)
        else:
            # The build method returns a quoted URL so convert in this case for consistency.
            path = urls.url_quote_plus(request.httprequest.path, safe='/')
        lang_path = ('/' + lang.url_code) if lang != self.default_lang_id else ''
        canonical_query_string = '?%s' % urls.url_encode(canonical_params) if canonical_params else ''
        return self.get_base_url() + lang_path + path + canonical_query_string

    def _get_canonical_url(self, canonical_params):
        """Returns the canonical URL for the current request."""
        self.ensure_one()
        return self._get_canonical_url_localized(
            lang=request.lang, canonical_params=canonical_params)

    def _is_canonical_url(self, canonical_params):
        """Returns whether the current request URL is canonical."""
        self.ensure_one()
        # Compare OrderedMultiDict because the order matters: there must be a
        # single canonical URL, not one per parameter permutation.
        params = request.httprequest.args
        canonical_params = canonical_params or OrderedMultiDict()
        if params != canonical_params:
            return False
        # Compare URL at the first rerouting iteration (if available) because
        # it's the one with the language in the path.
        # It is important to also test the domain of the current URL.
        current_url = request.httprequest.url_root[:-1] + (
            hasattr(request, 'rerouting') and request.rerouting[0] or request.httprequest.path)
        canonical_url = self._get_canonical_url_localized(
            lang=request.lang, canonical_params=None)
        # A request path with quotable characters (such as ",") is never
        # canonical because request.httprequest.base_url is always unquoted,
        # and canonical url is always quoted, so it is never possible to tell
        # if the current URL is indeed canonical or not.
        return current_url == canonical_url