Example #1
0
    def _recycle_records(self, batch_commits=False):
        self.env.flush_all()
        records_to_clean = []
        is_test = bool(config['test_enable'] or config['test_file'])

        existing_recycle_records = self.env[
            'data_recycle.record'].with_context(active_test=False).search([
                ('recycle_model_id', 'in', self.ids)
            ])
        mapped_existing_records = defaultdict(list)
        for recycle_record in existing_recycle_records:
            mapped_existing_records[recycle_record.recycle_model_id].append(
                recycle_record.res_id)

        for recycle_model in self:
            rule_domain = ast.literal_eval(
                recycle_model.domain
            ) if recycle_model.domain and recycle_model.domain != '[]' else []
            if recycle_model.time_field_id and recycle_model.time_field_delta and recycle_model.time_field_delta_unit:
                if recycle_model.time_field_id.ttype == 'date':
                    now = fields.Date.today()
                else:
                    now = fields.Datetime.now()
                delta = relativedelta(
                    **{
                        recycle_model.time_field_delta_unit:
                        recycle_model.time_field_delta
                    })
                rule_domain = expression.AND([
                    rule_domain,
                    [(recycle_model.time_field_id.name, '<=', now - delta)]
                ])
            model = self.env[recycle_model.res_model_name]
            if recycle_model.include_archived:
                model = model.with_context(active_test=False)
            records_to_recycle = model.search(rule_domain)
            records_to_create = [
                {
                    'res_id': record.id,
                    'recycle_model_id': recycle_model.id,
                } for record in records_to_recycle
                if record.id not in mapped_existing_records[recycle_model]
            ]

            if recycle_model.recycle_mode == 'automatic':
                for records_to_create_batch in split_every(
                        DR_CREATE_STEP_AUTO, records_to_create):
                    self.env['data_recycle.record'].create(
                        records_to_create_batch).action_validate()
                    if batch_commits and not is_test:
                        # Commit after each batch iteration to avoid complete rollback on timeout as
                        # this can create lots of new records.
                        self.env.cr.commit()
            else:
                records_to_clean = records_to_clean + records_to_create
        for records_to_clean_batch in split_every(DR_CREATE_STEP_MANUAL,
                                                  records_to_clean):
            self.env['data_recycle.record'].create(records_to_clean_batch)
            if batch_commits and not is_test:
                self.env.cr.commit()
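
Every example in this collection batches work through a split_every(n, iterable, piece_maker=tuple) helper that slices an arbitrary iterable into pieces of at most n items. As a point of reference, here is a minimal standalone sketch of that behaviour (the actual odoo.tools implementation may differ in its details):

from itertools import islice

def split_every(n, iterable, piece_maker=tuple):
    # Yield successive pieces of at most `n` items from `iterable`.
    # `piece_maker` controls the container type of each piece (tuple by
    # default, list when a mutable batch is needed, as in the Firebase examples).
    iterator = iter(iterable)
    piece = piece_maker(islice(iterator, n))
    while piece:
        yield piece
        piece = piece_maker(islice(iterator, n))

# list(split_every(2, [1, 2, 3, 4, 5])) == [(1, 2), (3, 4), (5,)]
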
Example #2
0
    def _refresh_statistics(self):
        super(SocialLivePostLinkedin, self)._refresh_statistics()
        accounts = self.env['social.account'].search([('media_type', '=',
                                                       'linkedin')])

        for account in accounts:
            linkedin_post_ids = self.env['social.live.post'].sudo().search(
                [('account_id', '=', account.id)],
                order='create_date DESC',
                limit=1000)
            if not linkedin_post_ids:
                continue

            linkedin_post_ids = {
                post.linkedin_post_id: post
                for post in linkedin_post_ids
            }

            session = requests.Session()

            # The LinkedIn API limits the query parameters to 4KB.
            # A LinkedIn URN is approximately 40 characters,
            # so we keep a big margin and split into batches of 50 LinkedIn posts.
            for batch_linkedin_post_ids in tools.split_every(
                    50, linkedin_post_ids):
                endpoint = url_join(
                    self.env['social.media']._LINKEDIN_ENDPOINT,
                    'organizationalEntityShareStatistics?shares=List(%s)' %
                    ','.join(map(quote, batch_linkedin_post_ids)))

                response = session.get(
                    endpoint,
                    params={
                        'q': 'organizationalEntity',
                        'organizationalEntity': account.linkedin_account_urn,
                        'count': 50
                    },
                    headers=account._linkedin_bearer_headers(),
                    timeout=10)

                if response.status_code != 200 or 'elements' not in response.json():
                    account.sudo().is_media_disconnected = True
                    break

                for stats in response.json()['elements']:
                    urn = stats.get('share')
                    stats = stats.get('totalShareStatistics')

                    if not urn or not stats or urn not in batch_linkedin_post_ids:
                        continue

                    linkedin_post_ids[urn].update({
                        'engagement':
                        stats.get('likeCount', 0) +
                        stats.get('commentCount', 0) +
                        stats.get('shareCount', 0)
                    })
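
The hard-coded batches of 50 in the loop above follow from a simple budget: at roughly 40 characters per LinkedIn URN, 50 quoted URNs stay around 2KB, leaving a comfortable margin under the ~4KB query-string limit mentioned in the comment. A hedged sketch of that sizing arithmetic (the constants are the comment's estimates, not documented API limits):

QUERY_STRING_LIMIT = 4096   # approximate limit mentioned in the comment above
URN_LENGTH_ESTIMATE = 40    # rough length of one LinkedIn URN
SAFETY_FACTOR = 2           # large margin for the other query parameters

max_posts_per_request = QUERY_STRING_LIMIT // (URN_LENGTH_ESTIMATE * SAFETY_FACTOR)
# 4096 // 80 == 51, hence the batches of 50 posts used above.
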
Example #3
0
    def _split_messages(self):
        """Split the messages into batches to send, grouped by mail server."""
        groups = defaultdict(list)
        # Group the record ids per mail server (assumed grouping key; the
        # population of `groups` was missing from the original snippet).
        for record in self:
            groups[record.mail_server_id.id].append(record.id)

        batch_size = int(self.env['ir.config_parameter'].sudo().get_param(
            'mail.session.batch.size', 1000))
        for record_ids in groups.values():
            for message_batch in tools.split_every(batch_size, record_ids):
                yield message_batch
Example #4
0
    def _notify_members(self, message):
        """Send the given message to all members of the mail group (except the author)."""
        self.ensure_one()

        if message.mail_group_id != self:
            raise UserError(_('The group of the message does not match.'))

        if not message.mail_message_id.reply_to:
            _logger.error('The alias or the catchall domain is missing, group might not work properly.')

        base_url = self.get_base_url()
        body = self.env['mail.render.mixin']._replace_local_links(message.body)
        access_token = self._generate_group_access_token()
        mail_values = []
        for batch_members in tools.split_every(GROUP_SEND_BATCH_SIZE, self.member_ids):
            for member in batch_members:
                if member.email_normalized == message.email_from_normalized:
                    # Do not send the email to its author
                    continue

                # SMTP headers related to the subscription
                email_url_encoded = urls.url_quote(member.email_normalized)
                headers = {
                    'List-Archive': f'<{base_url}/groups/{slug(self)}>',
                    'List-Subscribe': f'<{base_url}/groups?email={email_url_encoded}>',
                    'List-Unsubscribe': f'<{base_url}/groups?unsubscribe&email={email_url_encoded}>',
                }

                # Add the footer (member specific) in the body
                template_values = {
                    'mailto': f'{self.alias_name}@{self.alias_domain}',
                    'group_url': f'{base_url}/groups/{slug(self)}',
                    'unsub_url':  f'{base_url}/groups?unsubscribe&group_id={self.id}&token={access_token}'
                }
                template = self.env.ref('mail_group.mail_group_footer')
                footer = template._render(template_values, engine='ir.qweb', minimal_qcontext=True)
                member_body = tools.append_content_to_html(body, footer)

                mail_values.append({
                    'auto_delete': True,
                    'attachment_ids': message.attachment_ids.ids,
                    'body_html': member_body,
                    'email_from': message.email_from,
                    'email_to': member.email,
                    'headers': json.dumps(headers),
                    'mail_message_id': message.mail_message_id.id,
                    'message_id': message.mail_message_id.message_id,
                    'model': 'mail.group',
                    'reply_to': message.mail_message_id.reply_to,
                    'res_id': self.id,
                    'subject': message.subject,
                })

            if mail_values:
                self.env['mail.mail'].sudo().create(mail_values)
                # Reset the accumulator so the next batch does not re-create
                # the mails of the previous batches.
                mail_values = []
Example #5
0
    def _split_by_mail_configuration(self):
        """Group the <mail.mail> based on their "email_from" and their "mail_server_id".

        The <mail.mail> records share the "same sending configuration" when they use
        the same mail server or the same email_from. For performance, one SMTP session
        can be reused for a whole batch, so the records are grouped by the parameters
        that determine which mail server will be used.

        The same "sending configuration" may be yielded several times in order to limit
        the batch size according to the `mail.session.batch.size` system parameter.

        Yields tuples of
            mail_server_id, smtp_from, batch of <mail.mail> ids
        """
        mail_values = self.read(['id', 'email_from', 'mail_server_id'])

        # First group the <mail.mail> per mail_server_id and per email_from
        group_per_email_from = defaultdict(list)
        for values in mail_values:
            mail_server_id = values['mail_server_id'][0] if values[
                'mail_server_id'] else False
            group_per_email_from[(mail_server_id,
                                  values['email_from'])].append(values['id'])

        # Then find the mail server for each email_from and group the <mail.mail>
        # per mail_server_id and smtp_from
        mail_servers = self.env['ir.mail_server'].sudo().search(
            [], order='sequence')
        group_per_smtp_from = defaultdict(list)
        for (mail_server_id,
             email_from), mail_ids in group_per_email_from.items():
            if not mail_server_id:
                mail_server, smtp_from = self.env[
                    'ir.mail_server']._find_mail_server(
                        email_from, mail_servers)
                mail_server_id = mail_server.id if mail_server else False
            else:
                smtp_from = email_from

            group_per_smtp_from[(mail_server_id, smtp_from)].extend(mail_ids)

        sys_params = self.env['ir.config_parameter'].sudo()
        batch_size = int(sys_params.get_param('mail.session.batch.size', 1000))

        for (mail_server_id,
             smtp_from), record_ids in group_per_smtp_from.items():
            for batch_ids in tools.split_every(batch_size, record_ids):
                yield mail_server_id, smtp_from, batch_ids
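
A typical consumer of this generator opens one SMTP session per yielded group and reuses it for the whole batch. The sketch below is a hypothetical caller; the connect() and _send() parameters shown are assumptions about the surrounding mail code, not taken from this example:

    def _send_in_batches(self):
        # Hypothetical caller: one SMTP session per (mail_server_id, smtp_from)
        # group, reused for every <mail.mail> id in the batch.
        IrMailServer = self.env['ir.mail_server']
        for mail_server_id, smtp_from, batch_ids in self._split_by_mail_configuration():
            smtp_session = IrMailServer.connect(
                mail_server_id=mail_server_id, smtp_from=smtp_from)
            try:
                self.browse(batch_ids)._send(smtp_session=smtp_session)
            finally:
                smtp_session.quit()
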
Example #6
0
    def _split_by_server(self):
        """Returns an iterator of pairs `(mail_server_id, record_ids)` for current recordset.

        The same `mail_server_id` may repeat in order to limit batch size according to
        the `mail.session.batch.size` system parameter.
        """
        groups = defaultdict(list)
        # Turn prefetch OFF to avoid MemoryError on very large mail queues, we only care
        # about the mail server ids in this case.
        for mail in self.with_context(prefetch_fields=False):
            groups[mail.mail_server_id.id].append(mail.id)
        sys_params = self.env['ir.config_parameter'].sudo()
        batch_size = int(sys_params.get_param('mail.session.batch.size', 1000))
        for server_id, record_ids in groups.items():
            for mail_batch in tools.split_every(batch_size, record_ids):
                yield server_id, mail_batch
Example #7
0
    def _compute_allowed_picking_ids(self):
        # Backport of f329de26: allowed_picking_ids is useless, view_stock_landed_cost_form no longer uses it,
        # the field and its compute are kept since this is a stable version. Still, this compute has been made
        # more resilient to MemoryErrors.
        valued_picking_ids_per_company = defaultdict(list)
        if self.company_id:
            self.env.cr.execute(
                """SELECT sm.picking_id, sm.company_id
                     FROM stock_move AS sm
               INNER JOIN stock_valuation_layer AS svl ON svl.stock_move_id = sm.id
                    WHERE sm.picking_id IS NOT NULL AND sm.company_id IN %s
                 GROUP BY sm.picking_id, sm.company_id""",
                [tuple(self.company_id.ids)])
            for res in self.env.cr.fetchall():
                valued_picking_ids_per_company[res[1]].append(res[0])
        for cost in self:
            n = 5000
            cost.allowed_picking_ids = valued_picking_ids_per_company[
                cost.company_id.id][:n]
            for ids_chunk in tools.split_every(
                    n, valued_picking_ids_per_company[cost.company_id.id][n:]):
                cost.allowed_picking_ids = [(4, id_) for id_ in ids_chunk]
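
The chunked assignment above leans on the x2many command format: writing a plain list of ids replaces the whole relation, while (4, id) commands only link additional records. Splitting the tail of the id list into chunks therefore keeps each individual write small. A condensed restatement of that pattern, with picking_ids standing in for the precomputed id list:

    # a plain list of ids (like (6, 0, ids))  -> replace the whole relation
    # (4, id)                                 -> link one more record, keep the rest
    cost.allowed_picking_ids = picking_ids[:5000]
    for ids_chunk in tools.split_every(5000, picking_ids[5000:]):
        cost.allowed_picking_ids = [(4, id_) for id_ in ids_chunk]
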
Example #8
0
    def _firebase_send_message_from_iap(self, data, visitors):
        social_iap_endpoint = self.env['ir.config_parameter'].sudo().get_param(
            'social.social_iap_endpoint',
            self.env['social.media']._DEFAULT_SOCIAL_IAP_ENDPOINT)
        batch_size = 100

        tokens = visitors.mapped('push_token')
        data.update({
            'db_uuid':
            self.env['ir.config_parameter'].sudo().get_param('database.uuid')
        })
        for tokens_batch in tools.split_every(batch_size,
                                              tokens,
                                              piece_maker=list):
            batch_data = dict(data)
            batch_data['tokens'] = tokens_batch
            iap_tools.iap_jsonrpc(url_join(
                social_iap_endpoint,
                '/iap/social_push_notifications/firebase_send_message'),
                                  params=batch_data)

        return []
Example #9
0
    def _procure_orderpoint_confirm(self, use_new_cursor=False, company_id=None, raise_user_error=True):
        """ Create procurements based on orderpoints.
        :param bool use_new_cursor: if set, use a dedicated cursor and auto-commit after processing
            1000 orderpoints.
            This is appropriate for batch jobs only.
        """
        self = self.with_company(company_id)
        orderpoints_noprefetch = self.read(['id'])
        orderpoints_noprefetch = [orderpoint['id'] for orderpoint in orderpoints_noprefetch]

        for orderpoints_batch in split_every(1000, orderpoints_noprefetch):
            if use_new_cursor:
                cr = registry(self._cr.dbname).cursor()
                self = self.with_env(self.env(cr=cr))
            orderpoints_batch = self.env['stock.warehouse.orderpoint'].browse(orderpoints_batch)
            # ensure that qty_* which depends on datetime.now() are correctly
            # recomputed
            orderpoints_batch._compute_qty_to_order()
            orderpoints_exceptions = []
            while orderpoints_batch:
                procurements = []
                for orderpoint in orderpoints_batch:
                    if float_compare(orderpoint.qty_to_order, 0.0, precision_rounding=orderpoint.product_uom.rounding) == 1:
                        date = datetime.combine(orderpoint.lead_days_date, time.min)
                        values = orderpoint._prepare_procurement_values(date=date)
                        procurements.append(self.env['procurement.group'].Procurement(
                            orderpoint.product_id, orderpoint.qty_to_order, orderpoint.product_uom,
                            orderpoint.location_id, orderpoint.name, orderpoint.name,
                            orderpoint.company_id, values))

                try:
                    with self.env.cr.savepoint():
                        self.env['procurement.group'].with_context(from_orderpoint=True).run(procurements, raise_user_error=raise_user_error)
                except ProcurementException as errors:
                    for procurement, error_msg in errors.procurement_exceptions:
                        orderpoints_exceptions += [(procurement.values.get('orderpoint_id'), error_msg)]
                    failed_orderpoints = self.env['stock.warehouse.orderpoint'].concat(*[o[0] for o in orderpoints_exceptions])
                    if not failed_orderpoints:
                        _logger.error('Unable to process orderpoints')
                        break
                    orderpoints_batch -= failed_orderpoints

                except OperationalError:
                    if use_new_cursor:
                        cr.rollback()
                        continue
                    else:
                        raise
                else:
                    orderpoints_batch._post_process_scheduler()
                    break

            # Log an activity on product template for failed orderpoints.
            for orderpoint, error_msg in orderpoints_exceptions:
                existing_activity = self.env['mail.activity'].search([
                    ('res_id', '=', orderpoint.product_id.product_tmpl_id.id),
                    ('res_model_id', '=', self.env.ref('product.model_product_template').id),
                    ('note', '=', error_msg)])
                if not existing_activity:
                    orderpoint.product_id.product_tmpl_id.activity_schedule(
                        'mail.mail_activity_data_warning',
                        note=error_msg,
                        user_id=orderpoint.product_id.responsible_id.id or SUPERUSER_ID,
                    )

            if use_new_cursor:
                cr.commit()
                cr.close()

        return {}
Example #10
0
    def _split_batch(self):
        batch_size = int(self.env['ir.config_parameter'].sudo().get_param(
            'sms.session.batch.size', 500))
        for sms_batch in tools.split_every(batch_size, self.ids):
            yield sms_batch
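
The generator above is meant to be driven by the SMS sending code, one batch at a time. A hypothetical consumer (the send()/_send() names and parameters are illustrative, not taken from this snippet) could look like:

    def send(self, auto_commit=False, raise_exception=False):
        # Hypothetical consumer: process each batch independently so a failure
        # or a commit only affects one batch of SMS at a time.
        for batch_ids in self._split_batch():
            self.browse(batch_ids)._send(raise_exception=raise_exception)
            if auto_commit:
                self.env.cr.commit()
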
Example #11
0
    def _notify_members(self, message):
        """Send the given message to all members of the mail group (except the author)."""
        self.ensure_one()

        if message.mail_group_id != self:
            raise UserError(_('The group of the message does not match.'))

        if not message.mail_message_id.reply_to:
            _logger.error(
                'The alias or the catchall domain is missing, group might not work properly.'
            )

        base_url = self.get_base_url()
        body = self.env['mail.render.mixin']._replace_local_links(message.body)
        access_token = self._generate_group_access_token()
        mail_values = []

        # Email added in a dict to be sure to send only once the email to each address
        member_emails = {
            email_normalize(member.email): member.email
            for member in self.member_ids
        }

        for batch_email_member in tools.split_every(GROUP_SEND_BATCH_SIZE,
                                                    member_emails.items()):
            for email_member_normalized, email_member in batch_email_member:
                if email_member_normalized == message.email_from_normalized:
                    # Do not send the email to its author
                    continue

                # SMTP headers related to the subscription
                email_url_encoded = urls.url_quote(email_member)
                headers = {
                    **self._notify_email_header_dict(),
                    'List-Archive': f'<{base_url}/groups/{slug(self)}>',
                    'List-Subscribe':
                    f'<{base_url}/groups?email={email_url_encoded}>',
                    'List-Unsubscribe':
                    f'<{base_url}/groups?unsubscribe&email={email_url_encoded}>',
                    'Precedence': 'list',
                    'X-Auto-Response-Suppress':
                    'OOF',  # avoid out-of-office replies from MS Exchange
                }
                if self.alias_name and self.alias_domain:
                    headers.update({
                        'List-Id':
                        f'<{self.alias_name}.{self.alias_domain}>',
                        'List-Post':
                        f'<mailto:{self.alias_name}@{self.alias_domain}>',
                        'X-Forge-To':
                        f'"{self.name}" <{self.alias_name}@{self.alias_domain}>',
                    })

                if message.mail_message_id.parent_id:
                    headers[
                        'In-Reply-To'] = message.mail_message_id.parent_id.message_id

                # Add the footer (member specific) in the body
                template_values = {
                    'mailto':
                    f'{self.alias_name}@{self.alias_domain}',
                    'group_url':
                    f'{base_url}/groups/{slug(self)}',
                    'unsub_label':
                    f'{base_url}/groups?unsubscribe',
                    'unsub_url':
                    f'{base_url}/groups?unsubscribe&group_id={self.id}&token={access_token}&email={email_url_encoded}',
                }
                template = self.env.ref('mail_group.mail_group_footer')
                footer = template._render(template_values,
                                          engine='ir.qweb',
                                          minimal_qcontext=True)
                member_body = tools.append_content_to_html(body,
                                                           footer,
                                                           plaintext=False)

                mail_values.append({
                    'auto_delete': True,
                    'attachment_ids': message.attachment_ids.ids,
                    'body_html': member_body,
                    'email_from': message.email_from,
                    'email_to': email_member,
                    'headers': json.dumps(headers),
                    'mail_message_id': message.mail_message_id.id,
                    'message_id': message.mail_message_id.message_id,
                    'model': 'mail.group',
                    'reply_to': message.mail_message_id.reply_to,
                    'res_id': self.id,
                    'subject': message.subject,
                })

            if mail_values:
                self.env['mail.mail'].sudo().create(mail_values)
                # Reset the accumulator so the next batch does not re-create
                # the mails of the previous batches.
                mail_values = []
Example #12
0
    def _firebase_send_message_from_configuration(self, data, visitors):
        """ This method now has a dual implementation to handle cases when the firebase_admin
        python library is not installed / not in the correct version.

        1. When firebase_admin is available:
           Sends messages by batch of 100 (max limit from firebase).
           Returns a tuple containing:
              - The matched website.visitors (search_read records).
              - A list of firebase_admin.messaging.BatchResponse to be handled by the caller.

        2. When firebase_admin is NOT available:
           Sends messages one by one using the firebase REST API.
           (Which is what firebase_admin does under the hood anyway)
           It requires a bearer token for authentication that we obtain using the google_auth library.
           Returns a tuple containing:
              - The matched website.visitors (search_read records).
              - An empty list. """

        if not visitors:
            return [], []

        if not self.firebase_admin_key_file:
            raise UserError(
                _("Firebase Admin Key File is missing from the configuration.")
            )

        results = []
        tokens = visitors.mapped('push_token')
        if firebase_admin and self._check_firebase_version():
            self._init_firebase_app()
            batch_size = 100

            for tokens_batch in tools.split_every(batch_size,
                                                  tokens,
                                                  piece_maker=list):
                firebase_message = messaging.MulticastMessage(
                    data=data, tokens=tokens_batch)
                results.append(messaging.send_multicast(firebase_message))
        elif service_account:
            firebase_data = json.loads(
                base64.b64decode(self.firebase_admin_key_file).decode())
            firebase_credentials = service_account.Credentials.from_service_account_info(
                firebase_data,
                scopes=['https://www.googleapis.com/auth/firebase.messaging'])
            firebase_credentials.refresh(google_requests.Request())
            auth_token = firebase_credentials.token

            for token in tokens:
                requests.post(
                    f'https://fcm.googleapis.com/v1/projects/{firebase_data["project_id"]}/messages:send',
                    json={'message': {
                        'data': data,
                        'token': token
                    }},
                    headers={'authorization': f'Bearer {auth_token}'},
                    timeout=10)
        else:
            raise UserError(
                _('You have to either install "firebase_admin>=2.17.0" or '
                  '"google_auth>=1.18.0" to be able to send push '
                  'notifications.'))

        return tokens, results
Example #13
0
    def _procure_orderpoint_confirm(self,
                                    use_new_cursor=False,
                                    company_id=None,
                                    raise_user_error=True):
        """ Create procurements based on orderpoints.
        :param bool use_new_cursor: if set, use a dedicated cursor and auto-commit after processing
            1000 orderpoints.
            This is appropriate for batch jobs only.
        """
        self = self.with_company(company_id)

        for orderpoints_batch_ids in split_every(1000, self.ids):
            if use_new_cursor:
                cr = registry(self._cr.dbname).cursor()
                self = self.with_env(self.env(cr=cr))
            orderpoints_batch = self.env['stock.warehouse.orderpoint'].browse(
                orderpoints_batch_ids)
            all_orderpoints_exceptions = []
            while orderpoints_batch:
                procurements = []
                for orderpoint in orderpoints_batch:
                    origins = orderpoint.env.context.get('origins', {}).get(
                        orderpoint.id, False)
                    if origins:
                        origin = '%s - %s' % (orderpoint.display_name,
                                              ','.join(origins))
                    else:
                        origin = orderpoint.name
                    if float_compare(orderpoint.qty_to_order,
                                     0.0,
                                     precision_rounding=orderpoint.product_uom.
                                     rounding) == 1:
                        date = orderpoint._get_orderpoint_procurement_date()
                        values = orderpoint._prepare_procurement_values(
                            date=date)
                        procurements.append(
                            self.env['procurement.group'].Procurement(
                                orderpoint.product_id, orderpoint.qty_to_order,
                                orderpoint.product_uom, orderpoint.location_id,
                                orderpoint.name, origin, orderpoint.company_id,
                                values))

                try:
                    with self.env.cr.savepoint():
                        self.env['procurement.group'].with_context(
                            from_orderpoint=True).run(
                                procurements,
                                raise_user_error=raise_user_error)
                except ProcurementException as errors:
                    orderpoints_exceptions = []
                    for procurement, error_msg in errors.procurement_exceptions:
                        orderpoints_exceptions += [
                            (procurement.values.get('orderpoint_id'),
                             error_msg)
                        ]
                    all_orderpoints_exceptions += orderpoints_exceptions
                    failed_orderpoints = self.env[
                        'stock.warehouse.orderpoint'].concat(
                            *[o[0] for o in orderpoints_exceptions])
                    if not failed_orderpoints:
                        _logger.error('Unable to process orderpoints')
                        break
                    orderpoints_batch -= failed_orderpoints

                except OperationalError:
                    if use_new_cursor:
                        cr.rollback()
                        continue
                    else:
                        raise
                else:
                    orderpoints_batch._post_process_scheduler()
                    break

            # Log an activity on product template for failed orderpoints.
            for orderpoint, error_msg in all_orderpoints_exceptions:
                existing_activity = self.env['mail.activity'].search([
                    ('res_id', '=', orderpoint.product_id.product_tmpl_id.id),
                    ('res_model_id', '=',
                     self.env.ref('product.model_product_template').id),
                    ('note', '=', error_msg)
                ])
                if not existing_activity:
                    orderpoint.product_id.product_tmpl_id.activity_schedule(
                        'mail.mail_activity_data_warning',
                        note=error_msg,
                        user_id=orderpoint.product_id.responsible_id.id
                        or SUPERUSER_ID,
                    )

            if use_new_cursor:
                cr.commit()
                cr.close()
                _logger.info(
                    "A batch of %d orderpoints is processed and committed",
                    len(orderpoints_batch_ids))

        return {}