def set_values(self):
    """ Set values for the fields other than `default`, `group` and `module`.

    Persists the settings classified by ``_get_classified_fields``:
    * ``default`` fields become ``ir.default`` records;
    * ``group`` fields add/remove an implied group on the selected groups;
    * ``config`` fields are stored as ``ir.config_parameter`` entries.
    """
    self = self.with_context(active_test=False)
    classified = self._get_classified_fields()

    # default values fields: persist as ir.default entries
    IrDefault = self.env['ir.default'].sudo()
    for name, model, field in classified['default']:
        if isinstance(self[name], models.BaseModel):
            # relational values are stored as id (many2one) or id list (x2many)
            if self._fields[name].type == 'many2one':
                value = self[name].id
            else:
                value = self[name].ids
        else:
            value = self[name]
        IrDefault.set(model, field, value)

    # group fields: modify group / implied groups
    current_settings = self.default_get(list(self.fields_get()))
    with self.env.norecompute():
        # sort so that groups being unset (falsy values) are processed first,
        # before groups being set — avoids transient inconsistent membership
        for name, groups, implied_group in sorted(classified['group'], key=lambda k: self[k[0]]):
            groups = groups.sudo()
            implied_group = implied_group.sudo()
            if self[name] == current_settings[name]:
                # unchanged setting: skip to avoid useless writes
                continue
            if int(self[name]):
                groups.write({'implied_ids': [Command.link(implied_group.id)]})
            else:
                groups.write({'implied_ids': [Command.unlink(implied_group.id)]})
                # also drop the group from users that got it through implication
                implied_group.write({'users': [Command.unlink(user.id) for user in groups.users]})

    # config fields: store ir.config_parameters
    IrConfigParameter = self.env['ir.config_parameter'].sudo()
    for name, icp in classified['config']:
        field = self._fields[name]
        value = self[name]
        if field.type == 'char':
            # storing developer keys as ir.config_parameter may lead to nasty
            # bugs when users leave spaces around them
            value = (value or "").strip() or False
        elif field.type in ('integer', 'float'):
            value = repr(value) if value else False
        elif field.type == 'many2one':
            # value is a (possibly empty) recordset
            value = value.id
        IrConfigParameter.set_param(icp, value)
def _compute_event_ticket_ids(self):
    """ Update event configuration from its event type. Depends are set only
    on event_type_id itself, not its sub fields. Purpose is to emulate an
    onchange: if event type is changed, update event configuration. Changing
    event type content itself should not trigger this method.

    When synchronizing tickets:

      * lines that have no registrations linked are remove;
      * type lines are added;

    Note that updating event_ticket_ids triggers _compute_start_sale_date
    (start_sale_date computation) so ensure result to avoid cache miss.
    """
    for event in self:
        event_type = event.event_type_id
        if not event_type and not event.event_ticket_ids:
            event.event_ticket_ids = False
            continue
        # keep only tickets that already have registrations; the rest are
        # replaced by the template lines coming from the event type
        removable_tickets = event.event_ticket_ids.filtered(
            lambda ticket: not ticket._origin.registration_ids)
        commands = [Command.unlink(ticket.id) for ticket in removable_tickets]
        if event_type.use_ticket:
            whitelist = self.env['event.type.ticket']._get_event_ticket_fields_whitelist()
            for template_line in event_type.event_type_ticket_ids:
                values = {}
                for fname in whitelist:
                    raw = template_line[fname]
                    values[fname] = raw.id if isinstance(raw, models.BaseModel) else raw
                commands.append(Command.create(values))
        event.event_ticket_ids = commands
def _compute_event_mail_ids(self):
    """ Update event configuration from its event type. Depends are set only
    on event_type_id itself, not its sub fields. Purpose is to emulate an
    onchange: if event type is changed, update event configuration. Changing
    event type content itself should not trigger this method.

    When synchronizing mails:

      * lines that are not sent and have no registrations linked are remove;
      * type lines are added;
    """
    for event in self:
        if not event.event_type_id and not event.event_mail_ids:
            event.event_mail_ids = self._default_event_mail_ids()
            continue
        # schedulers that were already executed or have registrations are kept
        removable_schedulers = event.event_mail_ids.filtered(
            lambda scheduler: not scheduler._origin.mail_done and not scheduler._origin.mail_registration_ids)
        commands = [Command.unlink(scheduler.id) for scheduler in removable_schedulers]
        template_lines = event.event_type_id.event_type_mail_ids
        if template_lines:
            commands.extend(
                Command.create(template_line._prepare_event_mail_values())
                for template_line in template_lines
            )
        if commands:
            event.event_mail_ids = commands
def _compute_event_mail_ids(self):
    """ Update event configuration from its event type. Depends are set only
    on event_type_id itself, not its sub fields. Purpose is to emulate an
    onchange: if event type is changed, update event configuration. Changing
    event type content itself should not trigger this method.

    When synchronizing mails:

      * lines that are not sent and have no registrations linked are remove;
      * type lines are added;
    """
    for event in self:
        event_type = event.event_type_id
        if not event_type and not event.event_mail_ids:
            event.event_mail_ids = False
            continue
        # schedulers already sent or linked to registrations are kept as-is
        removable_schedulers = event.event_mail_ids.filtered(
            lambda scheduler: not (scheduler._origin.mail_sent or scheduler._origin.mail_registration_ids))
        commands = [Command.unlink(scheduler.id) for scheduler in removable_schedulers]
        if event_type.use_mail_schedule:
            whitelist = self.env['event.type.mail']._get_event_mail_fields_whitelist()
            for template_line in event_type.event_type_mail_ids:
                values = {}
                for fname in whitelist:
                    raw = template_line[fname]
                    values[fname] = raw.id if isinstance(raw, models.BaseModel) else raw
                commands.append(Command.create(values))
        if commands:
            event.event_mail_ids = commands
def _compute_event_booth_ids(self):
    """ Update event configuration from its event type. Depends are set only
    on event_type_id itself, not its sub fields. Purpose is to emulate an
    onchange: if event type is changed, update event configuration. Changing
    event type content itself should not trigger this method.

    When synchronizing booths:

      * lines that are available are removed;
      * template lines are added;
    """
    for event in self:
        event_type = event.event_type_id
        if not event_type and not event.event_booth_ids:
            event.event_booth_ids = False
            continue
        # available booths get replaced by the event-type templates;
        # only occupied (non-available) booths are preserved
        removable_booths = event.event_booth_ids.filtered(lambda booth: booth.is_available)
        commands = [Command.unlink(booth.id) for booth in removable_booths]
        if event_type.event_type_booth_ids:
            whitelist = self.env['event.type.booth']._get_event_booth_fields_whitelist()
            for template_line in event_type.event_type_booth_ids:
                values = {}
                for fname in whitelist:
                    raw = template_line[fname]
                    values[fname] = raw.id if isinstance(raw, models.BaseModel) else raw
                commands.append(Command.create(values))
        event.event_booth_ids = commands
def unstar_all(self):
    """ Unstar messages for the current partner. """
    partner = self.env.user.partner_id
    starred_messages = self.search([('starred_partner_ids', 'in', partner.id)])
    starred_messages.write({'starred_partner_ids': [Command.unlink(partner.id)]})
    # notify the web client so the starred mailbox refreshes
    notification = {
        'type': 'toggle_star',
        'message_ids': starred_messages.ids,
        'starred': False,
    }
    self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', partner.id), notification)
def action_split(self):
    """ Split the production into one production per detail line, then return
    to the multi-split wizard action if this split was launched from it.

    NOTE(review): pairs the returned productions with the detail lines via
    zip — assumes _split_productions returns them in the same order and
    count as the requested quantities; confirm against _split_productions.
    """
    productions = self.production_id._split_productions({self.production_id: [detail.quantity for detail in self.production_detailed_vals_ids]})
    # propagate per-line responsible and planned date onto each new production
    for production, detail in zip(productions, self.production_detailed_vals_ids):
        production.user_id = detail.user_id
        production.date_planned_start = detail.date
    if self.production_split_multi_id:
        # save the id before unlinking self from the multi wizard, then
        # reopen the multi-split wizard on the remaining productions
        saved_production_split_multi_id = self.production_split_multi_id.id
        self.production_split_multi_id.production_ids = [Command.unlink(self.id)]
        action = self.env['ir.actions.actions']._for_xml_id('mrp.action_mrp_production_split_multi')
        action['res_id'] = saved_production_split_multi_id
        return action
def _unsubscribe_from_channels(self):
    """ Un-subscribe these users' partners from private mail channels.

    The main purpose is to prevent sending internal communication to
    archived / deleted users. Public channels are deliberately left alone:
    they most commonly act as (e-mail based) mailing lists, so users should
    keep receiving updates from them until they un-subscribe themselves.
    """
    partner_ids = self.mapped('partner_id').ids
    unlink_commands = [Command.unlink(pid) for pid in partner_ids]
    non_public_channels = self.mapped('partner_id.channel_ids').filtered(
        lambda channel: channel.public != 'public')
    non_public_channels.write({'channel_partner_ids': unlink_commands})
def test_demote_user(self):
    """When a user is demoted to the status of portal/public,
    we should strip him of all his (previous) rights
    """
    group_0 = self.env.ref(self.group0)  # the group to which test_user already belongs
    # group U implies the internal-user group
    group_U = self.env["res.groups"].create({
        "name": "U",
        "implied_ids": [Command.set([self.grp_internal.id])]
    })
    self.grp_internal.implied_ids = False  # only there to simplify the test by not having to care about its trans_implied_ids
    self.test_user.write({'groups_id': [Command.link(group_U.id)]})
    # linking group_U must transitively add grp_internal
    self.assertEqual(
        self.test_user.groups_id, (group_0 + group_U + self.grp_internal),
        "We should have our 2 groups and the implied user group",
    )
    # Now we demote him. The JS framework sends 3 and 4 commands,
    # which is what we write here, but it should work even with a 5 command or whatever.
    self.test_user.write({'groups_id': [
        Command.unlink(self.grp_internal.id),
        Command.unlink(self.grp_public.id),
        Command.link(self.grp_portal.id),
    ]})
    # if we screw up the removing groups/adding the implied ids, we could end up in two situations:
    # 1. we have a portal user with way too much rights (e.g. 'Contact Creation', which does not imply any other group)
    # 2. because a group may be (transitively) implying group_user, then it would raise an exception
    # so as a compromise we remove all groups when demoting a user
    # (even technical display groups, e.g. TaxB2B, which could be re-added later)
    self.assertEqual(
        self.test_user.groups_id, (self.grp_portal),
        "Here the portal group does not imply any other group, so we should only have this group.",
    )
def toggle_message_starred(self):
    """ Toggle messages as (un)starred. Technically, the notifications
    related to uid are set to (un)starred.
    """
    # a user should always be able to star a message he can read
    self.check_access_rule('read')
    starred = not self.starred
    partner_id = self.env.user.partner_id.id
    if starred:
        command = Command.link(partner_id)
    else:
        command = Command.unlink(partner_id)
    self.sudo().write({'starred_partner_ids': [command]})
    # push the change to the user's web client
    notification = {'type': 'toggle_star', 'message_ids': [self.id], 'starred': starred}
    self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', partner_id), notification)
def test_m2m_store_trigger(self):
    """ The stored `share` flag must be recomputed when group membership
    changes through the many2many on the group side.
    """
    internal_group = self.env.ref('base.group_user')
    new_user = self.env['res.users'].create({
        'name': 'test',
        'login': '******',
        'groups_id': [Command.set([])],
    })
    # no internal group -> considered a share user
    self.assertTrue(new_user.share)
    # adding through the inverse m2m must flip the flag
    internal_group.write({'users': [Command.link(new_user.id)]})
    self.assertFalse(new_user.share)
    # removing through the inverse m2m must flip it back
    internal_group.write({'users': [Command.unlink(new_user.id)]})
    self.assertTrue(new_user.share)
def _action_unfollow(self, partner):
    """ Make `partner` leave this channel: unsubscribe it, unpin the channel
    on its side, and post a 'left the channel' message.

    :param partner: res.partner record leaving the channel
    :return: True if the partner was not a member, else the write() result
    """
    self.message_unsubscribe(partner.ids)
    if partner not in self.with_context(active_test=False).channel_partner_ids:
        return True
    channel_info = self.channel_info('unsubscribe')[0]  # must be computed before leaving the channel (access rights)
    result = self.write({'channel_partner_ids': [Command.unlink(partner.id)]})
    # side effect of unsubscribe that wasn't taken into account because
    # channel_info is called before actually unpinning the channel
    channel_info['is_pinned'] = False
    self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', partner.id), channel_info)
    notification = _('<div class="o_mail_notification">left the channel</div>')
    # post 'channel left' message as root since the partner just unsubscribed from the channel
    self.sudo().message_post(body=notification, subtype_xmlid="mail.mt_comment", author_id=partner.id)
    return result
def _compute_available_payment_method_ids(self):
    """ On top of the basic filtering, hide payment methods whose acquirer is
    unavailable for the journal's company.

    This avoids allowing payment method lines linked to an acquirer that has
    no record. Only methods declared with mode 'unique' are restricted this
    way; other modes are left to the super() computation.
    """
    super()._compute_available_payment_method_ids()

    installed_acquirers = self.env['payment.acquirer'].sudo().search([])
    method_information = self.env['account.payment.method']._get_payment_method_information()
    pay_methods = self.env['account.payment.method'].search([
        ('code', 'in', list(method_information.keys()))
    ])
    pay_method_by_code = {x.code + x.payment_type: x for x in pay_methods}

    for code, vals in method_information.items():
        payment_method = pay_method_by_code.get(code + 'inbound')
        if not payment_method:
            continue
        if vals['mode'] != 'unique':
            # only 'unique' methods depend on acquirer availability; hoisted
            # out of the journal loop to skip the per-journal filtering
            continue
        for journal in self:
            available_providers = installed_acquirers.filtered(
                lambda a: a.company_id == journal.company_id
            ).mapped('provider')
            if payment_method.code not in available_providers:
                # write only when there is actually something to remove
                # (the original issued an empty write per journal/method);
                # pass the id explicitly instead of shadowing `payment_method`
                journal.write({
                    'available_payment_method_ids': [Command.unlink(payment_method.id)],
                })
def test_write_base_many2many(self):
    """ Write on many2many field.

    Checks the number of SQL queries issued by each kind of x2many write
    command (create/update/delete/unlink/link/clear/set) on a many2many.
    """
    rec1 = self.env['test_performance.base'].create({'name': 'X'})

    # create N tags on rec1: O(N) queries
    with self.assertQueryCount(4):
        rec1.invalidate_cache()
        rec1.write({'tag_ids': [Command.create({'name': 0})]})
    self.assertEqual(len(rec1.tag_ids), 1)

    with self.assertQueryCount(14):
        rec1.invalidate_cache()
        rec1.write({'tag_ids': [Command.create({'name': val}) for val in range(1, 12)]})
    self.assertEqual(len(rec1.tag_ids), 12)

    tags = rec1.tag_ids

    # update N tags: O(N) queries
    # note: tags[0] is a one-record recordset, so iterating it yields one tag
    with self.assertQueryCount(3):
        rec1.invalidate_cache()
        rec1.write({'tag_ids': [Command.update(tag.id, {'name': 'X'}) for tag in tags[0]]})
    self.assertEqual(rec1.tag_ids, tags)

    with self.assertQueryCount(3):
        rec1.invalidate_cache()
        rec1.write({'tag_ids': [Command.update(tag.id, {'name': 'X'}) for tag in tags[1:]]})
    self.assertEqual(rec1.tag_ids, tags)

    # delete N tags: O(1) queries (delete removes the tag records themselves)
    with self.assertQueryCount(__system__=8, demo=8):
        rec1.invalidate_cache()
        rec1.write({'tag_ids': [Command.delete(tag.id) for tag in tags[0]]})
    self.assertEqual(rec1.tag_ids, tags[1:])

    with self.assertQueryCount(__system__=8, demo=8):
        rec1.invalidate_cache()
        rec1.write({'tag_ids': [Command.delete(tag.id) for tag in tags[1:]]})
    self.assertFalse(rec1.tag_ids)
    self.assertFalse(tags.exists())

    rec1.write({'tag_ids': [Command.create({'name': val}) for val in range(12)]})
    tags = rec1.tag_ids

    # unlink N tags: O(1) queries (unlink only severs the relation,
    # the tag records survive — see assertTrue below)
    with self.assertQueryCount(3):
        rec1.invalidate_cache()
        rec1.write({'tag_ids': [Command.unlink(tag.id) for tag in tags[0]]})
    self.assertEqual(rec1.tag_ids, tags[1:])

    with self.assertQueryCount(3):
        rec1.invalidate_cache()
        rec1.write({'tag_ids': [Command.unlink(tag.id) for tag in tags[1:]]})
    self.assertFalse(rec1.tag_ids)
    self.assertTrue(tags.exists())

    rec2 = self.env['test_performance.base'].create({'name': 'X'})

    # link N tags from rec1 to rec2: O(1) queries
    with self.assertQueryCount(3):
        rec1.invalidate_cache()
        rec2.write({'tag_ids': [Command.link(tag.id) for tag in tags[0]]})
    self.assertEqual(rec2.tag_ids, tags[0])

    with self.assertQueryCount(3):
        rec1.invalidate_cache()
        rec2.write({'tag_ids': [Command.link(tag.id) for tag in tags[1:]]})
    self.assertEqual(rec2.tag_ids, tags)

    # re-linking already linked tags costs fewer queries (no-op write)
    with self.assertQueryCount(2):
        rec1.invalidate_cache()
        rec2.write({'tag_ids': [Command.link(tag.id) for tag in tags[1:]]})
    self.assertEqual(rec2.tag_ids, tags)

    # empty N tags in rec2: O(1) queries
    with self.assertQueryCount(3):
        rec1.invalidate_cache()
        rec2.write({'tag_ids': [Command.clear()]})
    self.assertFalse(rec2.tag_ids)
    self.assertTrue(tags.exists())

    # clearing an already-empty m2m is cheaper
    with self.assertQueryCount(2):
        rec1.invalidate_cache()
        rec2.write({'tag_ids': [Command.clear()]})
    self.assertFalse(rec2.tag_ids)

    # set N tags in rec2: O(1) queries
    with self.assertQueryCount(3):
        rec1.invalidate_cache()
        rec2.write({'tag_ids': [Command.set(tags.ids)]})
    self.assertEqual(rec2.tag_ids, tags)

    with self.assertQueryCount(3):
        rec1.invalidate_cache()
        rec2.write({'tag_ids': [Command.set(tags[:8].ids)]})
    self.assertEqual(rec2.tag_ids, tags[:8])

    # set that both removes and adds rows needs one extra query
    with self.assertQueryCount(4):
        rec1.invalidate_cache()
        rec2.write({'tag_ids': [Command.set(tags[4:].ids)]})
    self.assertEqual(rec2.tag_ids, tags[4:])

    with self.assertQueryCount(3):
        rec1.invalidate_cache()
        rec2.write({'tag_ids': [Command.set(tags.ids)]})
    self.assertEqual(rec2.tag_ids, tags)

    # setting the same value again is a no-op write
    with self.assertQueryCount(2):
        rec1.invalidate_cache()
        rec2.write({'tag_ids': [Command.set(tags.ids)]})
    self.assertEqual(rec2.tag_ids, tags)
def test_write_base_one2many(self):
    """ Write on one2many field.

    Checks the number of SQL queries issued by each kind of x2many write
    command (create/update/delete/unlink/link/clear/set) on a one2many.
    """
    rec1 = self.env['test_performance.base'].create({'name': 'X'})

    # create N lines on rec1: O(N) queries
    with self.assertQueryCount(3):
        rec1.invalidate_cache()
        rec1.write({'line_ids': [Command.create({'value': 0})]})
    self.assertEqual(len(rec1.line_ids), 1)

    with self.assertQueryCount(15):
        rec1.invalidate_cache()
        rec1.write({'line_ids': [Command.create({'value': val}) for val in range(1, 12)]})
    self.assertEqual(len(rec1.line_ids), 12)

    lines = rec1.line_ids

    # update N lines: O(N) queries
    # note: lines[0] is a one-record recordset, so iterating it yields one line
    with self.assertQueryCount(6):
        rec1.invalidate_cache()
        rec1.write({'line_ids': [Command.update(line.id, {'value': 42}) for line in lines[0]]})
    self.assertEqual(rec1.line_ids, lines)

    with self.assertQueryCount(26):
        rec1.invalidate_cache()
        rec1.write({'line_ids': [Command.update(line.id, {'value': 42 + line.id}) for line in lines[1:]]})
    self.assertEqual(rec1.line_ids, lines)

    # delete N lines: O(1) queries
    with self.assertQueryCount(14):
        rec1.invalidate_cache()
        rec1.write({'line_ids': [Command.delete(line.id) for line in lines[0]]})
    self.assertEqual(rec1.line_ids, lines[1:])

    with self.assertQueryCount(12):
        rec1.invalidate_cache()
        rec1.write({'line_ids': [Command.delete(line.id) for line in lines[1:]]})
    self.assertFalse(rec1.line_ids)
    self.assertFalse(lines.exists())

    rec1.write({'line_ids': [Command.create({'value': val}) for val in range(12)]})
    lines = rec1.line_ids

    # unlink N lines: O(1) queries
    # NOTE(review): unlinked lines no longer exist afterwards (see the
    # assertFalse below) — presumably the o2m is set up to delete orphaned
    # lines on unlink; confirm against the test model definition
    with self.assertQueryCount(14):
        rec1.invalidate_cache()
        rec1.write({'line_ids': [Command.unlink(line.id) for line in lines[0]]})
    self.assertEqual(rec1.line_ids, lines[1:])

    with self.assertQueryCount(12):
        rec1.invalidate_cache()
        rec1.write({'line_ids': [Command.unlink(line.id) for line in lines[1:]]})
    self.assertFalse(rec1.line_ids)
    self.assertFalse(lines.exists())

    rec1.write({'line_ids': [Command.create({'value': val}) for val in range(12)]})
    lines = rec1.line_ids
    rec2 = self.env['test_performance.base'].create({'name': 'X'})

    # link N lines from rec1 to rec2: O(1) queries
    # (linking to rec2 re-parents the lines away from rec1)
    with self.assertQueryCount(8):
        rec1.invalidate_cache()
        rec2.write({'line_ids': [Command.link(line.id) for line in lines[0]]})
    self.assertEqual(rec1.line_ids, lines[1:])
    self.assertEqual(rec2.line_ids, lines[0])

    with self.assertQueryCount(8):
        rec1.invalidate_cache()
        rec2.write({'line_ids': [Command.link(line.id) for line in lines[1:]]})
    self.assertFalse(rec1.line_ids)
    self.assertEqual(rec2.line_ids, lines)

    # re-linking already linked lines is cheaper
    with self.assertQueryCount(4):
        rec1.invalidate_cache()
        rec2.write({'line_ids': [Command.link(line.id) for line in lines[0]]})
    self.assertEqual(rec2.line_ids, lines)

    with self.assertQueryCount(4):
        rec1.invalidate_cache()
        rec2.write({'line_ids': [Command.link(line.id) for line in lines[1:]]})
    self.assertEqual(rec2.line_ids, lines)

    # empty N lines in rec2: O(1) queries
    with self.assertQueryCount(13):
        rec1.invalidate_cache()
        rec2.write({'line_ids': [Command.clear()]})
    self.assertFalse(rec2.line_ids)

    # clearing an already-empty o2m is cheaper
    with self.assertQueryCount(3):
        rec1.invalidate_cache()
        rec2.write({'line_ids': [Command.clear()]})
    self.assertFalse(rec2.line_ids)

    rec1.write({'line_ids': [Command.create({'value': val}) for val in range(12)]})
    lines = rec1.line_ids

    # set N lines in rec2: O(1) queries
    with self.assertQueryCount(8):
        rec1.invalidate_cache()
        rec2.write({'line_ids': [Command.set(lines[0].ids)]})
    self.assertEqual(rec1.line_ids, lines[1:])
    self.assertEqual(rec2.line_ids, lines[0])

    with self.assertQueryCount(6):
        rec1.invalidate_cache()
        rec2.write({'line_ids': [Command.set(lines.ids)]})
    self.assertFalse(rec1.line_ids)
    self.assertEqual(rec2.line_ids, lines)

    # setting the same value again is cheaper (no-op write)
    with self.assertQueryCount(4):
        rec1.invalidate_cache()
        rec2.write({'line_ids': [Command.set(lines.ids)]})
    self.assertEqual(rec2.line_ids, lines)
def _add_followers(self, res_model, res_ids, partner_ids, subtypes, check_existing=False, existing_policy='skip'):
    """ Internal method that generates values to insert or update followers.

    Callers have to handle the result, for example by making a valid ORM
    command, inserting or updating directly follower records, ... This method
    returns two main data

     * first one is a dict which keys are res_ids. Value is a list of dict of
       values valid for creating new followers for the related res_id;
     * second one is a dict which keys are follower ids. Value is a dict of
       values valid for updating the related follower record;

    :param res_model: model of the followed documents;
    :param res_ids: ids of the followed documents (may be falsy: 0 is used
        as a placeholder key in that case);
    :param partner_ids: partner IDs to add as followers;
    :param subtypes: optional subtypes for new partner followers. This
        is a dict whose keys are partner IDs and value subtype IDs for that
        partner.
    :param check_existing: if True, check for existing followers for given
        documents and handle them according to existing_policy parameter.
        Setting to False allows to save some computation if caller is sure
        there are no conflict for followers;
    :param existing_policy: if check_existing, tells what to do with already
        existing followers:

          * skip: simply skip existing followers, do not touch them;
          * force: update existing with given subtypes only;
          * replace: replace existing with new subtypes (like force without
            old / new follower);
          * update: gives an update dict allowing to add missing subtypes
            (no subtype removal);
    """
    _res_ids = res_ids or [0]
    # data_fols: follower id -> (res_id, partner_id, subtype_ids)
    # doc_pids: res_id -> set of partner ids already following that document
    data_fols, doc_pids = dict(), dict((i, set()) for i in _res_ids)

    if check_existing and res_ids:
        for fid, rid, pid, sids in self._get_subscription_data([(res_model, res_ids)], partner_ids or None):
            # under 'force', existing followers are dropped below, so they
            # must not be registered as "already following"
            if existing_policy != 'force':
                if pid:
                    doc_pids[rid].add(pid)
            data_fols[fid] = (rid, pid, sids)
        if existing_policy == 'force':
            self.sudo().browse(data_fols.keys()).unlink()

    new, update = dict(), dict()
    for res_id in _res_ids:
        for partner_id in set(partner_ids or []):
            if partner_id not in doc_pids[res_id]:
                # not following yet: generate creation values
                new.setdefault(res_id, list()).append({
                    'res_model': res_model,
                    'partner_id': partner_id,
                    'subtype_ids': [Command.set(subtypes[partner_id])],
                })
            elif existing_policy in ('replace', 'update'):
                # already following: diff requested subtypes vs current ones
                fol_id, sids = next(((key, val[2]) for key, val in data_fols.items() if val[0] == res_id and val[1] == partner_id), (False, []))
                new_sids = set(subtypes[partner_id]) - set(sids)
                old_sids = set(sids) - set(subtypes[partner_id])
                update_cmd = []
                if fol_id and new_sids:
                    update_cmd += [Command.link(sid) for sid in new_sids]
                # only 'replace' removes subtypes; 'update' is add-only
                if fol_id and old_sids and existing_policy == 'replace':
                    update_cmd += [Command.unlink(sid) for sid in old_sids]
                if update_cmd:
                    update[fol_id] = {'subtype_ids': update_cmd}
    return new, update
def test_mail_mail_attachment_access(self):
    """ Check restricted/unrestricted attachment fields on mail.mail when
    the attachment access check denies some attachments ('file 2', 'file 4'
    are made inaccessible by the patched check below).
    """
    mail = self.env['mail.mail'].create({
        'body_html': 'Test',
        'email_to': '*****@*****.**',
        'partner_ids': [(4, self.user_employee.partner_id.id)],
        'attachment_ids': [
            (0, 0, {'name': 'file 1', 'datas': 'c2VjcmV0'}),
            (0, 0, {'name': 'file 2', 'datas': 'c2VjcmV0'}),
            (0, 0, {'name': 'file 3', 'datas': 'c2VjcmV0'}),
            (0, 0, {'name': 'file 4', 'datas': 'c2VjcmV0'}),
        ],
    })

    def _patched_check(self, *args, **kwargs):
        # superuser bypasses the restriction, like the real check
        if self.env.is_superuser():
            return
        # deny access to 'file 2' and 'file 4' only
        if any(attachment.name in ('file 2', 'file 4') for attachment in self):
            raise AccessError('No access')

    mail.invalidate_recordset()
    new_attachment = self.env['ir.attachment'].create({
        'name': 'new file',
        'datas': 'c2VjcmV0',
    })

    with patch.object(type(self.env['ir.attachment']), 'check', _patched_check):
        # Sanity check
        self.assertEqual(mail.restricted_attachment_count, 2)
        self.assertEqual(len(mail.unrestricted_attachment_ids), 2)
        self.assertEqual(mail.unrestricted_attachment_ids.mapped('name'), ['file 1', 'file 3'])

        # Add a new attachment
        mail.write({'unrestricted_attachment_ids': [Command.link(new_attachment.id)]})
        self.assertEqual(mail.restricted_attachment_count, 2)
        self.assertEqual(len(mail.unrestricted_attachment_ids), 3)
        self.assertEqual(mail.unrestricted_attachment_ids.mapped('name'), ['file 1', 'file 3', 'new file'])
        self.assertEqual(len(mail.attachment_ids), 5)

        # Remove an attachment
        mail.write({'unrestricted_attachment_ids': [Command.unlink(new_attachment.id)]})
        self.assertEqual(mail.restricted_attachment_count, 2)
        self.assertEqual(len(mail.unrestricted_attachment_ids), 2)
        self.assertEqual(mail.unrestricted_attachment_ids.mapped('name'), ['file 1', 'file 3'])
        self.assertEqual(len(mail.attachment_ids), 4)

        # Reset command: clearing unrestricted must keep the restricted ones
        mail.invalidate_recordset()
        mail.write({'unrestricted_attachment_ids': [Command.clear()]})
        self.assertEqual(len(mail.unrestricted_attachment_ids), 0)
        self.assertEqual(len(mail.attachment_ids), 2)

        # Read in SUDO: count still sees the restricted ones
        mail.invalidate_recordset()
        self.assertEqual(mail.sudo().restricted_attachment_count, 2)
        self.assertEqual(len(mail.sudo().unrestricted_attachment_ids), 0)
def _write_from_google(self, gevent, vals):
    """ Update this recurrence and its events from a Google event payload.

    Synchronizes attendee statuses, recreates the recurrence when the base
    event's time fields changed, and re-applies the rrule when it differs
    from the current one.

    :param gevent: Google event data of the recurring event
    :param vals: values to write on the recurrence
    """
    current_rrule = self.rrule
    # event_tz is written on event in Google but on recurrence in Odoo
    vals['event_tz'] = gevent.start.get('timeZone')
    super()._write_from_google(gevent, vals)
    base_event_time_fields = ['start', 'stop', 'allday']
    new_event_values = self.env["calendar.event"]._odoo_values(gevent)

    # We update the attendee status for all events in the recurrence
    google_attendees = gevent.attendees or []
    emails = [a.get('email') for a in google_attendees]
    partners = self._get_sync_partner(emails)
    existing_attendees = self.calendar_event_ids.attendee_ids
    # attendee is (email, partner, google attendee dict)
    for attendee in zip(emails, partners, google_attendees):
        email = attendee[0]
        if email in existing_attendees.mapped('email'):
            # Update existing attendees
            existing_attendees.filtered(lambda att: att.email == email).write({
                'state': attendee[2].get('responseStatus')})
        else:
            # Create new attendees
            if attendee[2].get('self'):
                # 'self' flags the Google-account owner: map to current user
                partner = self.env.user.partner_id
            else:
                partner = attendee[1]
            self.calendar_event_ids.write({'attendee_ids': [(0, 0, {
                'state': attendee[2].get('responseStatus'),
                'partner_id': partner.id})]})
            if attendee[2].get('displayName') and not partner.name:
                partner.name = attendee[2].get('displayName')
    for odoo_attendee_email in set(existing_attendees.mapped('email')):
        # Remove old attendees. Sometimes, several partners have the same email.
        if email_normalize(odoo_attendee_email) not in emails:
            attendees = existing_attendees.exists().filtered(
                lambda att: att.email == email_normalize(odoo_attendee_email))
            self.calendar_event_ids.write({
                'need_sync': False,
                'partner_ids': [Command.unlink(att.partner_id.id) for att in attendees]})

    # Update the recurrence values
    old_event_values = self.base_event_id and self.base_event_id.read(base_event_time_fields)[0]
    if old_event_values and any(new_event_values[key] != old_event_values[key] for key in base_event_time_fields):
        # we need to recreate the recurrence, time_fields were modified.
        base_event_id = self.base_event_id
        non_equal_values = [
            (key,
             old_event_values[key] and old_event_values[key].strftime('%m/%d/%Y, %H:%M:%S'),
             '-->',
             new_event_values[key] and new_event_values[key].strftime('%m/%d/%Y, %H:%M:%S'))
            for key in ['start', 'stop'] if new_event_values[key] != old_event_values[key]]
        log_msg = f"Recurrence {self.id} {self.rrule} has all events ({len(self.calendar_event_ids.ids)}) deleted because of base event value change: {non_equal_values}"
        _logger.info(log_msg)
        # We archive the old events to recompute the recurrence. These events are already deleted on Google side.
        # We can't call _cancel because events without user_id would not be deleted
        (self.calendar_event_ids - base_event_id).google_id = False
        (self.calendar_event_ids - base_event_id).unlink()
        base_event_id.with_context(dont_notify=True).write(dict(new_event_values, google_id=False, need_sync=False))
        if self.rrule == current_rrule:
            # if the rrule has changed, it will be recalculated below
            # There is no detached event now
            self.with_context(dont_notify=True)._apply_recurrence()
    else:
        time_fields = (self.env["calendar.event"]._get_time_fields() | self.env["calendar.event"]._get_recurrent_fields())
        # We avoid to write time_fields because they are not shared between events.
        self._write_events(dict({
            field: value for field, value in new_event_values.items()
            if field not in time_fields
        }, need_sync=False))

    # We apply the rrule check after the time_field check because the google_id are generated according
    # to base_event start datetime.
    if self.rrule != current_rrule:
        detached_events = self._apply_recurrence()
        detached_events.google_id = False
        log_msg = f"Recurrence #{self.id} | current rule: {current_rrule} | new rule: {self.rrule} | remaining: {len(self.calendar_event_ids)} | removed: {len(detached_events)}"
        _logger.info(log_msg)
        detached_events.unlink()