def button_install(self):
    """ Install the modules in ``self`` together with their dependencies and
    every auto-installable module whose dependencies become satisfied, then
    return the standard "Install" client action.

    :raises UserError: when two modules selected for installation exclude
        each other, or when an exclusive category would end up containing
        modules that are not related by dependency.
    :return: ``ACTION_DICT`` with the name 'Install'.
    """
    # domain to select auto-installable (but not yet installed) modules
    auto_domain = [('state', '=', 'uninstalled'), ('auto_install', '=', True)]

    # determine whether an auto-install module must be installed:
    #  - all its dependencies are installed or to be installed,
    #  - at least one dependency is 'to install'
    install_states = frozenset(('installed', 'to install', 'to upgrade'))

    def must_install(module):
        states = {dep.state for dep in module.dependencies_id}
        return states <= install_states and 'to install' in states

    modules = self
    while modules:
        # Mark the given modules and their dependencies to be installed.
        modules._state_update('to install', ['uninstalled'])
        # Determine which auto-installable modules must be installed.
        modules = self.search(auto_domain).filtered(must_install)

    # the modules that are installed/to install/to upgrade
    install_mods = self.search([('state', 'in', list(install_states))])

    # check individual exclusions
    install_names = {module.name for module in install_mods}
    for module in install_mods:
        for exclusion in module.exclusion_ids:
            if exclusion.name in install_names:
                msg = _('Modules "%s" and "%s" are incompatible.')
                raise UserError(msg % (module.shortdesc, exclusion.exclusion_id.shortdesc))

    # check category exclusions
    def closure(module):
        # transitive closure of a module's dependency graph
        todo = result = module
        while todo:
            result |= todo
            todo = todo.mapped('dependencies_id.depend_id')
        return result

    exclusives = self.env['ir.module.category'].search([('exclusive', '=', True)])
    for category in exclusives:
        # retrieve installed modules in category and sub-categories
        categories = category.search([('id', 'child_of', category.ids)])
        modules = install_mods.filtered(lambda mod: mod.category_id in categories)

        # the installation is valid if all installed modules in categories
        # belong to the transitive dependencies of one of them
        if modules and not any(modules <= closure(module) for module in modules):
            msg = _('You are trying to install incompatible modules in category "%s":')
            labels = dict(self.fields_get(['state'])['state']['selection'])
            raise UserError("\n".join([msg % category.name] + [
                "- %s (%s)" % (module.shortdesc, labels[module.state])
                for module in modules
            ]))

    ir_config = self.env['ir.config_parameter'].sudo()
    exp_date = ir_config.get_param('database.expiration_date')
    reason = ir_config.get_param('database.expiration_reason')
    set_param = ir_config.set_param
    for mod in self:
        if mod.contract_certificate and not (reason == 'contract_expire' and exp_date):
            expire_date = datetime.datetime.now() + datetime.timedelta(days=15)
            set_param('database.expiration_date', expire_date.replace(microsecond=0))
            set_param('database.expiration_reason', 'contract_expire')
            # base64.encodestring() was deprecated since Python 3.1 and
            # removed in 3.9; encodebytes() is the drop-in replacement.
            set_param(
                'contract.validity',
                base64.encodebytes(
                    encrypt(
                        json.dumps(str(expire_date.replace(microsecond=0))),
                        str(expire_date.replace(microsecond=0)))))

    return dict(ACTION_DICT, name=_('Install'))
def write(self, vals):
    # OVERRIDE
    """ Enforce accounting constraints before writing journal items, then
    keep accounting fields (debit/credit/...) and business fields
    (price/quantity/...) synchronized on invoice lines.

    :param vals: field values to write, as for the standard write().
    :return: result of the underlying writes (truthy).
    :raises UserError: for deprecated accounts, hash-restricted journals,
        tax changes on posted entries, lock-date violations or invalid
        receivable/payable account switches.
    """
    # NOTE(review): the original defined a local `field_will_change` helper
    # that was never called (all checks go through
    # self.env['account.move']._field_will_change); it has been removed as
    # dead code.
    ACCOUNTING_FIELDS = ('debit', 'credit', 'amount_currency')
    BUSINESS_FIELDS = ('price_unit', 'quantity', 'discount', 'tax_ids')
    PROTECTED_FIELDS_TAX_LOCK_DATE = [
        'debit', 'credit', 'tax_line_id', 'tax_ids', 'tag_ids'
    ]
    PROTECTED_FIELDS_LOCK_DATE = PROTECTED_FIELDS_TAX_LOCK_DATE + [
        'account_id', 'journal_id', 'amount_currency', 'currency_id', 'partner_id'
    ]
    PROTECTED_FIELDS_RECONCILIATION = ('account_id', 'date', 'debit', 'credit',
                                       'amount_currency', 'currency_id')

    account_to_write = self.env['account.account'].browse(
        vals['account_id']) if 'account_id' in vals else None

    # Check writing a deprecated account.
    if account_to_write and account_to_write.deprecated:
        raise UserError(_('You cannot use a deprecated account.'))

    # when making a reconciliation on an existing liquidity journal item,
    # mark the payment as reconciled
    for line in self:
        if line.parent_state == 'posted':
            if line.move_id.restrict_mode_hash_table and set(
                    vals).intersection(INTEGRITY_HASH_LINE_FIELDS):
                raise UserError(
                    _("You cannot edit the following fields due to restrict mode being activated on the journal: %s."
                      ) % ', '.join(INTEGRITY_HASH_LINE_FIELDS))
            if any(key in vals for key in ('tax_ids', 'tax_line_ids')):
                raise UserError(
                    _('You cannot modify the taxes related to a posted journal item, you should reset the journal entry to draft to do so.'
                      ))

        if 'statement_line_id' in vals and line.payment_id:
            # In case of an internal transfer, there are 2 liquidity move
            # lines to match with a bank statement
            if all(other_line.statement_id
                   for other_line in line.payment_id.move_line_ids.filtered(
                       lambda r: r.id != line.id and r.account_id.internal_type == 'liquidity')):
                line.payment_id.state = 'reconciled'

        # Check the lock date.
        if any(self.env['account.move']._field_will_change(line, vals, field_name)
               for field_name in PROTECTED_FIELDS_LOCK_DATE):
            line.move_id._check_fiscalyear_lock_date()

        # Check the tax lock date.
        if any(self.env['account.move']._field_will_change(line, vals, field_name)
               for field_name in PROTECTED_FIELDS_TAX_LOCK_DATE):
            line._check_tax_lock_date()

        # Check the reconciliation.
        if any(self.env['account.move']._field_will_change(line, vals, field_name)
               for field_name in PROTECTED_FIELDS_RECONCILIATION):
            line._check_reconciliation()

        # Check switching receivable / payable accounts.
        if account_to_write:
            account_type = line.account_id.user_type_id.type
            if line.move_id.is_sale_document(include_receipts=True):
                if (account_type == 'receivable' and account_to_write.user_type_id.type != account_type) \
                        or (account_type != 'receivable' and account_to_write.user_type_id.type == 'receivable'):
                    raise UserError(
                        _("You can only set an account having the receivable type on payment terms lines for customer invoice."
                          ))
            if line.move_id.is_purchase_document(include_receipts=True):
                if (account_type == 'payable' and account_to_write.user_type_id.type != account_type) \
                        or (account_type != 'payable' and account_to_write.user_type_id.type == 'payable'):
                    raise UserError(
                        _("You can only set an account having the payable type on payment terms lines for vendor bill."
                          ))

    result = True
    for line in self:
        cleaned_vals = line.move_id._cleanup_write_orm_values(line, vals)
        if not cleaned_vals:
            continue

        result |= super(AccountMoveLine, line).write(cleaned_vals)

        if not line.move_id.is_invoice(include_receipts=True):
            continue

        # Ensure consistency between accounting & business fields.
        # As we can't express such synchronization as computed fields without
        # cycling, we need to do it both in onchange and in create/write.
        # So, if something changed in accounting [resp. business] fields,
        # business [resp. accounting] fields are recomputed.
        if any(field in cleaned_vals for field in ACCOUNTING_FIELDS):
            # NOTE(review): balance/price_subtotal are computed but the
            # corresponding kwargs are commented out below — kept as-is to
            # preserve behavior; confirm whether they should be passed.
            balance = line.currency_id and line.amount_currency or line.debit - line.credit
            price_subtotal = line._get_price_total_and_subtotal().get('price_subtotal', 0.0)
            to_write = line._get_fields_onchange_balance(
                # balance=balance,
                # price_subtotal=price_subtotal,
            )
            to_write.update(
                line._get_price_total_and_subtotal(
                    price_unit=to_write.get('price_unit', line.price_unit),
                    quantity=to_write.get('quantity', line.quantity),
                    discount=to_write.get('discount', line.discount),
                ))
            result |= super(AccountMoveLine, line).write(to_write)
        elif any(field in cleaned_vals for field in BUSINESS_FIELDS):
            to_write = line._get_price_total_and_subtotal()
            to_write.update(
                line._get_fields_onchange_subtotal(
                    price_subtotal=to_write['price_subtotal'],
                ))
            result |= super(AccountMoveLine, line).write(to_write)

    # Check total_debit == total_credit in the related moves.
    if self._context.get('check_move_validity', True):
        self.mapped('move_id')._check_balanced()

    return result
def search_panel_select_multi_range(self, field_name, **kwargs):
    """ Return possible values of the field field_name (case select="multi"),
        possibly with counters and groups.

    :param field_name: the name of a filter field;
        possible types are many2one, many2many, selection.
    :param category_domain: domain generated by categories. Default is [].
    :param comodel_domain: domain of field values (if relational)
                            (this parameter is used in _search_panel_range). Default is [].
    :param enable_counters: whether to count records by value. Default is False.
    :param expand: whether to return the full range of field values in comodel_domain
                    or only the field image values. Default is False.
    :param filter_domain: domain generated by filters. Default is [].
    :param group_by: extra field to read on comodel, to group comodel records
    :param group_domain: dict, one domain for each activated group
                            for the group_by (if any). Those domains are
                            used to fetch accurate counters for values in each group.
                            Default is [] (many2one case) or None.
    :param limit: integer, maximal number of values to fetch. Default is None.
    :param search_domain: base domain of search. Default is [].

    :return: {
        'values': a list of possible values, each being a dict with keys
            'id' (value),
            'name' (value label),
            '__count' (how many records with that value),
            'group_id' (value of group), set if a group_by has been provided,
            'group_name' (label of group), set if a group_by has been provided
    }
    or an object with an error message when limit is defined and reached.
    """
    field = self._fields[field_name]
    supported_types = ['many2one', 'many2many', 'selection']
    if field.type not in supported_types:
        raise UserError(
            _('Only types %(supported_types)s are supported for filter (found type %(field_type)s)',
              supported_types=supported_types, field_type=field.type))

    model_domain = kwargs.get('search_domain', [])
    # domain combining category and filter restrictions (records counted
    # against it in addition to model_domain)
    extra_domain = AND([
        kwargs.get('category_domain', []),
        kwargs.get('filter_domain', []),
    ])

    if field.type == 'selection':
        # selection fields have a fixed range: delegate entirely
        return {
            'values': self._search_panel_selection_range(field_name,
                                                         model_domain=model_domain,
                                                         extra_domain=extra_domain,
                                                         **kwargs)
        }

    Comodel = self.env.get(field.comodel_name).with_context(hierarchical_naming=False)
    field_names = ['display_name']
    group_by = kwargs.get('group_by')
    limit = kwargs.get('limit')
    if group_by:
        group_by_field = Comodel._fields[group_by]

        field_names.append(group_by)

        # define a (group_id, group_name) extractor suited to the group_by
        # field's type
        if group_by_field.type == 'many2one':
            def group_id_name(value):
                return value or (False, _("Not Set"))

        elif group_by_field.type == 'selection':
            desc = Comodel.fields_get([group_by])[group_by]
            group_by_selection = dict(desc['selection'])
            group_by_selection[False] = _("Not Set")

            def group_id_name(value):
                return value, group_by_selection[value]

        else:
            def group_id_name(value):
                return (value, value) if value else (False, _("Not Set"))

    comodel_domain = kwargs.get('comodel_domain', [])
    enable_counters = kwargs.get('enable_counters')
    expand = kwargs.get('expand')

    if field.type == 'many2many':
        comodel_records = Comodel.search_read(comodel_domain, field_names, limit=limit)
        if expand and limit and len(comodel_records) == limit:
            # range truncated by the limit: signal instead of returning a
            # partial range
            return {'error_msg': str(SEARCH_PANEL_ERROR_MESSAGE)}

        group_domain = kwargs.get('group_domain')
        field_range = []
        for record in comodel_records:
            record_id = record['id']
            values = {
                'id': record_id,
                'display_name': record['display_name'],
            }
            if group_by:
                group_id, group_name = group_id_name(record[group_by])
                values['group_id'] = group_id
                values['group_name'] = group_name

            if enable_counters or not expand:
                search_domain = AND([
                    model_domain,
                    [(field_name, 'in', record_id)],
                ])
                local_extra_domain = extra_domain
                if group_by and group_domain:
                    local_extra_domain = AND([
                        local_extra_domain,
                        group_domain.get(json.dumps(group_id), []),
                    ])
                search_count_domain = AND([search_domain, local_extra_domain])
                if enable_counters:
                    count = self.search_count(search_count_domain)
                if not expand:
                    # inImage: whether this value appears among the records
                    # (reuse the counter when the extra domain is trivial)
                    if enable_counters and is_true_domain(local_extra_domain):
                        inImage = count
                    else:
                        inImage = self.search(search_domain, limit=1)

            if expand or inImage:
                if enable_counters:
                    values['__count'] = count
                field_range.append(values)

        if not expand and limit and len(field_range) == limit:
            return {'error_msg': str(SEARCH_PANEL_ERROR_MESSAGE)}

        return {
            'values': field_range,
        }

    if field.type == 'many2one':
        if enable_counters or not expand:
            extra_domain = AND([
                extra_domain,
                kwargs.get('group_domain', []),
            ])
            domain_image = self._search_panel_field_image(field_name,
                                                          model_domain=model_domain,
                                                          extra_domain=extra_domain,
                                                          only_counters=expand,
                                                          set_limit=limit and not (expand or group_by or comodel_domain),
                                                          **kwargs)

        if not (expand or group_by or comodel_domain):
            # the image alone is the full answer
            values = list(domain_image.values())
            if limit and len(values) == limit:
                return {'error_msg': str(SEARCH_PANEL_ERROR_MESSAGE)}
            return {
                'values': values,
            }

        if not expand:
            # restrict the comodel search to the values actually present
            image_element_ids = list(domain_image.keys())
            comodel_domain = AND([
                comodel_domain,
                [('id', 'in', image_element_ids)],
            ])
        comodel_records = Comodel.search_read(comodel_domain, field_names, limit=limit)
        if limit and len(comodel_records) == limit:
            return {'error_msg': str(SEARCH_PANEL_ERROR_MESSAGE)}

        field_range = []
        for record in comodel_records:
            record_id = record['id']
            values = {
                'id': record_id,
                'display_name': record['display_name'],
            }
            if group_by:
                group_id, group_name = group_id_name(record[group_by])
                values['group_id'] = group_id
                values['group_name'] = group_name
            if enable_counters:
                image_element = domain_image.get(record_id)
                values['__count'] = image_element['__count'] if image_element else 0
            field_range.append(values)

        return {
            'values': field_range,
        }
def _get_invoice_matching_query(self, st_lines_with_partner, excluded_ids):
    ''' Returns the query applying the current invoice_matching reconciliation
    model to the provided statement lines.

    :param st_lines_with_partner: A list of tuples (statement_line, partner),
                                  associating each statement line to treate with
                                  the corresponding partner, given by the partner map
    :param excluded_ids: Account.move.lines to exclude.
    :return: (query, params)
    '''
    self.ensure_one()
    if self.rule_type != 'invoice_matching':
        raise UserError(_('Programmation Error: Can\'t call _get_invoice_matching_query() for different rules than \'invoice_matching\''))

    unaccent = get_unaccent_wrapper(self._cr)

    # N.B: 'communication_flag' is there to distinguish invoice matching through the number/reference
    # (higher priority) from invoice matching using the partner (lower priority).
    query = r'''
    SELECT
        st_line.id AS id,
        aml.id AS aml_id,
        aml.currency_id AS aml_currency_id,
        aml.date_maturity AS aml_date_maturity,
        aml.amount_residual AS aml_amount_residual,
        aml.amount_residual_currency AS aml_amount_residual_currency,
        ''' + self._get_select_communication_flag() + r''' AS communication_flag,
        ''' + self._get_select_payment_reference_flag() + r''' AS payment_reference_flag
    FROM account_bank_statement_line st_line
    JOIN account_move st_line_move ON st_line_move.id = st_line.move_id
    JOIN res_company company ON company.id = st_line_move.company_id
    , account_move_line aml
    LEFT JOIN account_move move ON move.id = aml.move_id AND move.state = 'posted'
    LEFT JOIN account_account account ON account.id = aml.account_id
    LEFT JOIN res_partner aml_partner ON aml.partner_id = aml_partner.id
    LEFT JOIN account_payment payment ON payment.move_id = move.id
    WHERE
        aml.company_id = st_line_move.company_id
        AND move.state = 'posted'
        AND account.reconcile IS TRUE
        AND aml.reconciled IS FALSE
    '''

    # Add conditions to handle each of the statement lines we want to match
    st_lines_queries = []
    for st_line, partner in st_lines_with_partner:
        # In case we don't have any partner for this line, we try assigning one
        # with the rule mapping
        if st_line.amount > 0:
            st_line_subquery = r"aml.balance > 0"
        else:
            st_line_subquery = r"aml.balance < 0"

        if self.match_same_currency:
            # ids are integers coming from the ORM, safe to interpolate
            st_line_subquery += r" AND COALESCE(aml.currency_id, company.currency_id) = %s" % (st_line.foreign_currency_id.id or st_line.move_id.currency_id.id)

        if partner:
            st_line_subquery += r" AND aml.partner_id = %s" % partner.id
        else:
            st_line_subquery += r"""
                AND
                (
                    substring(REGEXP_REPLACE(st_line.payment_ref, '[^0-9\s]', '', 'g'), '\S(?:.*\S)*') != ''
                    AND
                    (
                        (""" + self._get_select_communication_flag() + """)
                        OR
                        (""" + self._get_select_payment_reference_flag() + """)
                    )
                )
                OR
                (
                    /* We also match statement lines without partners with amls
                    whose partner's name's parts (splitting on space) are all present
                    within the payment_ref, in any order, with any characters between them. */
                    aml_partner.name IS NOT NULL
                    AND """ + unaccent("st_line.payment_ref") + r""" ~* ('^' || (
                        SELECT string_agg(concat('(?=.*\m', chunk[1], '\M)'), '')
                        FROM regexp_matches(""" + unaccent("aml_partner.name") + r""", '\w{3,}', 'g') AS chunk
                    ))
                )
            """

        st_lines_queries.append(r"st_line.id = %s AND (%s)" % (st_line.id, st_line_subquery))

    query += r" AND (%s) " % " OR ".join(st_lines_queries)

    params = {}

    # If this reconciliation model defines a past_months_limit, we add a condition
    # to the query to only search on move lines that are younger than this limit.
    if self.past_months_limit:
        date_limit = fields.Date.context_today(self) - relativedelta(months=self.past_months_limit)
        # BUGFIX: leading space added — without it this clause could fuse
        # with the following 'AND aml.id NOT IN ...' clause and produce
        # invalid SQL when both conditions apply.
        query += " AND aml.date >= %(aml_date_limit)s"
        params['aml_date_limit'] = date_limit

    # Filter out excluded account.move.line.
    if excluded_ids:
        # BUGFIX: leading space added for the same reason as above.
        query += ' AND aml.id NOT IN %(excluded_aml_ids)s'
        params['excluded_aml_ids'] = tuple(excluded_ids)

    if self.matching_order == 'new_first':
        query += ' ORDER BY aml_date_maturity DESC, aml_id DESC'
    else:
        query += ' ORDER BY aml_date_maturity ASC, aml_id ASC'

    return query, params
def _import_facturx(self, tree, invoice):
    """ Decodes a factur-x invoice into an invoice.

    :param tree: the factur-x tree to decode.
    :param invoice: the invoice to update or an empty recordset.
    :returns: the invoice where the factur-x data was imported, or None for
        'entry' move types.
    :raises UserError: when neither the journal nor the move type can be
        determined from the context or the invoice.
    """
    amount_total_import = None

    default_move_type = False
    if invoice._context.get('default_journal_id'):
        journal = self.env['account.journal'].browse(
            self.env.context['default_journal_id'])
        default_move_type = 'out_invoice' if journal.type == 'sale' else 'in_invoice'
    elif invoice._context.get('default_move_type'):
        default_move_type = self._context['default_move_type']
    elif invoice.move_type in self.env['account.move'].get_invoice_types(include_receipts=True):
        # in case an attachment is saved on a draft invoice previously created, we might
        # have lost the default value in context but the type was already set
        default_move_type = invoice.move_type

    if not default_move_type:
        raise UserError(
            _("No information about the journal or the type of invoice is passed"))
    if default_move_type == 'entry':
        return

    # Total amount.
    elements = tree.xpath('//ram:GrandTotalAmount', namespaces=tree.nsmap)
    total_amount = elements and float(elements[0].text) or 0.0

    # Refund type.
    # There is two modes to handle refund in Factur-X:
    # a) type_code == 380 for invoice, type_code == 381 for refund, all positive amounts.
    # b) type_code == 380, negative amounts in case of refund.
    # To handle both, we consider the 'a' mode and switch to 'b' if a negative amount is encountered.
    elements = tree.xpath('//rsm:ExchangedDocument/ram:TypeCode', namespaces=tree.nsmap)
    type_code = elements[0].text

    # BUGFIX: the original discarded the result of str.replace() (strings
    # are immutable), so the normalization to the '_invoice' form was a
    # no-op. Assign the result back.
    default_move_type = default_move_type.replace('_refund', '_invoice')
    if type_code == '381':
        default_move_type = 'out_refund' if default_move_type == 'out_invoice' else 'in_refund'
        refund_sign = -1
    else:
        # Handle 'b' refund mode.
        if total_amount < 0:
            default_move_type = 'out_refund' if default_move_type == 'out_invoice' else 'in_refund'
        refund_sign = -1 if 'refund' in default_move_type else 1

    # Write the type as the journal entry is already created.
    invoice.move_type = default_move_type

    # self could be a single record (editing) or be empty (new).
    with Form(invoice.with_context(
            default_move_type=default_move_type,
            account_predictive_bills_disable_prediction=True)) as invoice_form:
        # Partner (first step to avoid warning 'Warning! You must first select a partner.').
        partner_type = invoice_form.journal_id.type == 'purchase' and 'SellerTradeParty' or 'BuyerTradeParty'
        elements = tree.xpath('//ram:' + partner_type + '/ram:SpecifiedTaxRegistration/ram:ID',
                              namespaces=tree.nsmap)
        partner = elements and self.env['res.partner'].search(
            [('vat', '=', elements[0].text)], limit=1)
        if not partner:
            elements = tree.xpath('//ram:' + partner_type + '/ram:Name',
                                  namespaces=tree.nsmap)
            partner_name = elements and elements[0].text
            partner = elements and self.env['res.partner'].search(
                [('name', 'ilike', partner_name)], limit=1)
        if not partner:
            elements = tree.xpath('//ram:' + partner_type + '//ram:URIID[@schemeID=\'SMTP\']',
                                  namespaces=tree.nsmap)
            partner = elements and self.env['res.partner'].search(
                [('email', '=', elements[0].text)], limit=1)
        if partner:
            invoice_form.partner_id = partner

        # Reference.
        elements = tree.xpath('//rsm:ExchangedDocument/ram:ID', namespaces=tree.nsmap)
        if elements:
            invoice_form.ref = elements[0].text

        # Name.
        elements = tree.xpath('//ram:BuyerOrderReferencedDocument/ram:IssuerAssignedID',
                              namespaces=tree.nsmap)
        if elements:
            invoice_form.payment_reference = elements[0].text

        # Comment.
        elements = tree.xpath('//ram:IncludedNote/ram:Content', namespaces=tree.nsmap)
        if elements:
            invoice_form.narration = elements[0].text

        # Total amount.
        elements = tree.xpath('//ram:GrandTotalAmount', namespaces=tree.nsmap)
        if elements:
            # Currency.
            if elements[0].attrib.get('currencyID'):
                currency_str = elements[0].attrib['currencyID']
                currency = self.env.ref('base.%s' % currency_str.upper(),
                                        raise_if_not_found=False)
                if currency != self.env.company.currency_id and currency.active:
                    invoice_form.currency_id = currency

            # Store xml total amount.
            amount_total_import = total_amount * refund_sign

        # Date.
        elements = tree.xpath('//rsm:ExchangedDocument/ram:IssueDateTime/udt:DateTimeString',
                              namespaces=tree.nsmap)
        if elements:
            date_str = elements[0].text
            date_obj = datetime.strptime(date_str, DEFAULT_FACTURX_DATE_FORMAT)
            invoice_form.invoice_date = date_obj.strftime(DEFAULT_SERVER_DATE_FORMAT)

        # Due date.
        elements = tree.xpath('//ram:SpecifiedTradePaymentTerms/ram:DueDateDateTime/udt:DateTimeString',
                              namespaces=tree.nsmap)
        if elements:
            date_str = elements[0].text
            date_obj = datetime.strptime(date_str, DEFAULT_FACTURX_DATE_FORMAT)
            invoice_form.invoice_date_due = date_obj.strftime(DEFAULT_SERVER_DATE_FORMAT)

        # Invoice lines.
        elements = tree.xpath('//ram:IncludedSupplyChainTradeLineItem', namespaces=tree.nsmap)
        if elements:
            for element in elements:
                with invoice_form.invoice_line_ids.new() as invoice_line_form:
                    # Sequence.
                    line_elements = element.xpath('.//ram:AssociatedDocumentLineDocument/ram:LineID',
                                                  namespaces=tree.nsmap)
                    if line_elements:
                        invoice_line_form.sequence = int(line_elements[0].text)

                    # Product.
                    line_elements = element.xpath('.//ram:SpecifiedTradeProduct/ram:Name',
                                                  namespaces=tree.nsmap)
                    if line_elements:
                        invoice_line_form.name = line_elements[0].text
                    line_elements = element.xpath('.//ram:SpecifiedTradeProduct/ram:SellerAssignedID',
                                                  namespaces=tree.nsmap)
                    if line_elements and line_elements[0].text:
                        product = self.env['product.product'].search([
                            ('default_code', '=', line_elements[0].text)
                        ])
                        if product:
                            invoice_line_form.product_id = product
                    if not invoice_line_form.product_id:
                        line_elements = element.xpath('.//ram:SpecifiedTradeProduct/ram:GlobalID',
                                                      namespaces=tree.nsmap)
                        if line_elements and line_elements[0].text:
                            product = self.env['product.product'].search([
                                ('barcode', '=', line_elements[0].text)
                            ])
                            if product:
                                invoice_line_form.product_id = product

                    # Quantity.
                    line_elements = element.xpath('.//ram:SpecifiedLineTradeDelivery/ram:BilledQuantity',
                                                  namespaces=tree.nsmap)
                    if line_elements:
                        invoice_line_form.quantity = float(line_elements[0].text)

                    # Price Unit.
                    line_elements = element.xpath('.//ram:GrossPriceProductTradePrice/ram:ChargeAmount',
                                                  namespaces=tree.nsmap)
                    if line_elements:
                        quantity_elements = element.xpath('.//ram:GrossPriceProductTradePrice/ram:BasisQuantity',
                                                          namespaces=tree.nsmap)
                        if quantity_elements:
                            invoice_line_form.price_unit = float(line_elements[0].text) / float(quantity_elements[0].text)
                        else:
                            invoice_line_form.price_unit = float(line_elements[0].text)
                    else:
                        line_elements = element.xpath('.//ram:NetPriceProductTradePrice/ram:ChargeAmount',
                                                      namespaces=tree.nsmap)
                        if line_elements:
                            quantity_elements = element.xpath('.//ram:NetPriceProductTradePrice/ram:BasisQuantity',
                                                              namespaces=tree.nsmap)
                            if quantity_elements:
                                invoice_line_form.price_unit = float(line_elements[0].text) / float(quantity_elements[0].text)
                            else:
                                invoice_line_form.price_unit = float(line_elements[0].text)

                    # Discount.
                    line_elements = element.xpath('.//ram:AppliedTradeAllowanceCharge/ram:CalculationPercent',
                                                  namespaces=tree.nsmap)
                    if line_elements:
                        invoice_line_form.discount = float(line_elements[0].text)

                    # Taxes
                    line_elements = element.xpath('.//ram:SpecifiedLineTradeSettlement/ram:ApplicableTradeTax/ram:RateApplicablePercent',
                                                  namespaces=tree.nsmap)
                    invoice_line_form.tax_ids.clear()
                    for tax_element in line_elements:
                        percentage = float(tax_element.text)
                        tax = self.env['account.tax'].search([
                            ('company_id', '=', invoice_form.company_id.id),
                            ('amount_type', '=', 'percent'),
                            ('type_tax_use', '=', invoice_form.journal_id.type),
                            ('amount', '=', percentage),
                        ], limit=1)
                        if tax:
                            invoice_line_form.tax_ids.add(tax)
        elif amount_total_import:
            # No lines in BASICWL.
            with invoice_form.invoice_line_ids.new() as invoice_line_form:
                invoice_line_form.name = invoice_form.comment or '/'
                invoice_line_form.quantity = 1
                invoice_line_form.price_unit = amount_total_import

    return invoice_form.save()
def fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
    """Adapt the wizard view to the anonymization state and the wizard step.

    The placeholder group named 'placeholder1' in the base view is replaced
    by step-specific widgets (warning text, file import/export fields,
    result message), and buttons are removed once the process has run.

    :raises UserError: when the global anonymization state is unstable
        (some fields anonymized, some not).
    """
    state = self.env['ir.model.fields.anonymization']._get_global_state()
    step = self.env.context.get('step', 'new_window')
    res = super(IrModelFieldsAnonymizeWizard, self).fields_view_get(view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)
    eview = etree.fromstring(res['arch'])
    placeholder = eview.xpath("group[@name='placeholder1']")
    if len(placeholder):
        placeholder = placeholder[0]
        if step == 'new_window' and state == 'clear':
            # clicked in the menu and the fields are not anonymized: warn the
            # admin that backing up the db is very important
            # NOTE: addnext() inserts after the placeholder, so elements are
            # added in reverse display order.
            placeholder.addnext(
                etree.Element('field', {
                    'name': 'msg',
                    'colspan': '4',
                    'nolabel': '1'
                }))
            placeholder.addnext(etree.Element('newline'))
            placeholder.addnext(
                etree.Element('label', {'string': 'Warning'}))
            eview.remove(placeholder)
        elif step == 'new_window' and state == 'anonymized':
            # clicked in the menu and the fields are already anonymized
            placeholder.addnext(etree.Element('newline'))
            placeholder.addnext(
                etree.Element('field', {
                    'name': 'file_import',
                    'required': "1"
                }))
            placeholder.addnext(
                etree.Element('label', {'string': 'Anonymization file'}))
            eview.remove(placeholder)
        elif step == 'just_anonymized':
            # we just ran the anonymization process, we need the file export field
            placeholder.addnext(etree.Element('newline'))
            placeholder.addnext(
                etree.Element('field', {'name': 'file_export'}))
            # we need to remove the button:
            buttons = eview.xpath("button")
            for button in buttons:
                eview.remove(button)
            # and add a message:
            placeholder.addnext(
                etree.Element('field', {
                    'name': 'msg',
                    'colspan': '4',
                    'nolabel': '1'
                }))
            placeholder.addnext(etree.Element('newline'))
            placeholder.addnext(
                etree.Element('label', {'string': 'Result'}))
            # remove the placeholder:
            eview.remove(placeholder)
        elif step == 'just_desanonymized':
            # we just reversed the anonymization process, we don't need any field
            # we need to remove the button
            buttons = eview.xpath("button")
            for button in buttons:
                eview.remove(button)
            # and add a message
            placeholder.addnext(
                etree.Element('field', {
                    'name': 'msg',
                    'colspan': '4',
                    'nolabel': '1'
                }))
            placeholder.addnext(etree.Element('newline'))
            placeholder.addnext(
                etree.Element('label', {'string': 'Result'}))
            # remove the placeholder:
            eview.remove(placeholder)
        else:
            raise UserError(
                _("The database anonymization is currently in an unstable state. Some fields are anonymized,"
                  " while some fields are not anonymized. You should try to solve this problem before trying to do anything else."
                  ))
        res['arch'] = etree.tostring(eview, encoding='unicode')
    return res
def unlink(self):
    """Delete the scrap orders, refusing any that are already done."""
    states = self.mapped('state')
    if 'done' in states:
        raise UserError(_('You cannot delete a scrap which is done.'))
    return super(StockScrap, self).unlink()
def _check_aba_routing(self, aba_routing): if aba_routing and not re.match(r'^\d{1,9}$', aba_routing): raise UserError(_('ABA/Routing should only contains numbers (maximum 9 digits).')) return aba_routing
def _get_lang_name(self, lang_code):
    """Return the display name of the language identified by lang_code.

    :raises UserError: when no language matches lang_code.
    """
    lang = self.env['res.lang']._lang_get(lang_code)
    if lang:
        return lang.name
    raise UserError(_('No language with code "%s" exists', lang_code))
def action_delayed_line(self):
    """Notify the user (via UserError) that this line will arrive late."""
    planned = Dt.from_string(self.date_planned)
    localized = Dt.context_timestamp(self, planned)
    raise UserError(_(
        'This line is scheduled for: %s. \n However it is now planned to '
        'arrive late.') % Dt.to_string(localized))
def get_bban(self):
    """Return the BBAN derived from this account's IBAN number.

    :raises UserError: when the account number is not an IBAN.
    """
    if self.acc_type == 'iban':
        return get_bban_from_iban(self.acc_number)
    raise UserError(_("Cannot compute the BBAN because the account number is not an IBAN."))
def _graph_y_query(self):
    """Abstract hook: concrete sales channels must supply the y-axis query."""
    message = _('Undefined graph model for Sales Channel: %s') % self.name
    raise UserError(message)
def get_data(self, data):
    """Build asset depreciation report lines for the given period.

    :param data: dict with 'start_date' and 'end_date' ('YYYY-MM-DD' strings).
    :return: list of dicts, one per product, each holding opening/addition/
        total figures for asset value, depreciation and net value.
    :raises UserError: when no posted depreciation lines fall in the period.
    """
    # Posted depreciation lines inside the requested period.
    dep_lines = self.env['account.asset.depreciation.line'].search([
        ('depreciation_date', '>=', data['start_date']),
        ('depreciation_date', '<=', data['end_date']),
        ('move_check', '=', True),
        ('move_id', '!=', False),
        ('move_id.state', '=', 'posted')
    ])
    if not dep_lines:
        raise UserError(_("No Depreciation Lines in this fiscal year.."))
    data_dict = {}
    for line_id in dep_lines:
        product_id = line_id.asset_id.product_id.id
        depre_date = datetime.strptime(line_id.depreciation_date, "%Y-%m-%d")
        purchase_date = datetime.strptime(line_id.asset_id.date, "%Y-%m-%d")
        if product_id not in data_dict:
            # First depreciation line seen for this product.
            if depre_date.year == purchase_date.year \
                    or line_id.sequence == 1:
                # Asset acquired this year: everything counts as addition.
                open_asset = 0.0
                open_dep = 0.0
                add_asset = line_id.begin_value
            else:
                open_asset = line_id.begin_value
                add_asset = 0.00
                open_dep = line_id.depreciated_value - line_id.amount
            add_dep = line_id.amount
            total_dep = open_dep + add_dep
            total_asset = open_asset + add_asset
            data_dict[product_id] = {
                'name': line_id.asset_id.name,
                'product_name': line_id.asset_id.product_id.name,
                'category': line_id.asset_id.category_id.name,
                'open_asset': open_asset,
                'add_asset': add_asset,
                'total_asset': total_asset,
                'open_dep': open_dep,
                'add_dep': add_dep,
                'total_dep': total_dep,
                'open_net': open_asset - open_dep,
                'add_net': add_asset - add_dep,
                'total_net': total_asset - total_dep
            }
        else:
            # Accumulate further lines of the same product.
            add_dep = line_id.amount
            if depre_date.year == purchase_date.year \
                    or line_id.sequence == 1:
                open_asset = 0.0
                add_asset = line_id.begin_value
                open_dep = 0.0
            else:
                open_asset = line_id.begin_value
                add_asset = 0.00
                open_dep = line_id.depreciated_value - line_id.amount
            # NOTE(review): this sale-date adjustment only exists on the
            # accumulate branch, not on the first-seen branch above —
            # confirm whether that asymmetry is intended.
            if line_id.asset_id.sale_date \
                    and line_id.asset_id.sale_date >= data['start_date'] \
                    and line_id.asset_id.sale_date <= data['end_date']:
                add_asset -= line_id.begin_value
                add_dep -= line_id.depreciated_value
            total_dep = open_dep + add_dep
            total_asset = open_asset + add_asset
            data_dict[product_id]['open_asset'] += open_asset
            data_dict[product_id]['add_asset'] += add_asset
            data_dict[product_id]['total_asset'] += total_asset
            data_dict[product_id]['open_dep'] += open_dep
            data_dict[product_id]['add_dep'] += add_dep
            data_dict[product_id]['total_dep'] += total_dep
            data_dict[product_id]['open_net'] += open_asset - open_dep
            data_dict[product_id]['add_net'] += add_asset - add_dep
            data_dict[product_id]['total_net'] += total_asset - total_dep
    # Dead pre-initialisation of the result list removed; insertion order of
    # data_dict is preserved, matching the original key-iteration order.
    return list(data_dict.values())
def install_from_urls(self, urls):
    """Download apps from the configured apps server and install them.

    :param urls: mapping of module technical name -> download URL; a falsy
        URL means the local version is already the latest one.
    :return: a client action (restart/home) or the post-install action.
    :raises AccessDenied: if the user is not a system admin, or a URL does
        not point to the configured apps server.
    :raises UserError: when one-click install is disabled or a download fails.
    """
    # BUGFIX: the ``urls`` parameter (a dict) shadowed the werkzeug ``urls``
    # helper, so ``urls.url_parse(...)`` raised AttributeError at runtime.
    # Only ``.scheme`` and ``.netloc`` are consumed, so the stdlib parser is
    # a drop-in replacement.
    from urllib.parse import urlparse

    if not self.env.user.has_group('base.group_system'):
        raise AccessDenied()

    # One-click install is opt-in - cfr Issue #15225
    ad_dir = tools.config.addons_data_dir
    if not os.access(ad_dir, os.W_OK):
        msg = (_(
            "Automatic install of downloaded Apps is currently disabled."
        ) + "\n\n" + _(
            "To enable it, make sure this directory exists and is writable on the server:"
        ) + "\n%s" % ad_dir)
        _logger.warning(msg)
        raise UserError(msg)

    apps_server = urlparse(self.get_apps_server())

    OPENERP = flectra.release.product_name.lower()
    tmp = tempfile.mkdtemp()
    _logger.debug('Install from url: %r', urls)
    try:
        # 1. Download & unzip missing modules
        for module_name, url in urls.items():
            if not url:
                continue  # nothing to download, local version is already the last one

            # Refuse downloads from anywhere but the configured apps server.
            up = urlparse(url)
            if up.scheme != apps_server.scheme or up.netloc != apps_server.netloc:
                raise AccessDenied()

            try:
                _logger.info('Downloading module `%s` from Flectra Apps',
                             module_name)
                response = requests.get(url)
                response.raise_for_status()
                content = response.content
            except Exception:
                _logger.exception('Failed to fetch module %s', module_name)
                raise UserError(
                    _('The `%s` module appears to be unavailable at the moment, please try again later.'
                      ) % module_name)
            else:
                zipfile.ZipFile(io.BytesIO(content)).extractall(tmp)
                assert os.path.isdir(os.path.join(tmp, module_name))

        # 2a. Copy/Replace module source in addons path
        for module_name, url in urls.items():
            if module_name == OPENERP or not url:
                continue  # OPENERP is special case, handled below, and no URL means local module
            module_path = modules.get_module_path(module_name,
                                                  downloaded=True,
                                                  display_warning=False)
            bck = backup(module_path, False)
            _logger.info('Copy downloaded module `%s` to `%s`', module_name,
                         module_path)
            shutil.move(os.path.join(tmp, module_name), module_path)
            if bck:
                shutil.rmtree(bck)

        # 2b. Copy/Replace server+base module source if downloaded
        if urls.get(OPENERP):
            # special case. it contains the server and the base module.
            # extract path is not the same
            base_path = os.path.dirname(modules.get_module_path('base'))

            # copy all modules in the SERVER/flectra/addons directory to the new "flectra" module (except base itself)
            for d in os.listdir(base_path):
                if d != 'base' and os.path.isdir(os.path.join(base_path, d)):
                    destdir = os.path.join(tmp, OPENERP, 'addons', d)  # XXX 'flectra' subdirectory ?
                    shutil.copytree(os.path.join(base_path, d), destdir)

            # then replace the server by the new "base" module
            server_dir = tools.config['root_path']  # XXX or dirname()
            bck = backup(server_dir)
            _logger.info('Copy downloaded module `flectra` to `%s`',
                         server_dir)
            shutil.move(os.path.join(tmp, OPENERP), server_dir)
            # NOTE(review): the backup of the old server dir is deliberately
            # kept (rmtree commented out) so a broken upgrade can be reverted.
            #if bck:
            #    shutil.rmtree(bck)

        self.update_list()

        with_urls = [
            module_name for module_name, url in urls.items() if url
        ]
        downloaded = self.search([('name', 'in', with_urls)])
        installed = self.search([('id', 'in', downloaded.ids),
                                 ('state', '=', 'installed')])

        to_install = self.search([('name', 'in', list(urls)),
                                  ('state', '=', 'uninstalled')])
        post_install_action = to_install.button_immediate_install()

        if installed or to_install:
            # in this case, force server restart to reload python code...
            self._cr.commit()
            flectra.service.server.restart()
            return {
                'type': 'ir.actions.client',
                'tag': 'home',
                'params': {
                    'wait': True
                },
            }
        return post_install_action
    finally:
        shutil.rmtree(tmp)
def anonymize_database(self):
    """Sets the 'anonymized' state to defined fields.

    Replaces the content of every field registered for anonymization with a
    neutral placeholder, after exporting the original values to a JSON file
    so the operation can be reversed later.

    :return: an act_window reloading the wizard form.
    :raises UserError: if the database is already (or partially) anonymized,
        if no field is registered, or if a field has an unsupported type.
    """
    self.ensure_one()

    # create a new history record:
    history = self.env['ir.model.fields.anonymization.history'].create({
        'date': fields.Datetime.now(),
        'state': 'started',
        'direction': 'clear -> anonymized'
    })

    # check that all the defined fields are in the 'clear' state
    state = self.env['ir.model.fields.anonymization']._get_global_state()
    error_type = _('Error !')
    if state == 'anonymized':
        raise UserError('%s: %s' % (
            error_type,
            _("The database is currently anonymized, you cannot anonymize it again."
              )))
    elif state == 'unstable':
        raise UserError('%s: %s' % (
            error_type,
            _("The database anonymization is currently in an unstable state. Some fields are anonymized,"
              " while some fields are not anonymized. You should try to solve this problem before trying to do anything."
              )))

    # do the anonymization:
    dirpath = os.environ.get('HOME') or os.getcwd()
    rel_filepath = 'field_anonymization_%s_%s.json' % (self.env.cr.dbname,
                                                       history.id)
    abs_filepath = os.path.abspath(os.path.join(dirpath, rel_filepath))

    ano_fields = self.env['ir.model.fields.anonymization'].search([
        ('state', '!=', 'not_existing')
    ])
    if not ano_fields:
        raise UserError(
            '%s: %s' % (error_type, _("No fields are going to be anonymized.")))

    data = []
    for field in ano_fields:
        model_name = field.model_id.model
        field_name = field.field_id.name
        field_type = field.field_id.ttype
        table_name = self.env[model_name]._table

        # get the current value
        # (identifiers come from trusted ir.model metadata, not user input)
        self.env.cr.execute('select id, "%s" from "%s"' %
                            (field_name, table_name))
        for record in self.env.cr.dictfetchall():
            data.append({
                "model_id": model_name,
                "field_id": field_name,
                "id": record['id'],
                "value": record[field_name]
            })

            # anonymize the value:
            anonymized_value = None
            sid = str(record['id'])
            if field_type == 'char':
                anonymized_value = 'xxx' + sid
            elif field_type == 'selection':
                anonymized_value = 'xxx' + sid
            elif field_type == 'text':
                anonymized_value = 'xxx' + sid
            elif field_type == 'html':
                anonymized_value = 'xxx' + sid
            elif field_type == 'boolean':
                anonymized_value = random.choice([True, False])
            elif field_type == 'date':
                anonymized_value = '2011-11-11'
            elif field_type == 'datetime':
                anonymized_value = '2011-11-11 11:11:11'
            elif field_type in ('float', 'monetary'):
                anonymized_value = 0.0
            elif field_type == 'integer':
                anonymized_value = 0
            elif field_type in [
                    'binary', 'many2many', 'many2one', 'one2many', 'reference'
            ]:  # cannot anonymize these kind of fields
                raise UserError('%s: %s' % (
                    error_type,
                    _("Cannot anonymize fields of these types: binary, many2many, many2one, one2many, reference."
                      )))

            if anonymized_value is None:
                raise UserError(
                    '%s: %s' %
                    (error_type, _("Anonymized value can not be empty.")))

            # identifiers interpolated from metadata; value/id are parameters
            sql = 'update "%(table)s" set "%(field)s" = %%(anonymized_value)s where id = %%(id)s' % {
                'table': table_name,
                'field': field_name,
            }
            self.env.cr.execute(sql, {
                'anonymized_value': anonymized_value,
                'id': record['id']
            })

    # save json file:
    with open(abs_filepath, 'w') as fn:
        json.dump(data, fn)

    # update the anonymization fields:
    ano_fields.write({'state': 'anonymized'})

    # add a result message in the wizard:
    msgs = [
        "Anonymization successful.",
        "",
        "Do not forget to save the resulting file to a safe place because you will not be able to revert the anonymization without this file.",
        "",
        "This file is also stored in the %s directory. The absolute file path is: %s.",
    ]
    msg = '\n'.join(msgs) % (dirpath, abs_filepath)

    with open(abs_filepath, 'rb') as fn:
        self.write({
            'msg': msg,
            # BUGFIX: base64.encodestring() was removed in Python 3.9;
            # encodebytes() is the drop-in replacement.
            'file_export': base64.encodebytes(fn.read()),
        })

    # update the history record:
    history.write({
        'field_ids': [[6, 0, ano_fields.ids]],
        'msg': msg,
        'filepath': abs_filepath,
        'state': 'done',
    })

    return {
        'res_id': self.id,
        'view_id': self.env.ref(
            'anonymization.view_ir_model_fields_anonymize_wizard_form').ids,
        'view_type': 'form',
        "view_mode": 'form',
        'res_model': 'ir.model.fields.anonymize.wizard',
        'type': 'ir.actions.act_window',
        'context': {
            'step': 'just_anonymized'
        },
        'target': 'new'
    }
def action_payslip_cancel(self): if self.filtered(lambda slip: slip.state == 'done'): raise UserError(_("Cannot cancel a payslip that is done.")) return self.write({'state': 'cancel'})
def reverse_anonymize_database(self):
    """Set the 'clear' state to defined fields.

    Restores the original values previously exported by
    ``anonymize_database`` from the uploaded export file, applying any
    version-specific migration fixes registered for the current server.

    :return: an act_window reloading the wizard form.
    :raises UserError: if the database is not anonymized, is in an unstable
        state, or no export file was supplied.
    """
    self.ensure_one()
    IrModelFieldsAnonymization = self.env['ir.model.fields.anonymization']

    # check that all the defined fields are in the 'anonymized' state
    state = IrModelFieldsAnonymization._get_global_state()
    if state == 'clear':
        raise UserError(
            _("The database is not currently anonymized, you cannot reverse the anonymization."
              ))
    elif state == 'unstable':
        raise UserError(
            _("The database anonymization is currently in an unstable state. Some fields are anonymized,"
              " while some fields are not anonymized. You should try to solve this problem before trying to do anything."
              ))

    if not self.file_import:
        raise UserError('%s: %s' % (
            _('Error !'),
            _("It is not possible to reverse the anonymization process without supplying the anonymization export file."
              )))

    # reverse the anonymization:
    # load the json/pickle file content into a data structure:
    # BUGFIX: base64.decodestring() was removed in Python 3.9;
    # decodebytes() is the drop-in replacement.
    content = base64.decodebytes(self.file_import)
    try:
        data = json.loads(content.decode('utf8'))
    except Exception:
        # backward-compatible mode for exports produced by older versions.
        # SECURITY: pickle.loads() can execute arbitrary code from the
        # uploaded file; only admins reach this wizard, but treat legacy
        # exports with care.
        data = pickle.loads(content, encoding='utf8')

    # migration fixes registered for this server version, grouped per field
    fixes = self.env[
        'ir.model.fields.anonymization.migration.fix'].search_read([
            ('target_version', '=', '.'.join(
                str(v) for v in version_info[:2]))
        ], ['model_name', 'field_name', 'query', 'query_type', 'sequence'])
    fixes = group(fixes, ('model_name', 'field_name'))

    for line in data:
        queries = []
        table_name = self.env[line['model_id']]._table if line[
            'model_id'] in self.env else None

        # check if custom sql exists:
        key = (line['model_id'], line['field_id'])
        custom_updates = fixes.get(key)
        if custom_updates:
            custom_updates.sort(key=itemgetter('sequence'))
            queries = [(record['query'], record['query_type'])
                       for record in custom_updates if record['query_type']]
        elif table_name:
            queries = [(
                'update "%(table)s" set "%(field)s" = %%(value)s where id = %%(id)s'
                % {
                    'table': table_name,
                    'field': line['field_id'],
                }, 'sql')]

        for query in queries:
            if query[1] == 'sql':
                self.env.cr.execute(query[0], {
                    'value': line['value'],
                    'id': line['id']
                })
            elif query[1] == 'python':
                safe_eval(query[0] % line)
            else:
                # BUGFIX: `query` is a (query, query_type) tuple, so the old
                # `query['query_type']` raised TypeError instead of reporting
                # the unknown type.
                raise Exception(
                    "Unknown query type '%s'. Valid types are: sql, python."
                    % (query[1], ))

    # update the anonymization fields:
    ano_fields = IrModelFieldsAnonymization.search([('state', '!=',
                                                     'not_existing')])
    ano_fields.write({'state': 'clear'})

    # add a result message in the wizard:
    self.msg = '\n'.join(["Successfully reversed the anonymization.", ""])

    # create a new history record:
    history = self.env['ir.model.fields.anonymization.history'].create({
        'date': fields.Datetime.now(),
        'field_ids': [[6, 0, ano_fields.ids]],
        'msg': self.msg,
        'filepath': False,
        'direction': 'anonymized -> clear',
        'state': 'done'
    })

    return {
        'res_id': self.id,
        'view_id': self.env.ref(
            'anonymization.view_ir_model_fields_anonymize_wizard_form').ids,
        'view_type': 'form',
        "view_mode": 'form',
        'res_model': 'ir.model.fields.anonymize.wizard',
        'type': 'ir.actions.act_window',
        'context': {
            'step': 'just_desanonymized'
        },
        'target': 'new'
    }
def _constrains_event(self): if any(question.event_type_id and question.event_id for question in self): raise UserError( _('Question should belong to either event category or event but not both' ))
def _onchange_activity_date_deadline_range(self): if self.activity_date_deadline_range < 0: raise UserError(_("The 'Due Date In' value can't be negative."))
def _cart_update(self, product_id=None, line_id=None, add_qty=0, set_qty=0, attributes=None, **kwargs):
    """ Add or set product quantity, add_qty can be negative

    :param product_id: id of the product.product to add or update
    :param line_id: id of an existing order line to update; ``False``/``None``
        means the line is looked up (or created) from ``product_id``
    :param add_qty: quantity delta added to the current line quantity
    :param set_qty: absolute quantity; takes precedence over ``add_qty``
    :param attributes: used to build the description of a newly created line
    :return: dict with the resulting ``line_id`` and ``quantity``
    :raises UserError: when the order is no longer in draft state
    """
    self.ensure_one()
    SaleOrderLineSudo = self.env['sale.order.line'].sudo()

    quantity = 0
    order_line = False
    # A non-draft order can no longer be edited from the website; drop the
    # session reference so a fresh cart is started.
    if self.state != 'draft':
        request.session['sale_order_id'] = None
        raise UserError(
            _('It is forbidden to modify a sales order which is not in draft status'
              ))
    if line_id is not False:
        order_lines = self._cart_find_product_line(product_id, line_id,
                                                   **kwargs)
        order_line = order_lines and order_lines[0]

    # Create line if no line with product_id can be located
    if not order_line:
        values = self._website_product_id_change(self.id, product_id, qty=1)
        values['name'] = self._get_line_description(self.id, product_id,
                                                    attributes=attributes)
        order_line = SaleOrderLineSudo.create(values)

        try:
            order_line._compute_tax_id()
        except ValidationError as e:
            # The validation may occur in backend (eg: taxcloud) but should fail silently in frontend
            _logger.debug("ValidationError occurs during tax compute. %s" % (e))
        # The new line was created with qty=1, so one unit of add_qty is
        # already accounted for.
        if add_qty:
            add_qty -= 1

    # compute new quantity
    if set_qty:
        quantity = set_qty
    elif add_qty is not None:
        quantity = order_line.product_uom_qty + (add_qty or 0)

    # Remove zero of negative lines
    if quantity <= 0:
        order_line.unlink()
    else:
        # update line
        values = self._website_product_id_change(self.id, product_id,
                                                 qty=quantity)
        # Recompute the unit price with pricelist/partner context so
        # quantity-based discounts apply, unless the price was fixed upstream.
        if self.pricelist_id.discount_policy == 'with_discount' and not self.env.context.get(
                'fixed_price'):
            order = self.sudo().browse(self.id)
            product_context = dict(self.env.context)
            product_context.setdefault('lang', order.partner_id.lang)
            product_context.update({
                'partner': order.partner_id.id,
                'quantity': quantity,
                'date': order.date_order,
                'pricelist': order.pricelist_id.id,
            })
            product = self.env['product.product'].with_context(
                product_context).browse(product_id)
            values['price_unit'] = self.env[
                'account.tax']._fix_tax_included_price_company(
                    order_line._get_display_price(product),
                    order_line.product_id.taxes_id, order_line.tax_id,
                    self.company_id)

        order_line.write(values)

    return {'line_id': order_line.id, 'quantity': quantity}
def filter_products(self): if self: rec_dic = self.read()[0] domain = [] if rec_dic: for k, v in rec_dic.items(): if "x_" in k and "x_opt_" not in k: if v: pro_field_name = k.split("_", 1)[1] smps_field_name = "x_opt_" + pro_field_name if rec_dic.get(smps_field_name, False): opt = rec_dic.get(smps_field_name, False) domain.append((pro_field_name, opt, v)) else: #if attribute fields found if "x_attr_" in k: domain.append( ('attribute_value_ids', 'in', v[0])) #if boolean fields found else: #check whether it's a selection or boolean fields or not smps_model_id = self.env['ir.model'].sudo( ).search( [('model', '=', 'smps.adv.wizard')], limit=1) if smps_model_id: search_field = self.env[ 'ir.model.fields'].sudo().search( [ ('name', '=', '' + k), ('model_id', '=', smps_model_id.id), ], limit=1) if search_field: if search_field.ttype in [ 'selection', 'boolean' ]: domain.append( (pro_field_name, '=', v)) else: domain.append((pro_field_name, '=', v[0])) else: raise UserError( _('Field not Found - ' + k)) else: raise UserError( _('Model not Found - smps.adv.wizard' )) if "product_attr_ids" in k and v: for attr_id in v: domain.append( ('attribute_value_ids', 'in', attr_id)) if domain: domain.append(('sale_ok', '=', True)) search_products = self.env['product.product'].search( domain) if search_products: result = [] smps_adv_wizard_product_line_obj = self.env[ 'smps.adv.wizard.product.line'] for product in search_products: line_vals = { 'product_id': product.id, } created_line = smps_adv_wizard_product_line_obj.create( line_vals) if created_line: result.append(created_line.id) self.product_ids = None self.product_ids = [(6, 0, result)] else: self.product_ids = None return { 'name': 'Select Products Advance', 'view_type': 'form', 'view_mode': 'form', 'res_model': 'smps.adv.wizard', 'view_id': False, 'type': 'ir.actions.act_window', 'res_id': self.id, 'target': 'new', }
def _create_bank_statements(self, stmts_vals):
    """ Create new bank statements from imported values, filtering out
    already imported transactions, and returns data used by the
    reconciliation widget

    :param stmts_vals: list of statement value dicts, each with a
        'transactions' list and optionally a 'balance_start'; the dicts are
        mutated in place (transactions popped, balance adjusted).
    :return: tuple ``(statement_line_ids, notifications)`` where
        notifications reports how many duplicates were skipped.
    :raises UserError: when every transaction was already imported.
    """
    BankStatement = self.env['account.bank.statement']
    BankStatementLine = self.env['account.bank.statement.line']
    bank_statements = self.env['account.bank.statement']

    # Filter out already imported transactions and create statements
    statement_line_ids = []
    ignored_statement_lines_import_ids = []
    for st_vals in stmts_vals:
        filtered_st_lines = []
        for line_vals in st_vals['transactions']:
            # keep the line unless its unique_import_id already exists
            if 'unique_import_id' not in line_vals \
                    or not line_vals['unique_import_id'] \
                    or not bool(
                        BankStatementLine.sudo().search(
                            [('unique_import_id', '=',
                              line_vals['unique_import_id'])], limit=1)):
                filtered_st_lines.append(line_vals)
            else:
                ignored_statement_lines_import_ids.append(
                    line_vals['unique_import_id'])
                # shift the opening balance by the skipped amount so the
                # statement still balances without the duplicate line
                if 'balance_start' in st_vals:
                    st_vals['balance_start'] += float(line_vals['amount'])

        if len(filtered_st_lines) > 0:
            # Remove values that won't be used to create records
            st_vals.pop('transactions', None)
            # Create the statement
            st_vals['line_ids'] = [[0, False, line]
                                   for line in filtered_st_lines]
            bank_statement = BankStatement.create(st_vals)
            statement_line_ids.extend(bank_statement.line_ids.ids)
            bank_statements |= bank_statement
    if len(statement_line_ids) == 0:
        raise UserError(_('You already have imported that file.'))

    # generated bank statements need to be posted to show the not reconciled button on journal dashboard
    bank_statements.button_post()

    # Prepare import feedback
    notifications = []
    num_ignored = len(ignored_statement_lines_import_ids)
    if num_ignored > 0:
        notifications += [{
            'type': 'warning',
            'message': _(
                "%d transactions had already been imported and were ignored."
            ) % num_ignored if num_ignored > 1 else
            _("1 transaction had already been imported and was ignored."),
            'details': {
                'name': _('Already imported items'),
                'model': 'account.bank.statement.line',
                'ids': BankStatementLine.search([
                    ('unique_import_id', 'in',
                     ignored_statement_lines_import_ids)
                ]).ids
            }
        }]
    return statement_line_ids, notifications
def _create_invoice(self, order, so_line, amount):
    """Create the down-payment invoice attached to ``so_line``.

    :param order: the sale.order the advance payment belongs to
    :param so_line: the sale.order.line carrying the down-payment product
    :param amount: incoming amount; note it is recomputed below from
        ``self.amount`` / ``self.advance_payment_method``
    :return: the created draft account.invoice
    :raises UserError: when no income account can be resolved for the
        down-payment product, or the configured amount is not positive.
    """
    inv_obj = self.env['account.invoice']
    ir_property_obj = self.env['ir.property']

    # resolve the income account: product -> category -> global default
    account_id = False
    if self.product_id.id:
        account_id = order.fiscal_position_id.map_account(self.product_id.property_account_income_id or self.product_id.categ_id.property_account_income_categ_id).id
    if not account_id:
        inc_acc = ir_property_obj.get('property_account_income_categ_id', 'product.category')
        account_id = order.fiscal_position_id.map_account(inc_acc).id if inc_acc else False
    if not account_id:
        raise UserError(
            _('There is no income account defined for this product: "%s". You may have to install a chart of account from Accounting app, settings menu.') % (self.product_id.name,))

    if self.amount <= 0.00:
        raise UserError(_('The value of the down payment amount must be positive.'))

    # NOTE(review): 'context' looks unused, but Odoo's _() historically
    # inspects the caller's frame for a 'context' variable to pick the
    # translation language; 'del context' then restores the default.
    # Confirm before treating this as dead code.
    context = {'lang': order.partner_id.lang}
    if self.advance_payment_method == 'percentage':
        amount = order.amount_untaxed * self.amount / 100
        name = _("Down payment of %s%%") % (self.amount,)
    else:
        amount = self.amount
        name = _('Down Payment')
    del context

    # map the product taxes through the order's fiscal position
    taxes = self.product_id.taxes_id.filtered(lambda r: not order.company_id or r.company_id == order.company_id)
    if order.fiscal_position_id and taxes:
        tax_ids = order.fiscal_position_id.map_tax(taxes).ids
    else:
        tax_ids = taxes.ids

    invoice = inv_obj.create({
        'name': order.client_order_ref or order.name,
        'origin': order.name,
        'type': 'out_invoice',
        'reference': False,
        'account_id': order.partner_id.property_account_receivable_id.id,
        'partner_id': order.partner_invoice_id.id,
        'partner_shipping_id': order.partner_shipping_id.id,
        'invoice_line_ids': [(0, 0, {
            'name': name,
            'origin': order.name,
            'account_id': account_id,
            'price_unit': amount,
            'quantity': 1.0,
            'discount': 0.0,
            'uom_id': self.product_id.uom_id.id,
            'product_id': self.product_id.id,
            'sale_line_ids': [(6, 0, [so_line.id])],
            'invoice_line_tax_ids': [(6, 0, tax_ids)],
            'account_analytic_id': order.analytic_account_id.id or False,
        })],
        'currency_id': order.pricelist_id.currency_id.id,
        'payment_term_id': order.payment_term_id.id,
        'fiscal_position_id': order.fiscal_position_id.id or order.partner_id.property_account_position_id.id,
        'team_id': order.team_id.id,
        'user_id': order.user_id.id,
        'comment': order.note,
    })
    invoice.compute_taxes()
    # log a note linking the invoice back to its originating order
    invoice.message_post_with_view('mail.message_origin_link',
        values={'self': invoice, 'origin': order},
        subtype_id=self.env.ref('mail.mt_note').id)
    return invoice
def write(self, values):
    """Guard sensitive updates on stock locations.

    Blocks company changes, switching to 'view' usage while quants exist,
    usage/scrap changes while reservations exist, and archiving locations
    still used by a warehouse or still holding stock.  Archiving cascades
    to child locations via the ``do_not_check_quant`` context flag.

    :param values: field values being written
    :raises UserError: when any of the guards above is violated.
    """
    # a location can never be moved to another company
    if 'company_id' in values:
        for location in self:
            if location.company_id.id != values['company_id']:
                raise UserError(
                    _("Changing the company of this record is forbidden at this point, you should rather archive it and create a new one."
                      ))
    # a 'view' location is purely structural and cannot hold products
    if 'usage' in values and values['usage'] == 'view':
        if self.mapped('quant_ids'):
            raise UserError(
                _("This location's usage cannot be changed to view as it contains products."
                  ))
    if 'usage' in values or 'scrap_location' in values:
        # only locations whose usage/scrap flag actually changes
        modified_locations = self.filtered(
            lambda l: any(l[f] != values[f] if f in values else False
                          for f in {'usage', 'scrap_location'}))
        reserved_quantities = self.env['stock.move.line'].search_count([
            ('location_id', 'in', modified_locations.ids),
            ('product_qty', '>', 0),
        ])
        if reserved_quantities:
            raise UserError(
                _("You cannot change the location type or its use as a scrap"
                  " location as there are products reserved in this location."
                  " Please unreserve the products first."))
    if 'active' in values:
        if values['active'] == False:
            # refuse to archive a location a warehouse depends on
            for location in self:
                warehouses = self.env['stock.warehouse'].search([
                    ('active', '=', True), '|',
                    ('lot_stock_id', '=', location.id),
                    ('view_location_id', '=', location.id)
                ])
                if warehouses:
                    raise UserError(
                        _("You cannot archive the location %s as it is"
                          " used by your warehouse %s") %
                        (location.display_name,
                         warehouses[0].display_name))

        # do_not_check_quant marks the recursive call below so children are
        # not re-validated a second time
        if not self.env.context.get('do_not_check_quant'):
            children_location = self.env['stock.location'].with_context(
                active_test=False).search([('id', 'child_of', self.ids)])
            internal_children_locations = children_location.filtered(
                lambda l: l.usage == 'internal')
            children_quants = self.env['stock.quant'].search([
                '&', '|', ('quantity', '!=', 0),
                ('reserved_quantity', '!=', 0),
                ('location_id', 'in', internal_children_locations.ids)
            ])
            if children_quants and values['active'] == False:
                raise UserError(
                    _('You still have some product in locations %s') %
                    (', '.join(
                        children_quants.mapped('location_id.display_name'))
                     ))
            else:
                # propagate (un)archiving to the child locations
                super(Location, children_location - self).with_context(do_not_check_quant=True).write({
                    'active': values['active'],
                })

    return super(Location, self).write(values)
def reconcile(self):
    ''' Reconcile the current move lines all together.
    :return: A dictionary representing a summary of what has been done during the reconciliation:
        * partials:             A recorset of all account.partial.reconcile created during the reconciliation.
        * full_reconcile:       An account.full.reconcile record created when there is nothing left to reconcile
                                in the involved lines.
        * tax_cash_basis_moves: An account.move recordset representing the tax cash basis journal entries.
    :raises UserError: when lines are already reconciled, the account does
        not allow reconciliation, or lines mix companies/accounts.
    '''
    results = {}

    if not self:
        return results

    # List unpaid invoices
    # (snapshot taken now so only invoices that BECOME paid during this
    # reconciliation trigger the paid action at the end)
    not_paid_invoices = self.move_id.filtered(lambda move: move.is_invoice(
        include_receipts=True) and move.payment_state not in ('paid', 'in_payment'))

    # ==== Check the lines can be reconciled together ====
    company = None
    account = None
    for line in self:
        if line.reconciled:
            raise UserError(
                _("You are trying to reconcile some entries that are already reconciled."
                  ))
        if not line.account_id.reconcile and line.account_id.internal_type != 'liquidity':
            raise UserError(
                _("Account %s does not allow reconciliation. First change the configuration of this account to allow it."
                  ) % line.account_id.display_name)
        if company is None:
            company = line.company_id
        elif line.company_id != company:
            raise UserError(
                _("Entries doesn't belong to the same company: %s != %s") %
                (company.display_name, line.company_id.display_name))
        if account is None:
            account = line.account_id
        elif line.account_id != account:
            raise UserError(
                _("Entries are not from the same account: %s != %s") %
                (account.display_name, line.account_id.display_name))

    # oldest maturity first so partials consume lines chronologically
    sorted_lines = self.sorted(key=lambda line: (line.date_maturity or line
                                                 .date, line.currency_id))

    # ==== Collect all involved lines through the existing reconciliation ====
    # Walk the partial-reconcile graph to closure: every line reachable from
    # the selection through existing partials is part of the reconciliation.
    involved_lines = sorted_lines
    involved_partials = self.env['account.partial.reconcile']
    current_lines = involved_lines
    current_partials = involved_partials
    while current_lines:
        current_partials = (
            current_lines.matched_debit_ids +
            current_lines.matched_credit_ids) - current_partials
        involved_partials += current_partials
        current_lines = (current_partials.debit_move_id +
                         current_partials.credit_move_id) - current_lines
        involved_lines += current_lines

    # ==== Create partials ====
    partials = self.env['account.partial.reconcile'].create(
        sorted_lines._prepare_reconciliation_partials())

    # Track newly created partials.
    results['partials'] = partials
    involved_partials += partials

    # ==== Create entries for cash basis taxes ====
    is_cash_basis_needed = account.user_type_id.type in ('receivable',
                                                         'payable')
    if is_cash_basis_needed and not self._context.get(
            'move_reverse_cancel'):
        tax_cash_basis_moves = partials._create_tax_cash_basis_moves()
        results['tax_cash_basis_moves'] = tax_cash_basis_moves

    # ==== Check if a full reconcile is needed ====
    # Same foreign currency everywhere: test the currency residual;
    # otherwise fall back to the company-currency residual.
    if involved_lines[0].currency_id and all(
            line.currency_id == involved_lines[0].currency_id
            for line in involved_lines):
        is_full_needed = all(
            line.currency_id.is_zero(line.amount_residual_currency)
            for line in involved_lines)
    else:
        is_full_needed = all(
            line.company_currency_id.is_zero(line.amount_residual)
            for line in involved_lines)

    if is_full_needed:
        # ==== Create the exchange difference move ====
        if self._context.get('no_exchange_difference'):
            exchange_move = None
        else:
            exchange_move = involved_lines._create_exchange_difference_move(
            )
            if exchange_move:
                exchange_move_lines = exchange_move.line_ids.filtered(
                    lambda line: line.account_id == account)

                # Track newly created lines.
                involved_lines += exchange_move_lines

                # Track newly created partials.
                exchange_diff_partials = exchange_move_lines.matched_debit_ids \
                                         + exchange_move_lines.matched_credit_ids
                involved_partials += exchange_diff_partials
                results['partials'] += exchange_diff_partials

                exchange_move._post(soft=False)

        # ==== Create the full reconcile ====
        results['full_reconcile'] = self.env[
            'account.full.reconcile'].create({
                'exchange_move_id': exchange_move and exchange_move.id,
                'partial_reconcile_ids': [(6, 0, involved_partials.ids)],
                'reconciled_line_ids': [(6, 0, involved_lines.ids)],
            })

    # Trigger action for paid invoices
    not_paid_invoices \
        .filtered(lambda move: move.payment_state in ('paid', 'in_payment')) \
        .action_invoice_paid()

    return results
def send_mail(self, auto_commit=False):
    """ Process the wizard content and proceed with sending the related
    email(s), rendering any template patterns on the fly if needed

    :param auto_commit: accepted for API compatibility; not read in this body
    :return: act_window_close action, or False when invoked from a partner
        form with no valid recipient.
    :raises UserError: when the body lacks the '__URL__' placeholder or no
        valid recipient could be determined.
    """
    SurveyUserInput = self.env['survey.user_input']
    Partner = self.env['res.partner']
    Mail = self.env['mail.mail']

    def create_response_and_send_mail(wizard, token, partner_id, email):
        """ Create one mail by recipients and replace __URL__ by link with identification token """
        #set url
        url = wizard.survey_id.public_url

        url = urls.url_parse(url).path[1:]  # dirty hack to avoid incorrect urls

        if token:
            url = url + '/' + token

        # post the message
        values = {
            'model': None,
            'res_id': None,
            'subject': wizard.subject,
            'body': wizard.body.replace("__URL__", url),
            'body_html': wizard.body.replace("__URL__", url),
            'parent_id': None,
            'attachment_ids': wizard.attachment_ids and [(6, 0, wizard.attachment_ids.ids)] or None,
            'email_from': wizard.email_from or None,
            'auto_delete': True,
        }
        if partner_id:
            values['recipient_ids'] = [(4, partner_id)]
        else:
            values['email_to'] = email
        Mail.create(values).send()

    def create_token(wizard, partner_id, email):
        # NOTE: reads 'context' from the enclosing loop below via closure
        if context.get("survey_resent_token"):
            # resend: reuse the token of an existing unanswered response
            survey_user_input = SurveyUserInput.search([
                ('survey_id', '=', wizard.survey_id.id),
                ('state', 'in', ['new', 'skip']), '|',
                ('partner_id', '=', partner_id), ('email', '=', email)
            ], limit=1)
            if survey_user_input:
                return survey_user_input.token
        if wizard.public != 'email_private':
            return None
        else:
            token = pycompat.text_type(uuid.uuid4())
            # create response with token
            survey_user_input = SurveyUserInput.create({
                'survey_id': wizard.survey_id.id,
                'deadline': wizard.date_deadline,
                'date_create': fields.Datetime.now(),
                'type': 'link',
                'state': 'new',
                'token': token,
                'partner_id': partner_id,
                'email': email})
            return survey_user_input.token

    for wizard in self:
        # check if __URL__ is in the text
        if wizard.body.find("__URL__") < 0:
            raise UserError(_("The content of the text don't contain '__URL__'. \
__URL__ is automaticaly converted into the special url of the survey."))

        context = self.env.context
        if not wizard.multi_email and not wizard.partner_ids and (context.get('default_partner_ids') or context.get('default_multi_email')):
            wizard.multi_email = context.get('default_multi_email')
            wizard.partner_ids = context.get('default_partner_ids')

        # quick check of email list
        emails_list = []
        if wizard.multi_email:
            # free-text addresses, minus those already covered by partners
            emails = set(emails_split.split(wizard.multi_email)) - set(wizard.partner_ids.mapped('email'))
            for email in emails:
                email = email.strip()
                if email_validator.match(email):
                    emails_list.append(email)

        # remove public anonymous access
        partner_list = []
        for partner in wizard.partner_ids:
            partner_list.append({'id': partner.id, 'email': partner.email})

        if not len(emails_list) and not len(partner_list):
            if wizard.model == 'res.partner' and wizard.res_id:
                return False
            raise UserError(_("Please enter at least one valid recipient."))

        # one mail per free-text address, matched to a partner when possible
        for email in emails_list:
            partner = Partner.search([('email', '=', email)], limit=1)
            token = create_token(wizard, partner.id, email)
            create_response_and_send_mail(wizard, token, partner.id, email)

        # one mail per selected partner
        for partner in partner_list:
            token = create_token(wizard, partner['id'], partner['email'])
            create_response_and_send_mail(wizard, token, partner['id'], partner['email'])

    return {'type': 'ir.actions.act_window_close'}
def search_panel_select_range(self, field_name, **kwargs):
    """ Return possible values of the field field_name (case select="one"),
        possibly with counters, and the parent field (if any and required)
        used to hierarchize them.

    :param field_name: the name of a field; of type many2one or selection.
    :param category_domain: domain generated by categories. Default is [].
    :param comodel_domain: domain of field values (if relational). Default is [].
    :param enable_counters: whether to count records by value. Default is False.
    :param expand: whether to return the full range of field values in comodel_domain
        or only the field image values (possibly filtered and/or completed
        with parents if hierarchize is set). Default is False.
    :param filter_domain: domain generated by filters. Default is [].
    :param hierarchize: determines if the categories must be displayed
        hierarchically (if possible). If set to true and _parent_name is set
        on the comodel field, the information necessary for the hierarchization
        will be returned. Default is True.
    :param limit: integer, maximal number of values to fetch. Default is None.
    :param search_domain: base domain of search. Default is [].
                with parents if hierarchize is set)
    :return: {
        'parent_field': parent field on the comodel of field, or False
        'values': array of dictionaries containing some info on the records
                    available on the comodel of the field 'field_name'.
                    The display name, the __count (how many records with that value)
                    and possibly parent_field are fetched.
    }
    or an object with an error message when limit is defined and is reached.
    :raises UserError: when field_name is neither many2one nor selection.
    """
    field = self._fields[field_name]
    supported_types = ['many2one', 'selection']
    if field.type not in supported_types:
        # build human-readable type labels for the error message
        types = dict(self.env["ir.model.fields"]._fields["ttype"].
                     _description_selection(self.env))
        raise UserError(
            _(
                'Only types %(supported_types)s are supported for category (found type %(field_type)s)',
                supported_types=", ".join(types[t] for t in supported_types),
                field_type=types[field.type],
            ))

    model_domain = kwargs.get('search_domain', [])
    extra_domain = AND([
        kwargs.get('category_domain', []),
        kwargs.get('filter_domain', []),
    ])

    if field.type == 'selection':
        # selection fields have a fixed range; no hierarchy possible
        return {
            'parent_field': False,
            'values': self._search_panel_selection_range(
                field_name,
                model_domain=model_domain,
                extra_domain=extra_domain,
                **kwargs),
        }

    Comodel = self.env[field.comodel_name].with_context(
        hierarchical_naming=False)
    field_names = ['display_name']
    hierarchize = kwargs.get('hierarchize', True)
    parent_name = False
    if hierarchize and Comodel._parent_name in Comodel._fields:
        parent_name = Comodel._parent_name
        field_names.append(parent_name)

        def get_parent_id(record):
            # many2one read() values are (id, display_name) pairs
            value = record[parent_name]
            return value and value[0]
    else:
        hierarchize = False

    comodel_domain = kwargs.get('comodel_domain', [])
    enable_counters = kwargs.get('enable_counters')
    expand = kwargs.get('expand')
    limit = kwargs.get('limit')

    if enable_counters or not expand:
        # "image": the set of values actually present in the filtered records
        domain_image = self._search_panel_field_image(
            field_name,
            model_domain=model_domain,
            extra_domain=extra_domain,
            only_counters=expand,
            set_limit=limit and not (expand or hierarchize or comodel_domain),
            **kwargs)

    if not (expand or hierarchize or comodel_domain):
        # fast path: the image alone is the answer
        values = list(domain_image.values())
        if limit and len(values) == limit:
            return {'error_msg': str(SEARCH_PANEL_ERROR_MESSAGE)}
        return {
            'parent_field': parent_name,
            'values': values,
        }

    if not expand:
        # restrict the comodel search to the image (plus ancestors if
        # a hierarchy must be displayed)
        image_element_ids = list(domain_image.keys())
        if hierarchize:
            condition = [('id', 'parent_of', image_element_ids)]
        else:
            condition = [('id', 'in', image_element_ids)]
        comodel_domain = AND([comodel_domain, condition])
    comodel_records = Comodel.search_read(comodel_domain, field_names,
                                          limit=limit)

    if hierarchize:
        ids = [rec['id'] for rec in comodel_records] if expand else image_element_ids
        # drop records whose parent chain is broken w.r.t. the fetched set
        comodel_records = self._search_panel_sanitized_parent_hierarchy(
            comodel_records, parent_name, ids)

    if limit and len(comodel_records) == limit:
        return {'error_msg': str(SEARCH_PANEL_ERROR_MESSAGE)}

    field_range = {}
    for record in comodel_records:
        record_id = record['id']
        values = {
            'id': record_id,
            'display_name': record['display_name'],
        }
        if hierarchize:
            values[parent_name] = get_parent_id(record)
        if enable_counters:
            image_element = domain_image.get(record_id)
            values['__count'] = image_element[
                '__count'] if image_element else 0
        field_range[record_id] = values

    if hierarchize and enable_counters:
        # propagate child counts up to their ancestors
        self._search_panel_global_counters(field_range, parent_name)

    return {
        'parent_field': parent_name,
        'values': list(field_range.values()),
    }
def unlink(self): if any(batch.state != 'draft' for batch in self): raise UserError(_("You can only delete draft batch transfers.")) return super().unlink()
def _check_use_google_gmail_service(self): if any(server.use_google_gmail_service and server.server_type != 'imap' for server in self): raise UserError(_('Gmail authentication only supports IMAP server type.'))
def unlink(self): raise UserError( _('Sale Closings are not meant to be written or deleted under any circumstances.' ))