Example #1
 def _address_fields(self):
     """ Returns the list of address fields that are synced from the parent
     when the `use_parent_address` flag is set.
     """
     fields = super(ResPartner, self)._address_fields()
     fields.append('company')
     return fields
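This override only takes effect if a matching column actually exists on res.partner; a minimal sketch of the companion declaration it presumably relies on (the field name and type are assumptions, the example does not show them):

 # Hedged sketch: assumed field declaration backing the override above
 # (the import path is `openerp` instead of `odoo` on versions before 10).
 from odoo import fields, models

 class ResPartner(models.Model):
     _inherit = 'res.partner'

     company = fields.Char(string='Company (address line)')  # assumed name and type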
Example #2
 def _address_fields(self):
     """ Returns the list of address fields that are synced from the parent
     when the `use_parent_address` flag is set.
     """
     fields = super(ResPartner, self)._address_fields()
     fields.append('company')
     return fields
Example #3
 def _get_opportunity_fields(self, cr, uid, context=None):
     if context is None:
         context = {}
     ids = context.get('active_ids', [])
     fields = []
     for table, field, alias in self._field_mappings.values():
         alias = alias or field
         if table:
             fields.append('{}.{} as {}'.format(table, field, alias))
         else:
             fields.append('{} as {}'.format(field, alias))
     
     sql = '''
     SELECT {}
     FROM crm_lead 
     LEFT JOIN res_partner ON (crm_lead.partner_id=res_partner.id)
     LEFT JOIN crm_case_stage ON (crm_lead.stage_id = crm_case_stage.id) 
     LEFT JOIN res_country ON (res_partner.country_id = res_country.id)
     LEFT JOIN ccg_offer_name ON (crm_lead.offer_name_id = ccg_offer_name.id)
     LEFT JOIN res_partner partner_contact ON (crm_lead.contact_name_id=partner_contact.id)
     LEFT JOIN res_users users ON (res_partner.user_id=users.id)
     LEFT JOIN res_partner sales ON (users.partner_id=sales.id)
     LEFT JOIN crm_lost_reason ON (crm_lead.lost_reason_id = crm_lost_reason.id)
     WHERE crm_lead.id in ({})
     '''.format(','.join(fields), ','.join(str(i) for i in ids))
     cr.execute(sql)
     return cr.dictfetchall()
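The query above interpolates the ids straight into the SQL string; a minimal parameterized sketch of the same idea (joins elided, and reusing the cr, fields and ids from the method) lets psycopg2 handle the quoting instead:

 # Hedged sketch: a tuple bound to %s is expanded by psycopg2 into a
 # parenthesised value list, so the ids need not be stringified into the query.
 sql = '''
 SELECT {}
 FROM crm_lead
 LEFT JOIN res_partner ON (crm_lead.partner_id = res_partner.id)
 WHERE crm_lead.id IN %s
 '''.format(','.join(fields))
 cr.execute(sql, (tuple(ids),))
 rows = cr.dictfetchall()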
Example #4
 def _get_invoice_line_key_cols(self):
     fields = [
         'name', 'origin', 'discount', 'invoice_line_tax_ids', 'price_unit',
         'product_id', 'account_id', 'account_analytic_id', 'uom_id'
     ]
     for field in ['analytics_id']:
         if field in self.env['account.invoice.line']._fields:
             fields.append(field)
     return fields
Example #5
    def default_get(self, fields):
        model = self.env.context.get('model')
        view_id = self.env.context.get('tree_view_id', None)
        model_obj = self.env[model]
        fields_view = model_obj.fields_view_get(view_id=view_id,
                                                view_type='tree')
        field_ids = self.env['ir.model.fields'].search([
            ('model', '=', model), ('name', 'in', fields_view['fields'].keys())
        ])
        view_name = (
            self.env['ir.ui.view'].browse(view_id).name or _('Additional')
        ) if not self.env.context.get('new_view') else _('Additional')
        priority = (self.env['ir.ui.view'].browse(view_id).priority
                    or 5000) if not self.env.context.get('new_view') else 5000
        group_ids = [
            (6, 0, self.env['ir.ui.view'].browse(view_id).groups_id.ids)
        ] if not self.env.context.get('new_view') else []

        fields = []
        hidden_fields = []
        for child in etree.fromstring(fields_view['arch']):
            if child.tag == 'field':
                field_id = field_ids.filtered(
                    lambda r: r.name == child.attrib['name'])
                if len(field_id):
                    invisible = 'modifiers' in child.attrib and json.loads(
                        child.attrib['modifiers']).get('invisible',
                                                       '0') not in ('0', False)
                    if child.attrib.get('invisible', '0') != '0' or invisible:
                        hidden_fields.append(
                            (field_id.id, json.dumps(dict(child.attrib))))
                    else:
                        fields.append(
                            (field_id.id, json.dumps(dict(child.attrib))))

        sequence_count = count(start=1)
        hidden_sequence_count = count(start=1)
        fields = [(0, 0, {
            'field_id': f,
            'sequence': next(sequence_count),
            'attributes': attrs
        }) for (f, attrs) in fields]
        hidden_fields = [(0, 0, {
            'field_id': f,
            'sequence': next(hidden_sequence_count),
            'attributes': attrs
        }) for (f, attrs) in hidden_fields]
        return {
            'field_ids': fields,
            'hidden_field_ids': hidden_fields,
            'model': model,
            'wizard_id': self.id,
            'name': view_name,
            'priority': priority,
            'group_ids': group_ids,
        }
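The tuples built above are standard Odoo x2many write commands: (0, 0, vals) creates a record and links it, (6, 0, ids) replaces the linked set. An illustrative sketch of the shape of the returned defaults (all ids and attribute payloads below are invented):

 # Illustrative only: what default_get() hands back to the wizard form.
 defaults = {
     'field_ids': [
         (0, 0, {'field_id': 42, 'sequence': 1,
                 'attributes': '{"name": "partner_id"}'}),
     ],
     'hidden_field_ids': [
         (0, 0, {'field_id': 43, 'sequence': 1,
                 'attributes': '{"name": "state", "invisible": "1"}'}),
     ],
     'group_ids': [(6, 0, [11, 12])],  # replace the linked groups with ids 11 and 12
 }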
Example #6
        def wrapper(self, fields, data):
            """Try to identify rows by other pseudo-unique keys.

            It searches for rows that have no XMLID specified, and gives them
            one if any :attr:`~.field_ids` combination is found. With a valid
            XMLID in place, Odoo will understand that it must *update* the
            record instead of *creating* a new one.
            """
            newdata = list()

            # Data conversion to ORM format
            import_fields = list(map(models.fix_import_export_id_paths, fields))
            converted_data = self._convert_records(
                self._extract_records(import_fields, data))

            # Mock Odoo to believe the user is importing the ID field
            if "id" not in fields:
                fields.append("id")
                import_fields.append(["id"])

            # Needed to match with converted data field names
            clean_fields = [f[0] for f in import_fields]

            for dbid, xmlid, record, info in converted_data:
                row = dict(zip(clean_fields, data[info["record"]]))
                match = self

                if xmlid:
                    # Skip rows with ID, they do not need all this
                    row["id"] = xmlid
                elif dbid:
                    # Find the xmlid for this dbid
                    match = self.browse(dbid)
                else:
                    # Store records that match a combination
                    match = self.env["base_import.match"]._match_find(
                        self, record, row)

                # Give a valid XMLID to this row if a match was found
                row["id"] = (match._BaseModel__export_xml_id()
                             if match else row.get("id", u""))

                # Store the modified row, in the same order as fields
                newdata.append(tuple(row[f] for f in clean_fields))

            # Leave the rest to Odoo itself
            del data
            return wrapper.origin(self, fields, newdata)
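The closing call to wrapper.origin(...) follows the convention of Odoo's BaseModel._patch_method(), which stores the replaced implementation on the new method as `origin`. A hedged sketch of how such a wrapper is typically attached (the target model is a placeholder; the example does not show the registration step):

 # Hedged sketch (Odoo 8/9 style): after this call the original load()
 # is reachable as wrapper.origin, which the wrapper invokes at the end.
 target = self.env['res.partner']        # hypothetical model to patch
 target._patch_method('load', wrapper)
 # the patch can later be undone with: target._revert_method('load')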
Example #7
 def _add_line_data(self, fields, data):
     if 'line_ids/fund_id.id' not in fields:
         new_data = []
         for data_line in data:
             data_line = data_line + (u'NSTDA', )
             new_data.append(data_line)
         fields.append('line_ids/fund_id')
         return fields, new_data
     else:
         fund = self.env.ref('base.fund_nstda')  # NSTDA Fund
         fund_idx = fields.index('line_ids/fund_id.id')
         new_data = []
         for data_line in data:
             if not data_line[fund_idx]:
                 lst = list(data_line)
                 lst[fund_idx] = fund.id
                 data_line = tuple(lst)
             new_data.append(data_line)
         return fields, new_data
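A hedged sketch of how such a hook is usually wired into the import machinery; the example does not show the caller, so the class and model names below are placeholders:

 # Hedged sketch: calling the hook from a load() override on a hypothetical model.
 from openerp import api, models  # `odoo` on versions 10 and later

 class BudgetDocument(models.Model):
     _inherit = 'budget.document'         # assumed; not shown in the example

     @api.model
     def load(self, fields, data):
         fields, data = self._add_line_data(fields, data)
         return super(BudgetDocument, self).load(fields, data)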
Example #8
 def _add_line_data(self, fields, data):
     if 'line_ids/fund_id.id' not in fields:
         new_data = []
         for data_line in data:
             data_line = data_line + (u'NSTDA',)
             new_data.append(data_line)
         fields.append('line_ids/fund_id')
         return fields, new_data
     else:
         fund = self.env.ref('base.fund_nstda')  # NSTDA Fund
         fund_idx = fields.index('line_ids/fund_id.id')
         new_data = []
         for data_line in data:
             if not data_line[fund_idx]:
                 lst = list(data_line)
                 lst[fund_idx] = fund.id
                 data_line = tuple(lst)
             new_data.append(data_line)
         return fields, new_data
Example #9
 def introspection(self):
     db = coredb.DB(self.source_type, data=self.data)
     conn = db.connect(self.host, self.port, self.database, self.username, self.password)
     tables = db.show_tables()
     table = self.env['etileno.table']
     for k, v in tables.items():
         # get fields
         fields = []
         for i in v['fields']:
             fields.append((0,0, {
                 'name': i[0],
                 'field_type': i[1],
                 'pk': i[0] in v['pk'] and (v['pk'].index(i[0]) + 1)
             }))
         # create table and fields
         data = {
             'source_id': self.id,
             'name': k,
             'rows': v['count'],
             'field_ids': fields
         }
         table.create(data)
Example #10
 def read(self,
          cr,
          uid,
          ids,
          fields=None,
          context=None,
          load='_classic_read'):
     # Hack for binary compatibility
     add_datas_size = False
     if context and fields and context.get('bin_size') and 'datas' in fields:
         fields.remove('datas')
         fields.append('human_file_size')
         add_datas_size = True
     res = super(ProductMedia, self).read(cr,
                                          uid,
                                          ids,
                                          fields=fields,
                                          context=context,
                                          load=load)
     if add_datas_size:
         for record in res:
             record['datas'] = record['human_file_size']
     return res
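For context, bin_size is the context flag Odoo sets (for instance in list views) so that binary columns come back as a human-readable size rather than the payload. A hedged old-API usage sketch of the call this override targets (the technical model name is a guess):

 # Hedged usage sketch (old API): with bin_size set, 'datas' is answered from
 # the precomputed 'human_file_size' column instead of decoding the binary blob.
 media_obj = self.pool.get('product.media')   # hypothetical model name
 rows = media_obj.read(cr, uid, ids, ['datas'], context={'bin_size': True})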
Example #11
 def read(self, fields, load='_classic_read'):
     if fields:
         fields.append('id')
     results = super(ProductProduct, self).read(fields, load=load)
     # Note if fields is empty => read all, so look at history table
     if not fields or any([f in PRODUCT_FIELD_HISTORIZE for f in fields]):
         p_history = self.env['product.price.history']
         company_id = (self.env.context.get('company_id', False) or
                       self.env.user.company_id.id)
         # if fields is empty we read all price fields
         if not fields:
             p_fields = PRODUCT_FIELD_HISTORIZE
         # Otherwise we filter on price fields asked in read
         else:
             p_fields = [f for f in PRODUCT_FIELD_HISTORIZE if f in fields]
         prod_prices = p_history._get_historic_price(
             product_ids=self.ids, company_id=company_id,
             datetime=self.env.context.get('to_date', False),
             field_names=p_fields)
         if prod_prices:
             for result in results:
                 dict_value = prod_prices[result['id']]
                 result.update(dict_value)
     return results
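A hedged usage sketch of this override: the historic values are driven by the to_date and company_id context keys read above (the sample values, and the assumption that 'standard_price' is part of PRODUCT_FIELD_HISTORIZE, are not taken from the example):

 # Hedged usage sketch: read prices as they stood on a given date.
 products = self.env['product.product'].search([('default_code', '!=', False)])
 values = products.with_context(
     to_date='2019-12-31',    # invented sample date
     company_id=1,            # invented sample company
 ).read(['standard_price'])   # assumed to be one of the historized fields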
Example #12
    def make_lines(self, res_file):
        lines = []

        # first header line : _field_separator + _record_separator + _field_separator
        # so the receiver can detect which separators we use
        lines.append(self._field_separator + self._record_separator +
                     self._field_separator)

        # second header line: the id code of the CCI, the number of detail lines and the
        # email address the federation mail robot will use to confirm reception of this file

        # we obtain the id key of the CCI in the federation
        res_company = self.env['res.company'].browse(1)
        lines.append(res_company.federation_key + self._field_separator +
                     str(len(res_file)).rjust(6, '0') + self._field_separator +
                     str(self.email_rcp).strip() + self._field_separator)

        # Let's build a list of certificates objects
        certificates_ids = [x[0] for x in res_file]
        obj_certificate = self.env['cci_missions.certificate']

        # build a list of values for each certificate, then join them with _field_separator
        sequence_num = 0
        total_value = 0
        for cert in certificates_ids:
            fields = []
            sequence_num += 1
            certificate = obj_certificate.browse(cert)
            fields.append(str(sequence_num).rjust(6, '0'))
            fields.append(
                certificate.digital_number
                and str(int(certificate.digital_number))
                or (certificate.type_id.id_letter +
                    certificate.name.rpartition('/')[0].rpartition('/')[2] +
                    certificate.name.rpartition('/')[2].rjust(6, '0'))
            )  # fall back to a number built from the certificate name (e.g. CO/2008/25 -> id_letter + '2008' + '000025')
            fields.append(certificate.dossier_id.asker_name or '')
            fields.append(certificate.asker_address or '')
            fields.append(certificate.asker_zip_id.name or '')
            fields.append(certificate.asker_zip_id.city or '')
            if certificate.order_partner_id.vat and certificate.order_partner_id.vat[
                    0:3].lower() == 'be0':
                fields.append(certificate.order_partner_id.vat[3:12])
            else:
                fields.append('000000000')
            fields.append(certificate.dossier_id.sender_name or '')
            fields.append(certificate.dossier_id.destination_id.code or '')
            fields.append(str(
                int(certificate.dossier_id.goods_value *
                    100)))  # value in cents, without any decimal separator
            total_value += int(
                certificate.dossier_id.goods_value * 100
            )  # accumulated here rather than just before lines.append, which triggered an unexplained bug
            fields.append(certificate.dossier_id.date.replace(
                '-', ''))  # to correct '2008-05-28' to '20080528'
            fields.append('Y')
            custom_codes_string = ''
            for custom_code in certificate.customs_ids:
                custom_codes_string += custom_code.name + self._field_separator
            fields.append(
                custom_codes_string
            )  # deliberately leaves TWO field separators at the end, marking the end of this list
            origins_string = ''
            for country_code in certificate.origin_ids:
                origins_string += country_code.code + self._field_separator  # country code and not country name
            fields.append(
                origins_string
            )  # again, the trailing separator deliberately yields TWO field separators, marking the end of the list
            lines.append(
                self._field_separator.join(fields) + self._field_separator)

        # Trailer : the sum of all the values in cents of the included certificates
        lines.append('999999' + self._field_separator + str(total_value) +
                     self._field_separator)

        # Since we send this file to the federation, we indicate this date in the field
        # obj_certificate.write(cr, uid,certificates_ids, {'sending_spf' : time.strftime('%Y-%m-%d')})
        return lines
Example #13
    def make_lines(self, res_file):
        lines = []
        
        # first header line : _field_separator + _record_separator + _field_separator
        # so the receiver can detect which separators we use
        lines.append( self._field_separator + self._record_separator + self._field_separator)
        
        # second header line: the id code of the CCI, the number of detail lines and the
        # email address the federation mail robot will use to confirm reception of this file
        
        # we obtain the id key of the CCI in the federation
        res_company = self.env['res.company'].browse(1)
        lines.append( res_company.federation_key + self._field_separator + str(len(res_file)).rjust(6,'0') + self._field_separator + str(self.email_rcp).strip() + self._field_separator )
        
        # Let's build a list of certificates objects
        certificates_ids = [x[0] for x in res_file]
        obj_certificate = self.env['cci_missions.certificate']

        # build a list of values for each certificate, then join them with _field_separator
        sequence_num = 0
        total_value = 0
        for cert in certificates_ids:
            fields = []
            sequence_num += 1
            certificate = obj_certificate.browse(cert)
            fields.append( str(sequence_num).rjust(6,'0') )
            fields.append( certificate.digital_number and str(int(certificate.digital_number)) or (certificate.type_id.id_letter + certificate.name.rpartition('/')[0].rpartition('/')[2] + certificate.name.rpartition('/')[2].rjust(6,'0')) )  # fall back to a number built from the certificate name (e.g. CO/2008/25 -> id_letter + '2008' + '000025')
            fields.append( certificate.dossier_id.asker_name or '')
            fields.append( certificate.asker_address or '')
            fields.append( certificate.asker_zip_id.name or '')
            fields.append( certificate.asker_zip_id.city or '')
            if certificate.order_partner_id.vat and certificate.order_partner_id.vat[0:3].lower() == 'be0':
                fields.append( certificate.order_partner_id.vat[3:12] )
            else:
                fields.append( '000000000' )
            fields.append( certificate.dossier_id.sender_name or '')
            fields.append( certificate.dossier_id.destination_id.code or '')
            fields.append( str( int( certificate.dossier_id.goods_value * 100 )) ) # value in cents, without any decimal separator
            total_value += int( certificate.dossier_id.goods_value * 100 ) # accumulated here rather than just before lines.append, which triggered an unexplained bug
            fields.append( certificate.dossier_id.date.replace('-','') )  # to correct '2008-05-28' to '20080528'
            fields.append( 'Y' )
            custom_codes_string = ''
            for custom_code in certificate.customs_ids:
                custom_codes_string += custom_code.name + self._field_separator
            fields.append( custom_codes_string ) # deliberately leaves TWO field separators at the end, marking the end of this list
            origins_string = ''
            for country_code in certificate.origin_ids:
                origins_string += country_code.code + self._field_separator  # country code and not country name
            fields.append( origins_string ) # again, the trailing separator deliberately yields TWO field separators, marking the end of the list
            lines.append( self._field_separator.join(fields) + self._field_separator )
        
        # Trailer : the sum of all the values in cents of the included certificates
        lines.append( '999999' + self._field_separator + str( total_value ) + self._field_separator )

        # Since we send this file to the federation, we indicate this date in the field  
        # obj_certificate.write(cr, uid,certificates_ids, {'sending_spf' : time.strftime('%Y-%m-%d')})
        return lines
Example #14
    def _auto_init(self, cr, context=None):
        """

        Call _field_create and, unless _auto is False:

        - create the corresponding table in database for the model,
        - possibly add the parent columns in database,
        - possibly add the columns 'create_uid', 'create_date', 'write_uid',
          'write_date' in database if _log_access is True (the default),
        - report on database columns that no longer exist in _columns,
        - remove NOT NULL constraints that no longer apply,
        - alter existing database columns to match _columns,
        - create database tables to match _columns,
        - add database indices to match _columns,
        - save in self._foreign_keys a list of foreign keys to create (see
          _auto_end).

        """
        res = super(fts_model, self)._auto_init(cr, context)

        #~ cr.execute("SELECT res_id FROM ir_model_data WHERE module = 'base' and name = 'user_root';")
        #~ res = cr.dictfetchone()
        #~ user_id = res['res_id']
        #~ _logger.warn('\n\nuser_id: %s' % user_id)
        #~ env = api.Environment(cr, user_id, context)
        #~ env[self._name]._init_fts_fields()
        #~ return res

        #~ @api.model
        #~ def _init_fts_fields(self):
        #~ cr, context = self._cr, self._context
        cr.execute(
            "DROP FUNCTION IF EXISTS website_fts_translate_term(language text, term text, model text, field text, t_name text, obj_id integer);"
        )
        cr.execute(
            """CREATE FUNCTION website_fts_translate_term(language text, term text, model text, field text, t_name text, obj_id integer) RETURNS text AS $$
DECLARE result text;
    BEGIN
        result := model || ',' || field;
        SELECT value INTO result FROM ir_translation it WHERE
            it.name = result AND
            it.lang = language AND
            it.res_id = obj_id AND
            it.value != '' AND
            it.src = term
        ORDER BY id
        LIMIT 1;
        return COALESCE(result, term);
    END;
$$ LANGUAGE plpgsql;""")
        # TODO: Replace this with _fts_get_langs and test it
        langs = set()
        cr.execute(
            "SELECT code from res_lang WHERE id in (SELECT lang_id FROM website_lang_rel);"
        )
        for d in cr.dictfetchall():
            langs.add(d['code'])
        _logger.debug('FTS languages: %s' % ', '.join(langs))
        columns = self._select_column_data(cr)
        fts_fields = self._get_fts_fields()
        for lang in langs:
            update_index = False
            ps_lang, col_name = self._lang_o2pg(cr, lang, context)
            if self._auto and col_name not in columns:
                # Add _fts_vector column to the table
                _logger.debug('Adding column %s.%s' % (self._table, col_name))
                cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" tsvector' %
                           (self._table, col_name))
                update_index = True
            if self._auto and fts_fields and '_fts_trigger' in columns:
                self._fts_drop_obsolete_sql_stuff(cr, context)
                trigger_name = 'upd%s' % col_name
                func_name = '%s%s_trigger' % (self._table, col_name)

                cr.execute("DROP TRIGGER IF EXISTS %s ON %s;" %
                           (trigger_name, self._table))

                # Create function that updates the new _fts_vector column.
                cr.execute("DROP FUNCTION IF EXISTS %s();" % func_name)
                # Declarations of extra variables the function needs
                declares = []
                # SQL code to select data into the variables
                selects = []
                # Code to build a part of the document
                fields = []
                var_count = 0
                # TODO: Add support for more column types (float and int)
                for field in fts_fields:
                    if field.get('trigger_only'):
                        continue
                    if '.' in field['name']:
                        # Handle dot notation for related fields
                        relational_fields = field['name'].split('.')
                        field_obj = self._fields[relational_fields[0]]
                    else:
                        # Handle related fields
                        field_obj = self._fields[field['name']]
                        relational_fields = field_obj.related
                    _logger.debug('related fields: %s' % (relational_fields, ))
                    if field.get('sql_vector'):
                        # TODO: Test that this actually works
                        declares += field.get('sql_vars', [])
                        selects += [
                            s.format(lang=lang,
                                     ps_lang=ps_lang,
                                     col_name=col_name,
                                     weight=field.get('weight', 'D'))
                            for s in field.get('sql_selects', [])
                        ]
                        fields += [
                            f.format(lang=lang,
                                     ps_lang=ps_lang,
                                     col_name=col_name,
                                     weight=field.get('weight', 'D'))
                            for f in field['sql_vector']
                        ]
                    elif relational_fields:
                        var_name = "fts_tmp_%s" % var_count
                        var_count += 1
                        declares.append("DECLARE %s text;" % var_name)
                        selects.append(
                            self._fts_get_related_select(
                                cr, var_name, lang, ps_lang, self,
                                relational_fields, context))
                        fields.append(
                            "setweight(to_tsvector('%s', COALESCE(%s, '')), '%s')"
                            % (ps_lang, var_name, field.get('weight', 'D')))
                    elif field_obj.translate:
                        var_name = "fts_tmp_%s" % var_count
                        var_count += 1
                        declares.append("DECLARE %s text;\n" % var_name)
                        selects.append(
                            "SELECT website_fts_translate_term('%s', new.%s, '%s', '%s', '%s', new.id) INTO %s;"
                            % (lang, field['name'], self._name, field['name'],
                               self._table, var_name))
                        fields.append(
                            "setweight(to_tsvector('%s', COALESCE(%s, '')), '%s')"
                            % (ps_lang, var_name, field.get('weight', 'D')))
                    else:
                        fields.append(
                            "setweight(to_tsvector('%s', COALESCE(new.%s, '')), '%s')"
                            %
                            (ps_lang, field['name'], field.get('weight', 'D')))
                fields = ' ||\n        '.join(fields) + ';'
                declares = ['\n%s' % d for d in declares]
                selects = ['\n    %s' % s for s in selects]
                expr = "CREATE FUNCTION %s() RETURNS trigger AS $$%s\n" \
                "BEGIN%s\n" \
                "    new.%s := %s\n" \
                "    return new;\n" \
                "END;\n" \
                "$$ LANGUAGE plpgsql;" % (func_name, ''.join(declares), ''.join(selects), col_name, fields)
                _logger.debug(expr)
                cr.execute(expr)
                expr = "CREATE TRIGGER %s BEFORE INSERT OR UPDATE OF _fts_trigger ON %s " \
                    "FOR EACH ROW EXECUTE PROCEDURE %s();" % (trigger_name, self._table, func_name)
                _logger.debug(expr)
                cr.execute(expr)
                if update_index:
                    # Create index on the _fts_vector column.
                    cr.execute("CREATE INDEX %s%s_idx ON %s USING GIST (%s);" %
                               (self._table, col_name, self._table, col_name))
        return res
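Once the tsvector column, trigger and GIST index exist, searches typically go through a @@ match against the same text-search configuration used in the trigger. A hedged sketch of such a query, where col_name, ps_lang and search_term stand in for values computed elsewhere:

 # Hedged sketch: querying the generated tsvector column; the identifiers are
 # interpolated, the user-supplied term is passed as a bound parameter.
 cr.execute(
     "SELECT id FROM %s WHERE %s @@ plainto_tsquery('%s', %%s) "
     "ORDER BY ts_rank(%s, plainto_tsquery('%s', %%s)) DESC"
     % (self._table, col_name, ps_lang, col_name, ps_lang),
     (search_term, search_term),
 )
 matching_ids = [row[0] for row in cr.fetchall()]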
Example #15
 def _address_fields(self):
     fields = super(ResPartner, self)._address_fields()
     fields.append('street3')
     return fields
Example #16
 def _commercial_fields(self):
     fields = super(ResPartner, self)._commercial_fields()
     fields.append('use_prepayment')
     return fields
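As with the address-field overrides above, this only works if the field is actually declared on res.partner; a minimal sketch of the assumed companion declaration (the Boolean type is a guess suggested by the name):

 # Hedged sketch: assumed declaration backing the _commercial_fields() override;
 # commercial fields are propagated from the commercial partner to its child contacts.
 from odoo import fields, models

 class ResPartner(models.Model):
     _inherit = 'res.partner'

     use_prepayment = fields.Boolean(string='Use prepayment')  # assumed type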