def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
    '''Decrypt ``cc_number`` after reading it from the database and mask
    all but the last four digits unless the context carries
    ``cc_no='no_mask'``.

    @param self : The object pointer
    @param cr: A database cursor
    @param uid: ID of the user currently logged in
    @param ids: List of ids selected
    @param fields: List of fields
    @param context: A standard dictionary
    @param load: a parameter used for reading the values of functional fields
    @return: List of dictionary of fields, values pair
    '''
    if context is None:
        context = {}
    # 'key' is required to decrypt 'cc_number', so fetch it transparently.
    if fields and 'cc_number' in fields and 'key' not in fields:
        fields.append('key')
    vals = super(res_partner_bank, self).read(cr, uid, ids, fields, context, load)
    mask = context.get('cc_no', '') != 'no_mask'
    # Handle both return shapes (list of dicts / single dict) with one loop.
    for val in (vals if isinstance(vals, list) else [vals]):
        number = val.get('cc_number', False)
        if not number:
            continue
        dec_data = rsa_encrypt.decrypt(number, val.get('key', ''))
        val['cc_number'] = dec_data
        if mask:
            # Show only the last four digits, e.g. XXXXXXXXXXXX1234.
            # BUG FIX: the single-record branch previously sliced with
            # len(vals) (the size of the result dict) instead of the
            # length of the card number, corrupting the masked value.
            val['cc_number'] = 'X' * (len(dec_data) - 4) + dec_data[-4:]
    return vals
def _read_flat(self, cr, uid, ids, fields, context=None, load="_classic_read"):
    """Read product templates and overlay historized price fields.

    Delegates to the standard ``_read_flat`` and then, for every field
    listed in ``PRODUCT_FIELD_HISTORIZE`` (or all of them when
    ``fields`` is empty, i.e. "read everything"), replaces the stored
    value with the historic one taken from ``product.price.history``
    for the transaction company and the optional ``to_date`` found in
    the context.
    """
    if context is None:
        context = {}
    if fields:
        # BUG FIX: work on a copy instead of mutating the caller's list,
        # and only append "id" when it is actually missing (the old code
        # appended it unconditionally, growing the caller's list with
        # duplicates on every call).
        fields = fields[:]
        if "id" not in fields:
            fields.append("id")
    results = super(product_template, self)._read_flat(
        cr, uid, ids, fields, context=context, load=load)
    # Note if fields is empty => read all, so look at history table
    if not fields or any(f in PRODUCT_FIELD_HISTORIZE for f in fields):
        date_crit = False
        p_history = self.pool.get("product.price.history")
        company_id = self._get_transaction_company_id(cr, uid, context=context)
        if context.get("to_date"):
            date_crit = context["to_date"]
        # if fields is empty we read all price fields,
        # otherwise we filter on price fields asked in read
        if not fields:
            p_fields = PRODUCT_FIELD_HISTORIZE
        else:
            p_fields = [f for f in PRODUCT_FIELD_HISTORIZE if f in fields]
        prod_prices = p_history._get_historic_price(
            cr, uid, ids, company_id, datetime=date_crit,
            field_names=p_fields, context=context)
        for result in results:
            result.update(prod_prices[result["id"]])
    return results
def _address_fields(self, cr, uid, context=None):
    """ Returns the list of address fields that are synced from the parent
    when the `use_parent_address` flag is set, extended with 'company'. """
    parent_fields = super(res_partner, self)._address_fields(
        cr, uid, context=context)
    return parent_fields + ['company']
def import_carriers(self, cr, uid, ids, context=None):
    """Fetch the carrier list from the ShipStation API and mirror it into
    ``carrier.carrier``: existing records (matched by ``code``) are
    updated, unknown ones are created.

    @param ids: ids of the configuration record holding the API credentials
    """
    if context is None:  # BUG FIX: mutable default argument ``context={}``
        context = {}
    myself = self.browse(cr, uid, ids[0], context=context)
    # Basic-auth header built from the stored API credentials.
    encoded = ":".join([myself.api_key or 'api_key',
                        myself.api_secret or 'api_secret'])
    headers = {'Authorization': "Basic " + base64.b64encode(encoded)}
    base_url = self.get_shipstation_url(cr, uid, context)
    url = urlparse.urljoin(base_url, helper.endpoints['list_carriers'])
    response = requests.get(url, headers=headers).json()
    carrier_obj = self.pool.get('carrier.carrier')
    for carrier in response:
        fields = helper.carrier_carrier_map.values()
        fields.append('id')
        existing = carrier_obj.search_read(
            cr, uid, [('code', '=', carrier['code'])], fields=fields, limit=1)
        # Map the ShipStation payload keys onto our model columns.
        vals = {}
        for src, dest in helper.carrier_carrier_map.iteritems():
            vals[dest] = carrier.get(src)
        if existing:
            # Just overwrite.
            # BUG FIX: the old code looped over the existing record's
            # fields and called write() once per field with the same
            # vals; a single write is sufficient (and much cheaper).
            carrier_obj.write(cr, uid, existing[0]['id'], vals, context)
        else:
            # Create a new carrier
            carrier_obj.create(cr, uid, vals, context)
def extra_fields(self, cr, uid, ids, field_names, arg, context=None):
    """Functional-field getter: for every metric, return a dict holding
    its field definitions, the details of its model and the defaults of
    the SQL query registered on that model (if any)."""
    result = {}
    for metric in self.browse(cr, uid, ids, context=context):
        field_list = [{
            'id': fld.id,
            'name': fld.name,
            'sequence': fld.sequence,
            'reference': fld.reference,
            'domain_field_path': fld.domain_field_path,
            'sql_name': fld.sql_name,
            'type_names': fld.type_names,
            'field_description': fld.field_description,
            'period': fld.period,
            'field_model': fld.field_model,
        } for fld in metric.field_ids]
        model_details = {
            'id': metric.model.id,
            'name': metric.model.name,
            'model': metric.model.model,
        }
        # Pull the query defaults from the target model, when declared.
        defaults = {}
        model = self.pool.get(metric.model.model)
        if hasattr(model, '_metrics_sql') and metric.query_name in model._metrics_sql:
            query = model._metrics_sql[metric.query_name]
            if isinstance(query, dict) and 'defaults' in query:
                defaults = query['defaults']
        result[metric.id] = {
            'fields': field_list,
            'model_details': model_details,
            'defaults': defaults,
        }
    return result
def _read_flat(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
    # Read product.product records, then overlay the historized price
    # fields (PRODUCT_FIELD_HISTORIZE) with the values found in
    # product.price.history for each product's template, at the
    # transaction company and the optional context['to_date'].
    if context is None:
        context = {}
    if not fields:
        fields = []
    else:
        fields = fields[:]  # avoid to modify the callee's list
    if fields and not 'id' in fields:
        fields.append('id')
    pt_obj = self.pool.get('product.template')
    historized_fields = [f for f in fields if f in PRODUCT_FIELD_HISTORIZE]
    # product_tmpl_id is needed to join with the price history; remember
    # whether the caller asked for it so it can be stripped afterwards.
    remove_tmpl_field = False
    if fields and not 'product_tmpl_id' in fields and historized_fields:
        remove_tmpl_field = True
        fields.append('product_tmpl_id')
    results = super(product_product, self)._read_flat(
        cr, uid, ids, fields, context=context, load=load)
    # Note if fields is empty => read all, so look at history table
    if not fields or historized_fields:
        date_crit = False
        price_history = self.pool.get('product.price.history')
        company_id = pt_obj._get_transaction_company_id(cr, uid, context=context)
        if context.get('to_date'):
            date_crit = context['to_date']
        if load == '_classic_write':
            # list of ids
            tmpl_ids = [row['product_tmpl_id'] for row in results]
        else:
            # list of (id, name)
            tmpl_ids = [row['product_tmpl_id'][0] for row in results]
        prod_prices = price_history._get_historic_price(
            cr, uid, tmpl_ids, company_id, datetime=date_crit,
            field_names=historized_fields or PRODUCT_FIELD_HISTORIZE,
            context=context)
        # Overlay the historic values onto each read row.
        for result in results:
            if load == '_classic_write':
                tmpl_id = result['product_tmpl_id']
            else:
                tmpl_id = result['product_tmpl_id'][0]
            dict_value = prod_prices[tmpl_id]
            result.update(dict_value)
    if remove_tmpl_field:
        for row in results:
            # this field was not asked by the callee
            del row['product_tmpl_id']
    return results
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
    """Read users, filling in the reified group fields.

    Reified group fields (``in_group_*`` / ``sel_groups_*``) are not
    real columns: they are derived from ``groups_id``, which is
    therefore always fetched and expanded afterwards.
    """
    if not fields:
        fields = self.fields_get(cr, uid, context=context).keys()
    # Split the reified group pseudo-fields from the real columns.
    group_fields, fields = partition(is_reified_group, fields)
    if 'groups_id' not in fields:
        fields.append('groups_id')
    res = super(users_view, self).read(cr, uid, ids, fields,
                                       context=context, load=load)
    records = res if isinstance(res, list) else [res]
    for values in records:
        self._get_reified_groups(group_fields, values)
    return res
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
    """Read users, expanding the reified group fields from ``groups_id``.

    When ``groups_id`` is fetched only to compute the reified fields
    (i.e. the caller did not ask for it), it is stripped from the
    result again afterwards.
    """
    requested = fields if fields is not None else self.fields_get(cr, uid, context=context).keys()
    group_fields, _ = partition(is_reified_group, requested)
    # Only inject groups_id when reified fields were asked for and the
    # caller supplied an explicit field list that lacks it.
    added_groups_id = bool(group_fields) and bool(fields) and 'groups_id' not in fields
    if added_groups_id:
        fields.append('groups_id')
    res = super(users_view, self).read(cr, uid, ids, fields,
                                       context=context, load=load)
    if res and group_fields:
        for values in (res if isinstance(res, list) else [res]):
            self._get_reified_groups(group_fields, values)
            if added_groups_id:
                # groups_id was fetched for our own use only
                values.pop('groups_id', None)
    return res
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
    '''
    Decrypt credit card number after reading from database and
    display last four digits if there is no no_mask in context
    @param self : The object pointer
    @param cr: A database cursor
    @param uid: ID of the user currently logged in
    @param ids: List of ids selected
    @param fields: List of fields
    @param context: A standard dictionary
    @param load: a parameter used for reading the values of functional fields
    @return:List of dictionary of fields,values pair
    '''
    if context is None:
        context = {}
    # 'key' is required to decrypt 'cc_number', so fetch it as well.
    # NOTE(review): the visible body ends here -- there is no
    # super().read() call and no return statement; the rest of the
    # method appears to be truncated in this chunk. Confirm against the
    # full file before relying on this definition.
    if fields and 'cc_number' in fields and 'key' not in fields:
        fields.append('key')
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
    """ Calls :meth:`read<openerp.models.Model.read>` and then looks for
    potential numeric values that might be actually ``null`` instead of
    ``0`` (as Odoo interprets every numeric value as ``0`` when it finds
    ``null`` in the database) and fixes the return value accordingly.

    Rounds all floats to n decimal places, where n is the number
    specified in the digits tuple that is an attribute of the field
    definition on the model.

    :returns: dictionary with the read values
    :rtype: dict
    """
    # Accept a single id as well as a list; remember the original shape
    # so a single dict can be returned at the end.
    nolist = False
    if not isinstance(ids, list):
        ids = [ids]
        nolist = True
    # 'null_values' is needed below to restore the genuine NULLs.
    if fields and 'null_values' not in fields:
        fields.append('null_values')
    res = super(NhClinicalPatientObservation, self).read(
        cr, uid, ids, fields=fields, context=context, load=load)
    if res:
        # Round every float column to its declared precision.
        for d in res:
            for key in d.keys():
                if key in self._columns \
                        and self._columns[key]._type == 'float':
                    if not self._columns[key].digits:
                        _logger.warn(
                            "You might be reading a wrong float from the "
                            "DB. Define digits attribute for float columns"
                            " to avoid this problem.")
                    else:
                        d[key] = round(
                            d[key], self._columns[key].digits[1])
        # Restore values that are really NULL in the database to False.
        for obs in isinstance(res, (tuple, list)) and res or [res]:
            # SECURITY NOTE(review): eval() on a stored string; the
            # value is presumably written by this model itself, but
            # ast.literal_eval would be the safer choice -- confirm.
            for nv in eval(obs['null_values'] or '{}'):
                if nv in obs.keys():
                    obs[nv] = False
    # Unwrap to a single dict when a single id was passed in.
    res = res[0] if nolist and len(res) > 0 else res
    return res
def _address_fields(self, cr, uid, context=None):
    """Extend the parent-synced address fields with the third street line."""
    address_fields = super(res_partner, self)._address_fields(
        cr, uid, context=context)
    address_fields.append('street3')
    return address_fields
def get_fields(self, cr, uid, model, context=None, depth=FIELDS_RECURSION_LIMIT):
    """ Recursively get fields for the provided model (through
    fields_get) and filter them according to importability

    The output format is a list of ``Field``, with ``Field`` defined as:

    .. class:: Field

        .. attribute:: id (str)

            A non-unique identifier for the field, used to compute
            the span of the ``required`` attribute: if multiple
            ``required`` fields have the same id, only one of them
            is necessary.

        .. attribute:: name (str)

            The field's logical (OpenERP) name within the scope of
            its parent.

        .. attribute:: string (str)

            The field's human-readable name (``@string``)

        .. attribute:: required (bool)

            Whether the field is marked as required in the
            model. Clients must provide non-empty import values
            for all required fields or the import will error out.

        .. attribute:: fields (list(Field))

            The current field's subfields. The database and
            external identifiers for m2o and m2m fields; a
            filtered and transformed fields_get for o2m fields (to
            a variable depth defined by ``depth``).

            Fields with no sub-fields will have an empty list of
            sub-fields.

    :param str model: name of the model to get fields form
    :param int landing: depth of recursion into o2m fields
    """
    # Synthetic 'id' entry: records can always be matched by external id.
    fields = [{
        'id': 'id',
        'name': 'id',
        'string': _("External ID"),
        'required': False,
        'fields': [],
    }]
    fields_got = self.pool[model].fields_get(cr, uid, context=context)
    for name, field in fields_got.iteritems():
        # an empty string means the field is deprecated, @deprecated must
        # be absent or False to mean not-deprecated
        if field.get('deprecated', False) is not False:
            continue
        # Skip readonly fields unless some state makes them writable.
        if field.get('readonly'):
            states = field.get('states')
            if not states:
                continue
            # states = {state: [(attr, value), (attr2, value2)], state2:...}
            if not any(attr == 'readonly' and value is False
                       for attr, value in itertools.chain.from_iterable(
                           states.itervalues())):
                continue
        f = {
            'id': name,
            'name': name,
            'string': field['string'],
            # Y U NO ALWAYS HAS REQUIRED
            'required': bool(field.get('required')),
            'fields': [],
        }
        if field['type'] in ('many2many', 'many2one'):
            # relational fields can be matched by external or database id
            f['fields'] = [
                dict(f, name='id', string=_("External ID")),
                dict(f, name='.id', string=_("Database ID")),
            ]
        elif field['type'] == 'one2many' and depth:
            # recurse into the target model, one level shallower
            f['fields'] = self.get_fields(
                cr, uid, field['relation'], context=context, depth=depth-1)
        fields.append(f)
    # TODO: cache on model?
    return fields
def _commercial_fields(self, cr, uid, context=None):
    """Commercial fields propagated to dependent partners, extended
    with the prepayment flag."""
    commercial = super(res_partner, self)._commercial_fields(
        cr, uid, context=context)
    return commercial + ['use_prepayment']
def _address_fields(self, cr, uid, context=None):
    """Return the parent-synced address fields plus 'street3'."""
    result = super(res_partner, self)._address_fields(cr, uid, context=context)
    result += ['street3']
    return result
def load(self, cr, uid, fields, data, context=None): print "--------------------------------------------------------" print "load(): Commands BEFORE original load method is called" print "--------------------------------------------------------" print "load() uid: %s" % uid print "load() fields: %s" % fields print "load() data: %s" % data print "load() context: %s" % context # TODO use export_data() to make a full backup of all products and post it as a message to a admin newsgroup # Add the ID field if not already present try: id_index = fields.index('id') except: # Add id to the end of fields list fields.append('id') # Add an empty string to the end of the tuples inside the data list data = [record + (u'',) for record in data] # prepare the index variable - this should also be available outside the try statement ?!? id_index = fields.index('id') print "load() fields after id field added: %s" % fields print "load() data after id field added: %s" % data # Make sure if there is a CSB-Nummer it is correctly used for the id field (external id) try: openat_csb_nummer_index = fields.index('openat_csb_nummer') except: print "load() no CSB Number found - But that's probably ok!" else: # Transfer the csb-nummer to the id field - don't change any other id field for x in range(0, len(data)): record = list(data[x]) if record[openat_csb_nummer_index]: record[id_index] = u'__export__.' + record[openat_csb_nummer_index] data[x] = tuple(record) print "load() data after csb number transfered: %s" % str(data) print "load() record: %s" % record # Check if the state fields exists and if not add it and set the right state try: state_index = fields.index('state') except: print "load() no State field found - But that's probably ok!" # Add state field to the end of fields list fields.append('state') # Add an empty string to the end of the tuples inside the data list data = [record + (u'',) for record in data] # prepare the index variable. 
state_index = fields.index('state') print "load() fields after state field added: %s" % fields print "load() data after state field added: %s" % data # Add the correct state for x in range(0, len(data)): record = list(data[x]) # Todo: Get all available Translation terms for product.template.state:ppapproved #source = self.pool.get('ir.translation')._get_source #print "load() self.pool.get('ir.translation')._get_source: %s" % source # End Todo if record[state_index] != u'ppapproved': try: # If no CSB Number Field is present at all openat_csb_nummer_index will be undefined # therefore we have to use try if record[openat_csb_nummer_index]: record[state_index] = u'pptocheck' except: record[state_index] = u'ppnew' data[x] = tuple(record) print "load() data after state corrected: %s" % str(data) # Add the Import User name # ToDo find a way for the "First Import" user user = self.pool.get('res.users').browse(cr, uid, uid) print "load() user.name: %s " % user.name fields.append('user_update') data = [record + (unicode(user.name),) for record in data] # ToDo Check if a workflow is used instead of manually doing it - if the worklfow is also respected on import return super(product_product, self).load(cr, uid, fields, data, context=context)
def get_data(self, cr, uid, data):
    # Build everything the webkit report needs: the accounts to print,
    # their move lines, the reconciliation names per line and the
    # (initial) balances, honouring the wizard's date/period filter.
    filter_data = []
    account_list = []
    account_selected = []
    account_lines = {}
    account_balance = {}
    account_conciliation = {}
    library_obj = self.pool.get('account.webkit.report.library')
    filter_type = self.get_filter(data)
    chart_account = self.get_chart_account_id(data)
    # Collect the filter bounds (dates or periods) chosen in the wizard.
    if filter_type == 'filter_date':
        start_date = self.get_date_from(data)
        stop_date = self.get_date_to(data)
        filter_data.append(start_date)
        filter_data.append(stop_date)
    elif filter_type == 'filter_period':
        start_period = self.get_start_period(data)  #return the period object
        stop_period = self.get_end_period(data)
        filter_data.append(start_period)
        filter_data.append(stop_period)
    else:
        filter_type = ''
    fiscalyear = self.get_fiscalyear(data)
    target_move = self.get_target_move(data)
    #From the wizard can select specific account, extract this accounts
    account_selected = self.get_accounts_ids(data)
    if not account_selected:
        account_selected = [chart_account.id]
        account_list_ids = library_obj.get_account_child_ids(
            cr, uid, account_selected)  #get all the accounts in the chart_account_id
        #Extract the id for the browse record
    else:
        account_ids = []
        for account in account_selected:
            account_ids.append(account.id)
        account_list_ids = library_obj.get_account_child_ids(
            cr, uid, account_ids)  #get all the accounts in the chart_account_id
    account_list_obj = self.pool.get('account.account').browse(
        cr, uid, account_list_ids)
    #Get the move_lines for each account.
    # NOTE(review): uid 1 (superuser) is hard-coded here, presumably to
    # bypass record rules when fetching move lines -- confirm intended.
    move_lines = library_obj.get_move_lines(
        cr, 1, account_list_ids,
        filter_type=filter_type,
        filter_data=filter_data,
        fiscalyear=fiscalyear,
        target_move=target_move,
        order_by='account_id asc, date asc, ref asc')
    #Reconcile -> show reconcile in the mako.
    '''
    First, if the account permit reconcile (reconcile == True),
    add to the dictionary.
    If the account don't allow the reconcile, search if the lines have
    reconcile_id or partial_reconcile_id
    If the account allow the reconcile or the lines have reconcile_id
    or partial_reconcile_id, add in the dictionary and show in the mako
    the column "Reconcile"
    the final result is:
    {account_id: {line.id: [conciliation_name]}}
    '''
    #Search if the move_lines have partial or reconcile id
    for line in move_lines:
        #If the account have reconcile, add to the dictionary
        if line.account_id.id not in account_conciliation:
            account_conciliation[line.account_id.id] = {}
        if line.reconcile_id and line.reconcile_id.name != '':
            account_conciliation[line.account_id.id][
                line.id] = line.reconcile_id.name
        elif line.reconcile_partial_id and line.reconcile_partial_id.name != '':
            str_name = 'P: ' + line.reconcile_partial_id.name
            #conciliation_lines.append(str_name)
            account_conciliation[line.account_id.id][line.id] = str_name
        # Group the move lines per account for the report body.
        if line.account_id.id not in account_lines:
            account_lines[line.account_id.id] = []
        account_lines[line.account_id.id].append(line)
    # Balance columns: always 'balance', plus the foreign currency one
    # when the wizard asked for amounts in currency.
    fields = ['balance']
    if self.get_amount_currency(data):
        fields.append('foreign_balance')
    if filter_type == 'filter_date':
        account_balance = library_obj.get_account_balance(
            cr, uid, account_list_ids, fields,
            initial_balance=True,
            company_id=chart_account.company_id.id,
            fiscal_year_id=fiscalyear.id,
            state=target_move,
            start_date=start_date,
            end_date=stop_date,
            chart_account_id=chart_account.id,
            filter_type=filter_type)
    elif filter_type == 'filter_period':
        account_balance = library_obj.get_account_balance(
            cr, uid, account_list_ids, fields,
            initial_balance=True,
            company_id=chart_account.company_id.id,
            fiscal_year_id=fiscalyear.id,
            state=target_move,
            start_period_id=start_period.id,
            end_period_id=stop_period.id,
            chart_account_id=chart_account.id,
            filter_type=filter_type)
    else:
        account_balance = library_obj.get_account_balance(
            cr, uid, account_list_ids, fields,
            initial_balance=True,
            company_id=chart_account.company_id.id,
            fiscal_year_id=fiscalyear.id,
            state=target_move,
            chart_account_id=chart_account.id,
            filter_type=filter_type)
    return account_list_obj, account_lines, account_conciliation, account_balance
def load(self, cr, uid, fields, data, context=None): print "--------------------------------------------------------" print "load(): Commands BEFORE original load method is called" print "--------------------------------------------------------" print "load() uid: %s" % uid print "load() fields: %s" % fields print "load() data: %s" % data print "load() context: %s" % context # TODO use export_data() to make a full backup of all products and post it as a message to a admin newsgroup # Add the ID field if not already present try: id_index = fields.index('id') except: # Add id to the end of fields list fields.append('id') # Add an empty string to the end of the tuples inside the data list data = [record + (u'', ) for record in data] # prepare the index variable - this should also be available outside the try statement ?!? id_index = fields.index('id') print "load() fields after id field added: %s" % fields print "load() data after id field added: %s" % data # Make sure if there is a CSB-Nummer it is correctly used for the id field (external id) try: openat_csb_nummer_index = fields.index('openat_csb_nummer') except: print "load() no CSB Number found - But that's probably ok!" else: # Transfer the csb-nummer to the id field - don't change any other id field for x in range(0, len(data)): record = list(data[x]) if record[openat_csb_nummer_index]: record[id_index] = u'__export__.' + record[ openat_csb_nummer_index] data[x] = tuple(record) print "load() data after csb number transfered: %s" % str( data) print "load() record: %s" % record # Check if the state fields exists and if not add it and set the right state try: state_index = fields.index('state') except: print "load() no State field found - But that's probably ok!" # Add state field to the end of fields list fields.append('state') # Add an empty string to the end of the tuples inside the data list data = [record + (u'', ) for record in data] # prepare the index variable. 
state_index = fields.index('state') print "load() fields after state field added: %s" % fields print "load() data after state field added: %s" % data # Add the correct state for x in range(0, len(data)): record = list(data[x]) # Todo: Get all available Translation terms for product.template.state:ppapproved #source = self.pool.get('ir.translation')._get_source #print "load() self.pool.get('ir.translation')._get_source: %s" % source # End Todo if record[state_index] != u'ppapproved': try: # If no CSB Number Field is present at all openat_csb_nummer_index will be undefined # therefore we have to use try if record[openat_csb_nummer_index]: record[state_index] = u'pptocheck' except: record[state_index] = u'ppnew' data[x] = tuple(record) print "load() data after state corrected: %s" % str(data) # Add the Import User name # ToDo find a way for the "First Import" user user = self.pool.get('res.users').browse(cr, uid, uid) print "load() user.name: %s " % user.name fields.append('user_update') data = [record + (unicode(user.name), ) for record in data] # ToDo Check if a workflow is used instead of manually doing it - if the worklfow is also respected on import return super(product_product, self).load(cr, uid, fields, data, context=context)
def _compute_progress_rates(self, obj, cr, uid, ids=None, context=None):
    # Recompute the progress rate of every checklist task instance
    # attached to the given records of ``obj``, aggregate them into the
    # records' total (and mandatory-only) progress rates, and fire the
    # configured server actions when a task / checklist reaches 100%.
    context = context or {}
    if not ids:
        ids = obj.search(cr, uid, [])
    if isinstance(ids, (int, long)):
        ids = [ids]
    for object in obj.browse(cr, uid, ids):
        context['active_id'] = object.id
        instance_pool = self.pool.get('checklist.task.instance')
        instance_ids = instance_pool.search(cr, uid,
            [('checklist_task_id.checklist_id.model_id.model', '=', obj._name),
             ('res_id', '=', object.id)], context={'active_test': True})
        # The following line exists because of a bug in ORM
        instance_ids = [inst['id'] for inst in instance_pool.read(cr, uid, instance_ids, ['active']) if inst['active']]
        instances = instance_pool.browse(cr, uid, instance_ids)
        total_progress_rate = total_progress_rate_mandatory = 100
        total_progress_rates = {}
        for instance in instances:
            # Rate = 100 minus the share of fields still left to fill.
            progress_rate = 100
            if instance.checklist_task_id.field_ids:
                progress_rate -= (float(len(instance.field_ids_to_fill)) / float(len(instance.checklist_task_id.field_ids))) * 100
            instance_pool.write(cr, uid, instance.id, {'progress_rate': progress_rate})
            total_progress_rates.setdefault('total_progress_rate', 0.0)
            total_progress_rates.setdefault('instances_count', 0)
            total_progress_rates['total_progress_rate'] += progress_rate
            total_progress_rates['instances_count'] += 1
            # Run the task's action only when the rate just reached 100
            # (chained comparison: old rate != new rate == 100).
            if instance.checklist_task_id.action_id and instance.progress_rate != progress_rate == 100:
                checklist_task = instance.checklist_task_id
                action = instance.checklist_task_id.action_id
                try:
                    self.pool.get('ir.actions.server').run(cr, uid, [action.id], context)
                    _logger.debug('Action: %s, User: %s, Resource: %s, Origin: checklist.task,%s' % (action.id, uid, object.id, checklist_task.id))
                except Exception, e:
                    # Record the failure and notify, then skip this instance.
                    stack = traceback.format_exc()
                    self.pool.get('checklist.exception').create(cr, uid, {'checklist_task_id': checklist_task.id, 'exception_type': 'action', 'res_id': object.id, 'action_id': action.id, 'exception': e, 'stack': stack})
                    _logger.error('Action: %s, User: %s, Resource: %s, Origin: checklist.task,%s, Exception: %s' % (action.id, uid, object.id, checklist_task.id, tools.ustr(e)))
                    company_id = self.pool.get('res.users').read(cr, uid, uid, ['company_id'])['company_id'][0]
                    self.pool.get('checklist.exception').ChecklistExceptionEmail(cr, uid, company_id, 'run', checklist_task.id, object.id, action.id, tools.ustr(e), tools.ustr(stack))
                    continue
            # Mandatory tasks are aggregated separately when the
            # checklist drives the record's 'active' field.
            if instance.checklist_task_id.checklist_id.active_field and instance.mandatory:
                total_progress_rates.setdefault('total_progress_rate_mandatory', 0.0)
                total_progress_rates['total_progress_rate_mandatory'] += progress_rate
                total_progress_rates.setdefault('instances_count_mandatory', 0)
                total_progress_rates['instances_count_mandatory'] += 1
        if total_progress_rates.get('instances_count', False):
            total_progress_rate = total_progress_rates['total_progress_rate'] / total_progress_rates['instances_count']
        fields = ['total_progress_rate']
        values = ["%.2f" % total_progress_rate]
        if instances and instances[0].checklist_task_id.checklist_id.active_field:
            if total_progress_rates.get('instances_count_mandatory', False):
                total_progress_rate_mandatory = total_progress_rates['total_progress_rate_mandatory'] \
                    / total_progress_rates['instances_count_mandatory']
            fields.append('total_progress_rate_mandatory')
            values.append("%.2f" % total_progress_rate_mandatory)
            # Activate the record once all mandatory tasks are complete.
            if object.total_progress_rate_mandatory != total_progress_rate_mandatory == 100:
                fields.append('active')
                values.append("TRUE")
        # Direct SQL update; field names come from code, not user input.
        cr.execute("UPDATE " + obj._table + " SET (" + ','.join(fields) + ") = %s WHERE id = %s", (tuple(values), object.id))
        # Fire the checklist-level action when the record just hit 100%.
        if instances and instances[0].checklist_task_id.checklist_id.action_id and object.total_progress_rate != total_progress_rate == 100:
            checklist = instances[0].checklist_task_id.checklist_id
            action = checklist.action_id
            try:
                self.pool.get('ir.actions.server').run(cr, uid, [action.id], context)
                _logger.debug('Action: %s, User: %s, Resource: %s, Origin: checklist,%s' % (action.id, uid, object.id, checklist.id))
            except Exception, e:
                stack = traceback.format_exc()
                self.pool.get('checklist.exception').create(cr, uid, {'checklist_id': checklist.id, 'exception_type': 'action', 'res_id': object.id, 'action_id': action.id, 'exception': e, 'stack': stack})
                _logger.error('Action: %s, User: %s, Resource: %s, Origin: checklist,%s, Exception: %s' % (action.id, uid, object.id, checklist.id, tools.ustr(e)))
                continue
def fields_to_check(self, cr, uid):
    """Add both valuation columns to the inherited list of fields to check."""
    fields = super(product_product, self).fields_to_check(cr, uid)
    if fields:
        fields.extend(['valuation1', 'valuation2'])
    return fields
def _get_invoice_line_key_cols(self, cr, uid, ids, context=None):
    # Modificada: invoice lines are also keyed by vehicle and employee.
    key_cols = super(account_invoice, self)._get_invoice_line_key_cols(
        cr, uid, ids, context)
    key_cols.extend(['vehicle_id', 'employee_id'])
    return key_cols
def get_fields(self, cr, uid, model, context=None, depth=FIELDS_RECURSION_LIMIT):
    """ Recursively get fields for the provided model (through
    fields_get) and filter them according to importability

    The output format is a list of ``Field``, with ``Field`` defined as:

    .. class:: Field

        .. attribute:: id (str)

            A non-unique identifier for the field, used to compute
            the span of the ``required`` attribute: if multiple
            ``required`` fields have the same id, only one of them
            is necessary.

        .. attribute:: name (str)

            The field's logical (OpenERP) name within the scope of
            its parent.

        .. attribute:: string (str)

            The field's human-readable name (``@string``)

        .. attribute:: required (bool)

            Whether the field is marked as required in the
            model. Clients must provide non-empty import values
            for all required fields or the import will error out.

        .. attribute:: fields (list(Field))

            The current field's subfields. The database and
            external identifiers for m2o and m2m fields; a
            filtered and transformed fields_get for o2m fields (to
            a variable depth defined by ``depth``).

            Fields with no sub-fields will have an empty list of
            sub-fields.

    :param str model: name of the model to get fields form
    :param int landing: depth of recursion into o2m fields
    """
    # Synthetic 'id' entry: records can always be matched by external id.
    fields = [{"id": "id", "name": "id", "string": _("External ID"), "required": False, "fields": []}]
    fields_got = self.pool[model].fields_get(cr, uid, context=context)
    for name, field in fields_got.iteritems():
        # an empty string means the field is deprecated, @deprecated must
        # be absent or False to mean not-deprecated
        if field.get("deprecated", False) is not False:
            continue
        # Skip readonly fields unless some state makes them writable.
        if field.get("readonly"):
            states = field.get("states")
            if not states:
                continue
            # states = {state: [(attr, value), (attr2, value2)], state2:...}
            if not any(
                attr == "readonly" and value is False
                for attr, value in itertools.chain.from_iterable(states.itervalues())
            ):
                continue
        f = {
            "id": name,
            "name": name,
            "string": field["string"],
            # Y U NO ALWAYS HAS REQUIRED
            "required": bool(field.get("required")),
            "fields": [],
        }
        if field["type"] in ("many2many", "many2one"):
            # relational fields can be matched by external or database id
            f["fields"] = [
                dict(f, name="id", string=_("External ID")),
                dict(f, name=".id", string=_("Database ID")),
            ]
        elif field["type"] == "one2many" and depth:
            # recurse into the target model, one level shallower
            f["fields"] = self.get_fields(cr, uid, field["relation"], context=context, depth=depth - 1)
            # technical users additionally get the raw database id column
            if self.pool["res.users"].has_group(cr, uid, "base.group_no_one"):
                f["fields"].append(
                    {"id": ".id", "name": ".id", "string": _("Database ID"), "required": False, "fields": []}
                )
        fields.append(f)
    # TODO: cache on model?
    return fields
def generate_csv \ ( self, cr, uid , table_obj , search=[] , header=True , field_separator="," , decimal_point="." , quote='"' , line_separator="\n" ) : result = [] # header if header : fields = [] for k,v in table_obj._columns.items () : if k == "id" : continue # yes, this can happen! if v._type not in self._unprintable : fields.append(self._u2a(v.string, quote, field_separator, line_separator)) result.append(field_separator.join(fields)) if search : where = " where %s" % (" and ".join('"%s" %s %s' % (s[0], s[1], s[2]) for s in search)) else : where = "" sql = "select id from %s%s order by id;" % (table_obj._table, where) cr.execute(sql) res = cr.fetchall() for id in [x[0] for x in res] : obj = table_obj.browse(cr, uid, id) # reduce memory consumption! fields = [] for k,v in table_obj._columns.items () : if k == "id" : continue # yes, this can happen! if v._type not in self._unprintable : attr = getattr (obj, k) if attr is False and v._type != "boolean" and v.required : print _('No value for required attribute "%s" of table "%s" with id=%s.' % (k, table_obj._name, id) ) if v._type == "boolean" : fields.append(str(attr)) elif attr is False : fields.append("") elif v._type == "float" : fields.append(str(attr).replace(".", decimal_point)) elif v._type == "integer" : fields.append(str(attr)) elif v._type in ["one2one", "many2one"] : fields.append(self._u2a(attr.name, quote, field_separator, line_separator)) else : fields.append(self._u2a(attr, quote, field_separator, line_separator)) result.append(field_separator.join(fields)) return line_separator.join(result)
def get_fields(self, cr, uid, model, context=None, depth=FIELDS_RECURSION_LIMIT):
    """ Recursively get fields for the provided model (through
    fields_get) and filter them according to importability

    The output format is a list of ``Field``, with ``Field`` defined as:

    .. class:: Field

        .. attribute:: id (str)

            A non-unique identifier for the field, used to compute
            the span of the ``required`` attribute: if multiple
            ``required`` fields have the same id, only one of them
            is necessary.

        .. attribute:: name (str)

            The field's logical (Odoo) name within the scope of
            its parent.

        .. attribute:: string (str)

            The field's human-readable name (``@string``)

        .. attribute:: required (bool)

            Whether the field is marked as required in the
            model. Clients must provide non-empty import values
            for all required fields or the import will error out.

        .. attribute:: fields (list(Field))

            The current field's subfields. The database and
            external identifiers for m2o and m2m fields; a
            filtered and transformed fields_get for o2m fields (to
            a variable depth defined by ``depth``).

            Fields with no sub-fields will have an empty list of
            sub-fields.

    :param str model: name of the model to get fields form
    :param int landing: depth of recursion into o2m fields
    """
    model_obj = self.pool[model]
    # Synthetic 'id' entry: records can always be matched by external id.
    fields = [{
        'id': 'id',
        'name': 'id',
        'string': _("External ID"),
        'required': False,
        'fields': [],
    }]
    fields_got = model_obj.fields_get(cr, uid, context=context)
    # Never offer the ORM's magic columns or the concurrency field.
    blacklist = orm.MAGIC_COLUMNS + [model_obj.CONCURRENCY_CHECK_FIELD]
    for name, field in fields_got.iteritems():
        if name in blacklist:
            continue
        # an empty string means the field is deprecated, @deprecated must
        # be absent or False to mean not-deprecated
        if field.get('deprecated', False) is not False:
            continue
        # Skip readonly fields unless some state makes them writable.
        if field.get('readonly'):
            states = field.get('states')
            if not states:
                continue
            # states = {state: [(attr, value), (attr2, value2)], state2:...}
            if not any(attr == 'readonly' and value is False
                       for attr, value in itertools.chain.from_iterable(
                           states.itervalues())):
                continue
        f = {
            'id': name,
            'name': name,
            'string': field['string'],
            # Y U NO ALWAYS HAS REQUIRED
            'required': bool(field.get('required')),
            'fields': [],
        }
        if field['type'] in ('many2many', 'many2one'):
            # relational fields can be matched by external or database id
            f['fields'] = [
                dict(f, name='id', string=_("External ID")),
                dict(f, name='.id', string=_("Database ID")),
            ]
        elif field['type'] == 'one2many' and depth:
            # recurse into the target model, one level shallower
            f['fields'] = self.get_fields(
                cr, uid, field['relation'], context=context, depth=depth-1)
            # technical users additionally get the raw database id column
            if self.user_has_groups(cr, uid, 'base.group_no_one'):
                f['fields'].append({'id': '.id', 'name': '.id',
                                    'string': _("Database ID"),
                                    'required': False, 'fields': []})
        fields.append(f)
    # TODO: cache on model?
    return fields