def get_products(self, cr, uid, pricelist_id, partner_id, args, fields, context=None):
    """Return web-visible products read with ``fields``, each with a
    formatted ``price`` taken from the given pricelist.

    For variant products the price is looked up on the parent product
    (``parent_prod_id``) instead of the variant itself.

    :param args: extra search domain (the ``web_visible`` clause is added)
    :param fields: field names to read (``parent_prod_id`` is added)
    :return: list of product read() dicts, each extended with ``price``
    """
    prod_model = self.pool.get('product.product')
    if context is None:
        context = {}
    # NOTE(review): forces the Spanish translation for web display;
    # also written into a caller-provided context (kept as before).
    context['lang'] = "es_ES"
    # Work on copies so the caller's ``args``/``fields`` lists are not mutated.
    args = list(args) if args else []
    args += [("web_visible", "=", True)]
    fields = list(fields)
    fields.append("parent_prod_id")
    prod_ids = prod_model.search(cr, uid, args)
    prods = prod_model.read(cr, uid, prod_ids, fields, context=context)
    for prod in prods:
        if prod["parent_prod_id"]:
            # Price comes from the parent product.
            prod_id = prod["parent_prod_id"][0]
        else:
            prod_id = prod["id"]
        product_price = self.get_pricelist(
            cr, uid, [prod_id], pricelist_id, partner_id)[prod_id][pricelist_id]
        prod["price"] = "{0:.2f}".format(product_price)
    return prods
def get_order(self, cr, uid, order_id, fields, context=None):
    """Read one sale order plus its lines, enriched for web display.

    The order dict gets ``partner_id`` (the id only); each line gets
    ``product_name`` and ``product_image`` from its product.

    :param fields: fields to read on the order itself
    :return: ``{"order": <dict>, "lines": <list of dicts>}``
             (``lines`` is ``{}`` when the order has no lines — kept for
             backward compatibility with existing callers)
    """
    if context is None:
        context = {}
    context['lang'] = "es_ES"
    order_model = self.pool.get('sale.order')
    line_model = self.pool.get('sale.order.line')
    product_model = self.pool.get('product.product')
    order = order_model.read(cr, uid, [order_id], fields)[0]
    order["partner_id"] = order_model.browse(cr, uid, [order_id])[0].partner_id.id
    lines = {}
    # Separate local list — the caller's ``fields`` is left untouched.
    line_fields = ["product_uom_qty", "price_unit", "price_subtotal", "product_id"]
    if order["order_line"]:
        lines = line_model.read(cr, uid, order["order_line"], line_fields)
        for line in lines:
            product = product_model.read(cr, uid, line["product_id"][0],
                                         ["name", "image_small"], context=context)
            line["product_name"] = product["name"]
            line["product_image"] = product["image_small"]
    return {"order": order, "lines": lines}
def gather_columns_info(self, mclass):
    """Returns the information of all columns defined in the given class"""
    if not hasattr(mclass, '_columns'):
        return []
    columns = mclass._columns
    defaults = getattr(mclass, '_defaults', [])
    infos = []
    for name, column in columns.items():
        info = {
            'field_name': name,
            'field_type': column.__class__.__name__,
        }
        if name in defaults:
            default = defaults[name]
            # Functions are represented by their name, not their repr.
            if inspect.isfunction(default):
                default = default.__name__
            text = str(default)
            for junk in ('<', '>', 'unbound method'):
                text = text.replace(junk, '')
            info['default_value'] = text
        # Relational columns expose their target model (and m2m table).
        if info['field_type'] in ('one2one', 'many2one', 'one2many', 'many2many'):
            info['reference_class'] = column._obj
            if info['field_type'] == 'many2many':
                info['reference_table'] = column._rel
        infos.append(info)
    return infos
def _sql_query_get(self, cr, uid, ids, prop, unknow_none, context=None,
                   where_plus=None, limit=None, offset=None):
    """Build the SELECT statement exposed by the ``sql_query`` field.

    :param where_plus: optional list of extra HAVING clauses (ANDed)
    :param limit: optional SQL LIMIT value
    :param offset: optional SQL OFFSET value
    :return: dict mapping each record id to its SQL string, or False
             when no model path could be resolved
    """
    # ``where_plus`` used to default to a shared mutable []; None avoids
    # the mutable-default pitfall without changing behaviour.
    if where_plus is None:
        where_plus = []
    result = {}
    for obj in self.browse(cr, uid, ids, context=context):
        fields = []
        groupby = []
        i = 0
        for f in obj.field_ids:
            # Allowing to use count(*)
            if not f.field_id.model and f.group_method == 'count':
                fields.insert(0, 'count(*) as column_count')
                continue
            t = self.pool.get(f.field_id.model_id.model)._table
            if f.group_method == 'group':
                fields.append('\t' + t + '.' + f.field_id.name + ' as field' + str(i))
                groupby.append(t + '.' + f.field_id.name)
            else:
                fields.append('\t' + f.group_method + '(' + t + '.' + f.field_id.name + ')' + ' as field' + str(i))
            i += 1
        models = self._path_get(cr, uid, obj.model_ids, obj.filter_ids)
        check = self._id_get(cr, uid, ids[0], context)
        # '!=' replaces the Python-2-only '<>' operator (same semantics).
        if check != False:
            fields.insert(0, check + ' as id')
        if models:
            result[obj.id] = """select %s from %s """ % (',\n'.join(fields), models)
            if groupby:
                result[obj.id] += "group by\n\t" + ', '.join(groupby)
            if where_plus:
                result[obj.id] += "\nhaving \n\t" + "\n\t and ".join(where_plus)
            if limit:
                result[obj.id] += " limit " + str(limit)
            if offset:
                result[obj.id] += " offset " + str(offset)
        else:
            result[obj.id] = False
    return result
def create_index(self, cr, model_id):
    """Rebuild the full-text-search index and row trigger for one model.

    Collects the indexed fields (with their priority weight) configured
    for ``model_id``, refills ``fts_full_text_search`` with a weighted
    tsvector per record of the model's table, and (re)creates the trigger
    keeping the index up to date on INSERT/UPDATE/DELETE.

    NOTE(review): table and column names cannot be bound as SQL
    parameters, so parts of these statements are built by string
    formatting.  All interpolated values come from ir_model /
    ir_model_fields, not from end users — keep it that way.
    """
    cr.execute("SELECT model FROM ir_model WHERE id=%s", (model_id, ))
    row = cr.fetchone()  # renamed from ``tuple``, which shadowed the builtin
    model_name = row[0]
    pool = pooler.get_pool(cr.dbname)
    table_name = pool.get(model_name)._table
    cr.execute(
        "SELECT f.name, p.name FROM fts_full_text_index i, fts_priority p, ir_model_fields f WHERE i.field_id=f.id AND i.priority=p.id AND f.model_id=%s",
        (model_id, ))
    ts_vector = []
    fields = []
    for name, weight in cr.fetchall():
        ts_vector.append(
            "setweight( to_tsvector('default', COALESCE(%s::TEXT,'')), '%s' )"
            % (name, weight))
        fields.append('%s|%s' % (str(name), weight))
    ts_vector = ' || '.join(ts_vector)
    fields = ','.join(fields)
    # Only refill the index when at least one field is configured: with no
    # fields ``ts_vector`` is the empty string and the INSERT below would be
    # invalid SQL.  (The old guard ``!= "''"`` could never match '' and so
    # never skipped anything.)
    if ts_vector:
        cr.execute(
            """ DELETE FROM fts_full_text_search WHERE model = (SELECT id FROM ir_model WHERE model=%s) """,
            (model_name, ))
        cr.commit()
        cr.execute("""
            INSERT INTO fts_full_text_search(model,reference,message)
            SELECT model.id, tbl.id, %s
            FROM \"%s\" AS tbl,
                 (SELECT id FROM ir_model WHERE model='%s') AS model
            """ % (ts_vector, table_name, model_name))
        cr.commit()
    # (A stray ``SELECT id FROM ir_model`` whose result was never fetched
    # has been removed here.)
    cr.execute(
        "DROP TRIGGER IF EXISTS \"%s_fts_full_text_search\" ON \"%s\""
        % (table_name, table_name))
    cr.commit()
    cr.execute(
        "CREATE TRIGGER \"" + table_name
        + "_fts_full_text_search\" BEFORE INSERT OR UPDATE OR DELETE ON \""
        + table_name
        + "\" FOR EACH ROW EXECUTE PROCEDURE fts_full_text_search_trigger(%s,%s)",
        (model_id, fields))
    cr.commit()
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
    """Read user records, expanding reified group pseudo-fields.

    The requested ``fields`` are split into real columns and reified group
    fields (``is_reified_group``); the latter are derived from the user's
    ``groups_id`` after the normal read.
    """
    if not fields:
        fields = self.fields_get(cr, uid, context=context).keys()
    group_fields, fields = partition(is_reified_group, fields)
    # ``groups_id`` is needed below to compute the reified values.
    if 'groups_id' not in fields:
        fields.append('groups_id')
    res = super(users_view, self).read(cr, uid, ids, fields, context=context, load=load)
    # read() may return a single dict or a list depending on ``ids``.
    for values in (res if isinstance(res, list) else [res]):
        self._get_reified_groups(group_fields, values)
    return res
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False):
    """Generate the journal-specific editable tree view for move lines.

    For tree views opened with a ``journal_id`` in the context, the view
    arch is built dynamically from the journal's view columns: per-column
    widths, totals on debit/credit, field domains and on_change handlers,
    plus red colouring of draft lines when the journal view has a
    ``state`` column.  All other view types fall through to the standard
    behaviour.
    """
    # Was ``context={}`` — a shared mutable default; None + guard is safe
    # and backward compatible.
    if context is None:
        context = {}
    # NOTE(review): super(osv.osv, self) deliberately skips intermediate
    # classes in the MRO — kept as-is, changing it could alter the
    # inherited arch.
    result = super(osv.osv, self).fields_view_get(cr, uid, view_id, view_type, context, toolbar=toolbar)
    if view_type == 'tree' and context.get('journal_id', False):
        title = self.view_header_get(cr, uid, view_id, view_type, context)
        journal = self.pool.get('account.journal').browse(cr, uid, context['journal_id'])
        # if the journal view has a state field, color lines depending on
        # its value
        state = ''
        for field in journal.view_id.columns_id:
            if field.field == 'state':
                state = ' colors="red:state==\'draft\'"'
        xml = '''<?xml version="1.0"?>\n<tree string="%s" editable="top" refresh="5" on_write="_on_create_write"%s>\n\t''' % (title, state)
        fields = []
        # Hard-coded pixel widths for narrow technical columns.
        widths = {
            'ref': 50,
            'statement_id': 50,
            'state': 60,
            'tax_code_id': 50,
            'move_id': 40,
        }
        for field in journal.view_id.columns_id:
            fields.append(field.field)
            attrs = []
            if field.field == 'debit':
                attrs.append('sum="Total debit"')
            elif field.field == 'credit':
                attrs.append('sum="Total credit"')
            elif field.field == 'account_tax_id':
                attrs.append('domain="[(\'parent_id\',\'=\',False)]"')
            elif field.field == 'account_id' and journal.id:
                attrs.append('domain="[(\'journal_id\', \'=\', '+str(journal.id)+'),(\'type\',\'<>\',\'view\'), (\'type\',\'<>\',\'closed\')]" on_change="onchange_account_id(account_id, partner_id)"')
            elif field.field == 'partner_id':
                attrs.append('on_change="onchange_partner_id(move_id,partner_id,account_id,debit,credit,date,((\'journal_id\' in context) and context[\'journal_id\']) or {})"')
            if field.readonly:
                attrs.append('readonly="1"')
            if field.required:
                attrs.append('required="1"')
            else:
                attrs.append('required="0"')
            if field.field in ('amount_currency', 'currency_id'):
                attrs.append('on_change="onchange_currency(account_id,amount_currency,currency_id,date,((\'journal_id\' in context) and context[\'journal_id\']) or {})"')
            if field.field in widths:
                attrs.append('width="'+str(widths[field.field])+'"')
            xml += '''<field name="%s" %s/>\n''' % (field.field, ' '.join(attrs))
        xml += '''</tree>'''
        result['arch'] = xml
        result['fields'] = self.fields_get(cr, uid, fields, context)
    return result
def _sql_query_get(self, cr, uid, ids, prop, unknow_none, context=None,
                   where_plus=None, limit=None, offset=None):
    """Build the SELECT statement exposed by the ``sql_query`` field.

    :param where_plus: optional list of extra HAVING clauses (ANDed)
    :param limit: optional SQL LIMIT value
    :param offset: optional SQL OFFSET value
    :return: dict mapping each record id to its SQL string, or False
             when no model path could be resolved
    """
    # Replace the shared mutable default ([]) with the None sentinel.
    if where_plus is None:
        where_plus = []
    result = {}
    for obj in self.browse(cr, uid, ids, context=context):
        fields = []
        groupby = []
        i = 0
        for f in obj.field_ids:
            # Allowing to use count(*)
            if not f.field_id.model and f.group_method == 'count':
                fields.insert(0, 'count(*) as column_count')
                continue
            t = self.pool.get(f.field_id.model_id.model)._table
            if f.group_method == 'group':
                fields.append('\t'+t+'.'+f.field_id.name+' as field'+str(i))
                groupby.append(t+'.'+f.field_id.name)
            else:
                fields.append('\t'+f.group_method+'('+t+'.'+f.field_id.name+')'+' as field'+str(i))
            i += 1
        models = self._path_get(cr, uid, obj.model_ids, obj.filter_ids)
        check = self._id_get(cr, uid, ids[0], context)
        # '!=' replaces the Python-2-only '<>' operator (same semantics).
        if check != False:
            fields.insert(0, check + ' as id')
        if models:
            result[obj.id] = """select %s from %s """ % (',\n'.join(fields), models)
            if groupby:
                result[obj.id] += "group by\n\t"+', '.join(groupby)
            if where_plus:
                result[obj.id] += "\nhaving \n\t"+"\n\t and ".join(where_plus)
            if limit:
                result[obj.id] += " limit "+str(limit)
            if offset:
                result[obj.id] += " offset "+str(offset)
        else:
            result[obj.id] = False
    return result
def get_by_partner(self, cr, uid, partner_id, fields):
    """Return the partner's sale orders read with ``fields``, each order
    with pickings getting a ``sched_date`` (date part of its most recent
    picking's ``min_date``).

    :return: list of order dicts, or ``{}`` when the partner has no orders
             (NOTE(review): the empty case returns a dict while the normal
             case returns a list — kept for backward compatibility).
    """
    order_model = self.pool.get('sale.order')
    stock_model = self.pool.get('stock.picking.out')
    order_ids = order_model.search(cr, uid, [("partner_id", "=", partner_id)])
    orders = {}
    if order_ids:
        # Copy so the caller's ``fields`` list is not mutated.
        fields = fields + ["picking_ids"]
        orders = order_model.read(cr, uid, order_ids, fields)
        for order in orders:
            if order["picking_ids"]:
                sched_dates = stock_model.read(cr, uid, order["picking_ids"], ["min_date"])
                # Newest picking first.
                sched_dates.sort(key=lambda x: datetime.strptime(x["min_date"], '%Y-%m-%d %H:%M:%S'), reverse=True)
                order["sched_date"] = sched_dates[0]["min_date"].partition(" ")[0]
    return orders
def read(self, cr, uid, ids, fields, context=None, load='_classic_read'):
    """Read user records, expanding reified group pseudo-fields.

    Splits ``fields`` into real columns and group pseudo-fields
    (``is_field_group``); the latter are computed from the user's
    ``groups_id`` after the normal read.
    """
    if not fields:
        # No explicit field list: read every real field, no group fields.
        group_fields, fields = [], self.fields_get(cr, uid, context).keys()
    else:
        group_fields, fields = partition(is_field_group, fields)
    if group_fields:
        group_obj = self.pool.get('res.groups')
        # ``groups_id`` is needed below to compute the reified values.
        fields.append('groups_id')
        # read the normal fields (and 'groups_id')
        res = super(users_view, self).read(cr, uid, ids, fields, context, load)
        # read() returns a dict for a scalar id, a list otherwise.
        records = res if isinstance(res, list) else [res]
        for record in records:
            # get the field 'groups_id' and insert the group_fields
            groups = set(record['groups_id'])
            for f in group_fields:
                if is_boolean_group(f):
                    # True when the user belongs to that single group.
                    record[f] = get_boolean_group(f) in groups
                elif is_boolean_groups(f):
                    # True when the user belongs to ANY of the groups.
                    record[f] = not groups.isdisjoint(get_boolean_groups(f))
                elif is_selection_groups(f):
                    # Highest group of the selection the user belongs to.
                    selected = groups.intersection(get_selection_groups(f))
                    record[f] = group_obj.get_maximal(cr, uid, selected, context)
        return res
    # No reified fields requested: plain pass-through read.
    return super(users_view, self).read(cr, uid, ids, fields, context, load)
def export_file(self, cr, uid, id, model_ids, context):
    """Export ``model_ids`` records of the format's model to a text file.

    Each configured field expression (``$field.path`` syntax) is evaluated
    per record, optionally number-formatted, padded/truncated to a fixed
    length and quoted, then the joined lines are appended to
    ``format.path/format.file_name`` (with an optional header line written
    first when the file does not yet exist).

    Returns False when no format id is given or a required fill char is
    missing; otherwise returns None.
    """
    if not id:
        return False
    format = self.browse(cr, uid, id, context)
    objects = self.pool.get(format.model_id.model).browse(cr, uid, model_ids, context)
    logger = netsvc.Logger()
    header_line = []
    lines = []
    for object in objects:
        fields = []
        headers = []
        for field in format.field_ids:
            try:
                # SECURITY NOTE(review): evaluates an admin-configured
                # expression with eval(); must never be reachable by
                # untrusted users.
                field_eval = eval(field.expression.replace('$', 'object.'))
            except:
                # Any evaluation error silently yields an empty cell.
                field_eval = ''
            logger.notifyChannel('FILE FORMAT', netsvc.LOG_INFO, _("The expression to export for the %s file is %s and it's val: %s") % (format.name, field.expression, field_eval))
            if (isinstance(field_eval, int) or isinstance(field_eval, float)):
                if field.format_number:
                    field_eval = field.format_number % field_eval
                # Replace the decimal point by the configured character.
                field_eval = str(field_eval).replace('.', unaccent(field.decimal_character) or '')
            ffield = unaccent(field_eval)
            # If the length of the field is 0, it's means that dosen't matter how many chars it take
            if field.length != 0:
                #If fill_char field not exists raise WARNING
                if field.fill_char:
                    if field.align == 'right':
                        ffield = ffield.rjust(field.length, unaccent(field.fill_char))
                    else:
                        ffield = ffield.ljust(field.length, unaccent(field.fill_char))
                else:
                    logger.notifyChannel('FILE FORMAT', netsvc.LOG_WARNING, _("The field 'Fill Char' of the %s is required, because you have selected and specific length") % field.name)
                    return False
                # Hard-truncate to the fixed column width.
                ffield = ffield[:field.length]
            field_header = unaccent(field.name)
            if format.quote:
                # Swap embedded quotes so they cannot break the quoting.
                if format.quote == '"':
                    ffield = ffield.replace('"', "'")
                elif format.quote == "'":
                    ffield = ffield.replace("'", '"')
                ffield = format.quote + ffield + format.quote
                field_header = format.quote + field_header + format.quote
            fields.append(ffield)
            headers.append(field_header)
        separator = format.separator or ''
        lines.append(separator.join(fields))
        # The header is built once, from the first record's columns.
        if not header_line:
            header_line.append(separator.join(headers))
    try:
        file_path = format.path + "/" + format.file_name
        # Control if we need the headers + if the path file dosen't exists and is a file. To add the headers or not
        if format.header and not os.path.isfile(file_path):
            # Write the headers in the file
            file = open(file_path, 'w')
            for header in header_line:
                file.write(header + "\r\n")
            file.close()
        # Put the information in the file
        file = open(file_path, 'a+')
        for line in lines:
            file.write(line + "\r\n")
        file.close()
        logger.notifyChannel('FILE FORMAT', netsvc.LOG_INFO, _("the file %s is write correctly") % format.file_name)
    except:
        # NOTE(review): silently ignores any I/O failure — best-effort export.
        pass
def _compute_next_monthly_date(self, cr, uid, frequence_id):
    '''
    Compute the next date when the frequence is a monthly frequence

    Supports two modes:
      * ``monthly_one_day`` — "the Nth <weekday> every M months";
      * ``monthly_repeating_ok`` — fixed days of the month picked via the
        ``monthly_dayNN`` boolean columns.
    Returns an mx.DateTime date, or False when the frequence is not
    monthly / no mode is selected.
    '''
    if not isinstance(frequence_id, (int, long)):
        # Fixed: the message used to name _compute_next_weekly_date.
        raise osv.except_osv(_('Error'), _('You should pass a integer to the _compute_next_monthly_date'))
    frequence = self.browse(cr, uid, frequence_id)
    if frequence.name != 'monthly':
        return False
    if frequence.monthly_one_day:
        # "Nth weekday" mode.
        day = self.get_datetime_day(frequence.monthly_choose_day)
        if frequence.last_run:
            from_date = strptime(frequence.last_run, '%Y-%m-%d')
            return max(today(), from_date + RelativeDate(months=+frequence.monthly_frequency, weekday=(day, frequence.monthly_choose_freq)))
        start_date = strptime(frequence.start_date, '%Y-%m-%d')
        if start_date < today():
            start_date = today()
        next_date = start_date + RelativeDate(weekday=(day, frequence.monthly_choose_freq))
        # Roll forward month by month until we pass the start date.
        while next_date < start_date:
            next_date = next_date + RelativeDate(months=1, weekday=(day, frequence.monthly_choose_freq))
        return next_date
    elif frequence.monthly_repeating_ok:
        days_ok = []
        # Get all fields for choosen days
        fields = []
        for col in self._columns:
            if re.match('^monthly_day[0-9]', col):
                fields.append(col)
        frequence_read = self.read(cr, uid, [frequence_id], fields)[0]
        for f in fields:
            if frequence_read[f]:
                # assumes the columns end in a 2-digit day number
                # (monthly_day01 .. monthly_day31) — TODO confirm
                days_ok.append(int(f[-2:]))
        days_ok.sort()
        if frequence.last_run:
            # Start searching the day after the last run.
            from_date = strptime(frequence.last_run, '%Y-%m-%d') + RelativeDateTime(days=1)
            force = True
        else:
            from_date = strptime(frequence.start_date, '%Y-%m-%d')
            if from_date < today():
                from_date = today()
            force = False
        if from_date.day > days_ok[-1]:
            # switch to next month
            if force:
                from_date += RelativeDate(day=days_ok[0], months=frequence.monthly_frequency)
                return max(today(), from_date)
            else:
                from_date += RelativeDate(day=days_ok[0], months=1)
                return from_date
        # First selected day on/after the current day of month.
        days = filter(lambda a: a >= from_date.day, days_ok)
        from_date += RelativeDate(day=days[0])
        if force:
            return max(today(), from_date)
        return from_date
    return False
def generate_csv(self, cr, uid, table_obj, search=None, header=True,
                 field_separator=",", decimal_point=".", quote='"',
                 line_separator="\n"):
    """Dump every printable column of ``table_obj`` to a CSV string.

    :param table_obj: ORM model whose rows are exported (ordered by id)
    :param search: optional list of (column, operator, value) triples,
        ANDed into the WHERE clause.  SECURITY NOTE(review): the values
        are interpolated directly into the SQL — only trusted input may
        be passed here.
    :param header: prepend a row with the columns' labels
    :return: the joined CSV text
    """
    # ``search`` used to default to a shared mutable []; use None instead.
    if search is None:
        search = []
    result = []
    # header
    if header:
        fields = []
        for k, v in table_obj._columns.items():
            if k == "id":
                continue  # yes, this can happen!
            if v._type not in self._unprintable:
                fields.append(self._u2a(v.string, quote, field_separator, line_separator))
        result.append(field_separator.join(fields))
    if search:
        where = " where %s" % (" and ".join('"%s" %s %s' % (s[0], s[1], s[2]) for s in search))
    else:
        where = ""
    sql = "select id from %s%s order by id;" % (table_obj._table, where)
    cr.execute(sql)
    res = cr.fetchall()
    for rec_id in [x[0] for x in res]:  # renamed: ``id`` shadowed the builtin
        obj = table_obj.browse(cr, uid, rec_id)  # reduce memory consumption!
        fields = []
        for k, v in table_obj._columns.items():
            if k == "id":
                continue  # yes, this can happen!
            if v._type not in self._unprintable:
                attr = getattr(obj, k)
                if attr is False and v._type != "boolean" and v.required:
                    # Parenthesised so it is valid in both Python 2 and 3.
                    print(_('No value for required attribute "%s" of table "%s" with id=%s.' % (k, table_obj._name, rec_id)))
                if v._type == "boolean":
                    fields.append(str(attr))
                elif attr is False:
                    # Unset non-boolean values export as an empty cell.
                    fields.append("")
                elif v._type == "float":
                    fields.append(str(attr).replace(".", decimal_point))
                elif v._type == "integer":
                    fields.append(str(attr))
                elif v._type in ["one2one", "many2one"]:
                    fields.append(self._u2a(attr.name, quote, field_separator, line_separator))
                else:
                    fields.append(self._u2a(attr, quote, field_separator, line_separator))
        result.append(field_separator.join(fields))
    return line_separator.join(result)
def export_file(self, cr, uid, id, model_ids, context):
    """Export ``model_ids`` records of the format's model to a text file.

    Each configured field expression (``$field.path`` syntax) is evaluated
    per record, optionally number-formatted, padded/truncated to a fixed
    length and quoted, then the joined lines are appended to
    ``format.path/format.file_name`` (an optional header line is written
    first when the file does not yet exist).

    Returns False when no format id is given or a required fill char is
    missing; otherwise returns None.
    """
    if not id:
        return False
    format = self.browse(cr, uid, id, context)
    objects = self.pool.get(format.model_id.model).browse(
        cr, uid, model_ids, context)
    logger = netsvc.Logger()
    header_line = []
    lines = []
    for object in objects:
        fields = []
        headers = []
        for field in format.field_ids:
            try:
                # SECURITY NOTE(review): evaluates an admin-configured
                # expression with eval(); must never be reachable by
                # untrusted users.
                field_eval = eval(field.expression.replace('$', 'object.'))
            except Exception:
                # Any evaluation error yields an empty cell (narrowed from
                # a bare except so ^C and SystemExit still propagate).
                field_eval = ''
            logger.notifyChannel(
                'FILE FORMAT', netsvc.LOG_INFO,
                _("The expression to export for the %s file is %s and it's val: %s"
                  ) % (format.name, field.expression, field_eval))
            if (isinstance(field_eval, int) or isinstance(field_eval, float)):
                if field.format_number:
                    field_eval = field.format_number % field_eval
                # Replace the decimal point by the configured character.
                field_eval = str(field_eval).replace(
                    '.', unaccent(field.decimal_character) or '')
            ffield = unaccent(field_eval)
            # If the length of the field is 0, it's means that dosen't matter how many chars it take
            if field.length != 0:
                #If fill_char field not exists raise WARNING
                if field.fill_char:
                    if field.align == 'right':
                        ffield = ffield.rjust(field.length,
                                              unaccent(field.fill_char))
                    else:
                        ffield = ffield.ljust(field.length,
                                              unaccent(field.fill_char))
                else:
                    logger.notifyChannel(
                        'FILE FORMAT', netsvc.LOG_WARNING,
                        _("The field 'Fill Char' of the %s is required, because you have selected and specific length"
                          ) % field.name)
                    return False
                ffield = ffield[:field.length]
            field_header = unaccent(field.name)
            if format.quote:
                # Swap embedded quotes so they cannot break the quoting.
                if format.quote == '"':
                    ffield = ffield.replace('"', "'")
                elif format.quote == "'":
                    ffield = ffield.replace("'", '"')
                ffield = format.quote + ffield + format.quote
                field_header = format.quote + field_header + format.quote
            fields.append(ffield)
            headers.append(field_header)
        separator = format.separator or ''
        lines.append(separator.join(fields))
        # The header is built once, from the first record's columns.
        if not header_line:
            header_line.append(separator.join(headers))
    try:
        file_path = format.path + "/" + format.file_name
        # Control if we need the headers + if the path file dosen't exists and is a file. To add the headers or not
        if format.header and not os.path.isfile(file_path):
            # Write the headers in the file (``with`` guarantees the
            # handle is closed even on a write error).
            with open(file_path, 'w') as f:
                for header in header_line:
                    f.write(header + "\r\n")
        # Put the information in the file
        with open(file_path, 'a+') as f:
            for line in lines:
                f.write(line + "\r\n")
        logger.notifyChannel(
            'FILE FORMAT', netsvc.LOG_INFO,
            _("the file %s is write correctly") % format.file_name)
    except Exception:
        # Was a silent ``except: pass``; still best-effort, but at least
        # leave a trace of the failure in the log.
        logger.notifyChannel('FILE FORMAT', netsvc.LOG_WARNING,
                             _("the file %s is write correctly") % format.file_name + ' FAILED')
def fields_to_check(self, cr, uid):
    """Extend the parent's checked-field list with the valuation fields."""
    checked = super(product_product, self).fields_to_check(cr, uid)
    if checked:
        checked += ['valuation1', 'valuation2']
    return checked
def fields_to_check(self, cr, uid):
    """Add the two valuation columns to the fields the parent checks."""
    fields = super(product_product, self).fields_to_check(cr, uid)
    if fields:
        fields.extend(('valuation1', 'valuation2'))
    return fields