def load(self, cr, uid, fields, data, context=None):
    #def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
    try:
        payment_id_pos = fields.index('supplier_payment_expense_id')
    except:
        payment_id_pos = -1
    fields_list_need = ['amount', 'currency_id', 'exrate']
    # fields from the needed list that are already present in the import file
    list_remove = set(fields).intersection(set(fields_list_need))
    list_to_get = list(set(fields_list_need) - list_remove)
    list_field_to_import = fields
    list_field_to_import.extend(list_to_get)
    #raise osv.except_osv("E", data)
    payment_no_list = []
    if payment_id_pos >= 0:
        new_data = {}
        for pos in range(len(data)):
            payment_no_list.append(str(data[pos][payment_id_pos]))
            new_data.update({str(data[pos][payment_id_pos]): data[pos]})
        payment_nos = str(payment_no_list).replace("[", "(").replace("]", ")")
        #",".join(map(str, payment_no_list))
        # raise osv.except_osv("KDVN Error", new_data)
        cr.execute("Select name from kderp_supplier_payment_expense krop where name in %s and state!='completed'" % (payment_nos))
        if cr.rowcount > 0:
            list1 = []
            for pn in cr.fetchall():
                list1.append(str(pn[0]))
            raise osv.except_osv("KDVN Error", "State of payment must be BOD Approved !\n%s" % str(list1))
        else:
            if list_to_get:
                list_field_str = str(list_to_get).replace("[", "").replace("]", "").replace("'", "").replace('currency_id', "'''' || rc.name || ''''").replace('amount', 'total')
                cr.execute("Select krop.name,(%s) from kderp_supplier_payment_expense krop left join res_currency rc on currency_id=rc.id where krop.name in %s and state='completed'" % (list_field_str, payment_nos))
                for key, value in cr.fetchall():
                    if type(value) == type(""):
                        value = eval(value)
                    new_data[key] = new_data[key] + value
        new_data2 = []
        for x in new_data:
            new_data2.append(new_data[x])
        if new_data2:
            data = new_data2
    return super(kderp_supplier_payment_expense_pay, self).load(cr, uid, list_field_to_import, data, context)
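# Note: a minimal sketch (not part of the original module) showing how the
# name-list check above could be expressed with a parameterized query instead
# of the str(list).replace("[", "(") string surgery.  psycopg2 adapts a Python
# tuple passed as a bound parameter to a SQL value list, which avoids quoting
# and injection issues.  The helper name is hypothetical; `cr` is assumed to
# be the same database cursor used above.
def _unapproved_payment_names(cr, payment_no_list):
    # Return the payment names that are not yet in the 'completed' state.
    if not payment_no_list:
        return []
    cr.execute("SELECT name FROM kderp_supplier_payment_expense "
               "WHERE name IN %s AND state != 'completed'",
               (tuple(payment_no_list),))
    return [row[0] for row in cr.fetchall()]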
def load(self, cr, uid, fields, data, context=None):
    #def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
    try:
        payment_id_pos = fields.index('import_line/payment_number')
    except:
        payment_id_pos = -1
    data = list(set(data))
    payment_no_list = []
    payment_expense_no_list = []
    if payment_id_pos >= 0:
        for pos in range(len(data)):
            if data[pos][payment_id_pos].upper().find('IN') >= 0:
                payment_no_list.append(str(data[pos][payment_id_pos]))
            elif data[pos][payment_id_pos].upper().find('EN') >= 0:
                payment_expense_no_list.append(str(data[pos][payment_id_pos]))
        payment_nos = str(payment_no_list if payment_no_list else "['false']").replace("[", "(").replace("]", ")")
        payment_expense_nos = str(payment_expense_no_list if payment_expense_no_list else "['false']").replace("[", "(").replace("]", ")")
        # raise osv.except_osv("KDVN Error", new_data)
        cr.execute("""Select name from kderp_supplier_payment krop where name in %s and state!='completed'
                      Union
                      Select name from kderp_supplier_payment_expense krop where name in %s and state!='completed'""" % (payment_nos, payment_expense_nos))
        if cr.rowcount > 0:
            list1 = []
            for pn in cr.fetchall():
                list1.append(str(pn[0]))
            raise osv.except_osv("KDVN Error", "State of payment must be BOD Approved !\n%s" % str(list1))
    return super(kderp_import_payment, self).load(cr, uid, fields, data, context)
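# Note: a minimal sketch (not from the original code).  `data = list(set(data))`
# above removes duplicate import rows but also scrambles the row order of the
# file; if the original ordering matters, an order-preserving de-duplication
# such as the one below could be used instead.  The helper name is hypothetical.
def _dedupe_preserving_order(rows):
    # Keep the first occurrence of each row (rows must be hashable, e.g. tuples).
    seen = set()
    unique_rows = []
    for row in rows:
        if row not in seen:
            seen.add(row)
            unique_rows.append(row)
    return unique_rows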
def load(self, cr, uid, fields, data, context=None):
    print "--------------------------------------------------------"
    print "load(): Commands BEFORE original load method is called"
    print "--------------------------------------------------------"
    print "load() uid: %s" % uid
    print "load() fields: %s" % fields
    print "load() data: %s" % data
    print "load() context: %s" % context

    # TODO use export_data() to make a full backup of all products and post it as a message to an admin newsgroup

    # Add the ID field if not already present
    try:
        id_index = fields.index('id')
    except:
        # Add id to the end of the fields list
        fields.append('id')
        # Add an empty string to the end of the tuples inside the data list
        data = [record + (u'',) for record in data]
        # prepare the index variable - this should also be available outside the try statement ?!?
        id_index = fields.index('id')
        print "load() fields after id field added: %s" % fields
        print "load() data after id field added: %s" % data

    # Make sure that if there is a CSB-Nummer it is correctly used for the id field (external id)
    try:
        openat_csb_nummer_index = fields.index('openat_csb_nummer')
    except:
        print "load() no CSB Number found - But that's probably ok!"
    else:
        # Transfer the csb-nummer to the id field - don't change any other id field
        for x in range(0, len(data)):
            record = list(data[x])
            if record[openat_csb_nummer_index]:
                record[id_index] = u'__export__.' + record[openat_csb_nummer_index]
                data[x] = tuple(record)
        print "load() data after csb number transferred: %s" % str(data)
        print "load() record: %s" % record

    # Check if the state field exists and if not add it and set the right state
    try:
        state_index = fields.index('state')
    except:
        print "load() no State field found - But that's probably ok!"
        # Add state field to the end of the fields list
        fields.append('state')
        # Add an empty string to the end of the tuples inside the data list
        data = [record + (u'',) for record in data]
        # prepare the index variable.
        state_index = fields.index('state')
        print "load() fields after state field added: %s" % fields
        print "load() data after state field added: %s" % data

    # Add the correct state
    for x in range(0, len(data)):
        record = list(data[x])
        # Todo: Get all available Translation terms for product.template.state:ppapproved
        #source = self.pool.get('ir.translation')._get_source
        #print "load() self.pool.get('ir.translation')._get_source: %s" % source
        # End Todo
        if record[state_index] != u'ppapproved':
            try:
                # If no CSB Number Field is present at all openat_csb_nummer_index will be undefined
                # therefore we have to use try
                if record[openat_csb_nummer_index]:
                    record[state_index] = u'pptocheck'
            except:
                record[state_index] = u'ppnew'
        data[x] = tuple(record)
    print "load() data after state corrected: %s" % str(data)

    # Add the Import User name
    # ToDo find a way for the "First Import" user
    user = self.pool.get('res.users').browse(cr, uid, uid)
    print "load() user.name: %s " % user.name
    fields.append('user_update')
    data = [record + (unicode(user.name),) for record in data]

    # ToDo Check if a workflow is used instead of manually doing it - if the workflow is also respected on import
    return super(product_product, self).load(cr, uid, fields, data, context=context)
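# Note: an illustrative sketch (not part of the module) of the shape of the
# arguments the load() override above receives from the OpenERP import
# machinery: `fields` is a flat list of column names and `data` is a list of
# row tuples of unicode strings.  The function name, the `registry` parameter
# and the column values are assumptions for illustration only.
def _example_load_call(registry, cr, uid):
    fields = ['default_code', 'name']
    data = [
        (u'P001', u'Product one'),
        (u'P002', u'Product two'),
    ]
    # With no 'openat_csb_nummer' column present, the override above leaves the
    # external ids empty and sets the state of every row to 'ppnew' before
    # delegating to the standard loader, which returns the created ids and any
    # import messages.
    return registry.get('product.product').load(cr, uid, fields, data)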
def tmp_xls_import(cr, inst, source, fields, poss, sheet_line_poss, model_nm):
    #def tmp_xls_import(cr, inst, source, fields, poss, sheet_line_poss):
    pool = pooler.get_pool(cr.dbname)
    logger = logging.getLogger('imp')
    product_pool = pool.get(model_nm)
    # product_pool = pool.get('product.product')
    uid = 1
    datas = []
    nullify_ids_list = []
    supplier = inst.name
    exist_ids_list = product_pool.search(cr, uid, [('supplier_id', '=', supplier.id)])
    updated_ids_list = []
    config_ids = pool.get('imp.config').search(cr, 1, [])
    config = pool.get('imp.config').browse(cr, 1, config_ids)[0]
    fields_types = eval(config.import_fields_types)
    sheet = source.sheets()[sheet_line_poss[0]]
    fields += ['supplier_id.id']
    ## fields += ['supplier.id']
    #raise osv.except_osv(_('Warning'), str(fields))
    # # category = inst.category_id
    # # if category:
    # #     fields += ['categ_id.id']
    # hardcode by Sasha
    # required fields for non-etalon catalog
    # yustas
    qty_pos = fields.index('quantity')
    price_pos = fields.index('price')
    defcode_pos = fields.index('default_code')
    name_pos = fields.index('name')
    # qty_pos = fields.index('imp_qty')
    # price_pos = fields.index('standard_price')
    # defcode_pos = fields.index('default_code')
    # name_pos = fields.index('name')
    new_prod_count = 0
    exist_prod_count = 0
    error_rows_list = []
    for row in xrange(sheet_line_poss[1], sheet.nrows):
        vals = []
        itr = 0
        try:
            for col in poss:
                cell_value = sheet.cell(row, col).value
                cell_type = sheet.cell(row, col).ctype
                fld = fields[itr]
                if cell_type == 2:
                    if fields_types[fld] == 'char':
                        if isinstance(cell_value, float):
                            if modf(cell_value)[0]:
                                cell_value = str(cell_value)
                            else:
                                cell_value = str(int(cell_value))
                else:
                    if fields_types[fld] == 'float':
                        try:
                            if cell_value:
                                cell_value = float(cell_value)
                            else:
                                cell_value = 0.0
                        except:
                            if isinstance(cell_value, str) or isinstance(cell_value, unicode):
                                cell_value = cell_value.replace(',', '.')
                                for sign in ['<', '>']:
                                    if sign in cell_value:
                                        cell_value = cell_value.replace(sign, '')
                                cell_value = float(cell_value)
                    if fields_types[fld] == 'int':
                        if cell_value:
                            cell_value = int(cell_value)
                        else:
                            cell_value = 0
                vals.append(cell_value)
                itr += 1
        except:
            #vals = []
            error_rows_list.append(row + 1)
            logger.error("Cannot import the line #%s", row + 1)
        if any(vals):
            exist_id = product_pool.search(cr, uid, [('default_code', '=', vals[defcode_pos]), ('supplier_id', '=', supplier.id)])
            # exist_id = product_pool.search(cr, uid, [('name','=',vals[defcode_pos]),('supplier','=',supplier.id)])
            # exist_id = product_pool.search(cr, uid, [('default_code','=',vals[defcode_pos]),('supplier','=',supplier.id)])
            if not exist_id:
                vals += [supplier]
                ## if category:
                ##     vals += [category]
                datas.append(vals)
                new_prod_count += 1
            else:
                updated_ids_list.append(exist_id[0])
                exist = product_pool.browse(cr, uid, exist_id[0])
                product_rewrite = product_pool.write(cr, uid, exist.id, {
                    'quantity': vals[qty_pos],
                    #'imp_qty': vals[qty_pos],
                    'price': vals[price_pos],
                    # 'standard_price': vals[price_pos]
                })
                if product_rewrite:
                    logger.warning("\nProduct with ID = %s was rewritten with values:\nQTY = %s\nPRICE = %s"
                                   % (exist_id, vals[qty_pos], vals[price_pos]))
                # if exist.price_ref:
                #     part_inf_rewrite = pool.get('pricelist.partnerinfo').write(cr, uid, exist.price_ref.id,
                #         {'sup_quantity': vals[qty_pos], 'price': vals[price_pos]})
                #         #{'min_quantity': vals[qty_pos], 'price': vals[price_pos]})
                #         #yustas
                #     if part_inf_rewrite:
                #         logger.warning("\nPartner info with ID = %s was rewritten with values:\nQTY = %s\nPRICE = %s"
                #                        % (exist.price_ref, vals[qty_pos], vals[price_pos]))
                exist_prod_count += 1
    print unicode(exist_ids_list)
    print unicode(updated_ids_list)
    nullify_ids_list = list(set(exist_ids_list) - set(updated_ids_list))
    product_pool.write(cr, uid, nullify_ids_list, {'quantity': 0})
    report = u'* %s : %d %s:' % (time.strftime('%d.%m.%y %H:%M:%S'), new_prod_count + exist_prod_count, ('records imported'))
    # report = u'* %s : %d %s:' % (time.strftime('%d.%m.%y %H:%M:%S'), new_prod_count + exist_prod_count, _('records imported'))
    report += u'\n\t- %d %s' % (new_prod_count, ('records created'))
    #report += u'\n\t- %d %s' % (new_prod_count, _('records created'))
    report += u'\n\t- %d %s' % (exist_prod_count, ('records updated'))
    #report += u'\n\t- %d %s' % (exist_prod_count, _('records updated'))
    report += u'\n\t- %d %s' % (len(nullify_ids_list), ('records nullified'))
    if error_rows_list:
        report += u'\n\t- %s: %s' % (('could not import records on rows'), str(error_rows_list)[1:-1])
        #report += u'\n\t- %s: %s' % (_('could not import records on rows'), str(error_rows_list)[1:-1])
    if inst.etalon_catalog:
        fields += ['if_etalon']
        for l in datas:
            l.append('True')
    return product_pool.import_data(cr, uid, fields, datas), report
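# Note: a small self-contained sketch (not from the original importer) of the
# cell normalisation that tmp_xls_import() performs inline, written against
# xlrd's named cell-type constants instead of the bare ctype value 2.  The
# helper name and the `expected_type` parameter are assumptions for
# illustration.
import xlrd

def _normalize_cell(cell, expected_type):
    # expected_type is 'char', 'float' or 'int', as in fields_types above.
    value = cell.value
    if cell.ctype == xlrd.XL_CELL_NUMBER and expected_type == 'char':
        # A numeric cell imported into a char field: drop the spurious '.0'.
        return str(value) if value % 1 else str(int(value))
    if expected_type == 'float':
        try:
            return float(value) if value else 0.0
        except ValueError:
            # Tolerate decimal commas and '<'/'>' markers such as '<1,5'.
            cleaned = value.replace(',', '.').replace('<', '').replace('>', '')
            return float(cleaned)
    if expected_type == 'int':
        return int(value) if value else 0
    return value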
def anonymize_database(self, cr, uid, ids, context=None):
    """Sets the 'anonymized' state to defined fields"""
    # create a new history record:
    anonymization_history_model = self.pool.get('ir.model.fields.anonymization.history')
    vals = {
        'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        'state': 'started',
        'direction': 'clear -> anonymized',
    }
    history_id = anonymization_history_model.create(cr, uid, vals)

    # check that all the defined fields are in the 'clear' state
    state = self.pool.get('ir.model.fields.anonymization')._get_global_state(cr, uid, context=context)
    if state == 'anonymized':
        self._raise_after_history_update(
            cr, uid, history_id, _('Error !'),
            _("The database is currently anonymized, you cannot anonymize it again."))
    elif state == 'unstable':
        msg = _("The database anonymization is currently in an unstable state. Some fields are anonymized,"
                " while some fields are not anonymized. You should try to solve this problem before trying to do anything.")
        self._raise_after_history_update(cr, uid, history_id, 'Error !', msg)

    _logger.info('Ready to run the anonymization.')

    # do the anonymization:
    dirpath = os.environ.get('HOME') or os.getcwd()
    rel_filepath = 'field_anonymization_%s_%s.pickle' % (cr.dbname, history_id)
    abs_filepath = os.path.abspath(os.path.join(dirpath, rel_filepath))

    ir_model_fields_anonymization_model = self.pool.get('ir.model.fields.anonymization')
    field_ids = ir_model_fields_anonymization_model.search(cr, uid, [('state', '<>', 'not_existing')], context=context)
    fields = ir_model_fields_anonymization_model.browse(cr, uid, field_ids, context=context)

    if not fields:
        msg = "No fields are going to be anonymized."
        self._raise_after_history_update(cr, uid, history_id, 'Error !', msg)

    data = []
    for field in fields:
        _logger.info('Running anonymization: %s/%s', fields.index(field), len(fields))
        model_name = field.model_id.model
        field_name = field.field_id.name
        field_type = field.field_id.ttype
        table_name = self.pool.get(model_name)._table

        # get the current value
        sql = "select id, %s from %s" % (field_name, table_name)
        cr.execute(sql)
        records = cr.dictfetchall()
        for record in records:
            data.append({
                "model_id": model_name,
                "field_id": field_name,
                "id": record['id'],
                "value": record[field_name],
            })

            # anonymize the value:
            anonymized_value = None
            sid = str(record['id'])
            if field_type == 'char':
                anonymized_value = 'xxx' + sid
            elif field_type == 'selection':
                anonymized_value = 'xxx' + sid
            elif field_type == 'text':
                anonymized_value = 'xxx' + sid
            elif field_type == 'boolean':
                anonymized_value = random.choice([True, False])
            elif field_type == 'date':
                anonymized_value = '2011-11-11'
            elif field_type == 'datetime':
                anonymized_value = '2011-11-11 11:11:11'
            elif field_type == 'float':
                anonymized_value = 0.0
            elif field_type == 'integer':
                anonymized_value = 0
            elif field_type in ['binary', 'many2many', 'many2one', 'one2many', 'reference']:
                # cannot anonymize these kinds of fields
                msg = _("Cannot anonymize fields of these types: binary, many2many, many2one, one2many, reference.")
                self._raise_after_history_update(cr, uid, history_id, 'Error !', msg)

            if anonymized_value is None:
                self._raise_after_history_update(cr, uid, history_id, _('Error !'), _("Anonymized value is None. This cannot happen."))

            sql = "update %(table)s set %(field)s = %%(anonymized_value)s where id = %%(id)s" % {
                'table': table_name,
                'field': field_name,
            }
            cr.execute(sql, {
                'anonymized_value': anonymized_value,
                'id': record['id'],
            })

    # save pickle:
    _logger.info('Saving to pickle file.')
    fn = open(abs_filepath, 'w')
    pickle.dump(data, fn, pickle.HIGHEST_PROTOCOL)
    # close the file so it is flushed before it is reopened for reading below
    fn.close()

    # update the anonymization fields:
    values = {
        'state': 'anonymized',
    }
    ir_model_fields_anonymization_model.write(cr, uid, field_ids, values, context=context)

    # add a result message in the wizard:
    msgs = [
        "Anonymization successful.",
        "",
        "Do not forget to save the resulting file to a safe place because you will not be able to revert the anonymization without this file.",
        "",
        "This file is also stored in the %s directory. The absolute file path is: %s.",
    ]
    msg = '\n'.join(msgs) % (dirpath, abs_filepath)

    fn = open(abs_filepath, 'r')
    self.write(cr, uid, ids, {
        'msg': msg,
        # 'file_export': base64.encodestring(fn.read()),
    })
    fn.close()

    # update the history record:
    anonymization_history_model.write(cr, uid, history_id, {
        'field_ids': [[6, 0, field_ids]],
        'msg': msg,
        'filepath': abs_filepath,
        'state': 'done',
    })

    # handle the view:
    view_id = self._id_get(cr, uid, 'ir.ui.view', 'view_ir_model_fields_anonymize_wizard_form', 'anonymization')

    return {
        'res_id': ids[0],
        'view_id': [view_id],
        'view_type': 'form',
        'view_mode': 'form',
        'res_model': 'ir.model.fields.anonymize.wizard',
        'type': 'ir.actions.act_window',
        'context': {'step': 'just_anonymized'},
        'target': 'new',
    }