# NOTE(review): whitespace-mangled fragment of an import script; the
# indentation below is reconstructed from Python syntax — confirm against the
# original file. The leading `else:` belongs to an `if` that starts before
# this fragment (presumably the branch where the model's sub_type carries no
# position info — TODO confirm).
else:
    modelPosition[model['field']] = ''
    modelPosition1.append('')
# pprint(modelColumns)
# Default the extension to 'txt' when the file name contains no dot, so the
# extension check below cannot raise IndexError.
filenameExtension = fileName.split('.')
if len(filenameExtension) < 2:
    filenameExtension.append('txt')
# NOTE(review): filenameExtension[1] picks the wrong part for names with
# several dots (e.g. 'a.b.csv' -> 'b'); [-1] would be safer — TODO confirm.
if filenameExtension[1] in ['csv', 'xlsx']:
    if (filenameExtension[1] == 'csv'):
        # `sep` comes from the surrounding (not visible) scope; row index 1
        # (the first data row, presumably a sub-header) is skipped.
        inputDataRaw = excel.getDataCSV(
            file_path=importLogInfo['file_path'],
            dtype=object,
            sep=sep,
            header=0,
            skiprows=[1],
            names=modelColumns,
            na_values='')
    else:
        inputDataRaw = excel.getDataExcel(
            file_path=importLogInfo['file_path'],
            active_sheet='Sheet1',
            skiprows=[1],
            header=0,
            names=modelColumns,
            na_values='')
    # One dict per spreadsheet row, keyed by the model-defined column names.
    inputData = inputDataRaw.to_dict('records')
    # Fragment is truncated here: the per-row loop body is not visible.
    for idx, row in enumerate(inputData):
# NOTE(review): mangled fragment. The first statements operate on `model`,
# so they presumably sit inside a `for model in models:` loop that starts
# before this fragment; the dedent boundary below is reconstructed — confirm.
# Collect per-field metadata from the model's JSON sub_type: display format,
# which fields form the update key, and which fields must be null-checked.
subtype = json.loads(model['sub_type'])
if 'format' in subtype.keys():
    modelFormat[model['field']] = subtype['format']
else:
    modelFormat[model['field']] = ''
if 'update_key' in subtype.keys() and subtype['update_key'] == 1:
    updateKey.append(model['field'])
if 'check_null_key' in subtype.keys():
    checkNullKey.append(model['field'])
filenameExtension = ftpInfo['filename'].split('.')
# NOTE(review): [1] raises IndexError for names without a dot and picks the
# wrong part for names with several dots — TODO confirm upstream guarantees.
if(filenameExtension[1] == 'csv'):
    inputDataRaw = excel.getDataCSV(file_path=importLogInfo['file_path'], header=None, names=None, encoding='ISO-8859-1')
else:
    inputDataRaw = excel.getDataExcel(file_path=importLogInfo['file_path'], header=None, names=None, na_values='', encoding='ISO-8859-1')
inputData = inputDataRaw.to_dict('records')
insertData = []
updateDate = []  # NOTE(review): likely a typo for updateData — confirm against later use before renaming
errorData = []
temp = {}
countList = 0
for idx, row in enumerate(inputData):
    temp = {}
    # BUG(review): `is not ''` tests object identity, not equality — should
    # be `!= ''` (emits SyntaxWarning on modern CPython). Left untouched in
    # this comment-only edit.
    if row[2] is not None and row[2] is not '':
        # Fragment truncated: the try body and the rest of the loop are not
        # visible beyond this point.
        for key,cell in enumerate(modelColumns):
            try:
# NOTE(review): mangled fragment; indentation reconstructed from syntax.
# Load the field definitions for this collection from the 'Model' collection,
# ordered by 'index', then build the column-name list, a field->type map and
# a field->display-format map from each model row's JSON sub_type.
models = _mongodb.get(MONGO_COLLECTION='Model', WHERE={'collection': collection}, SORT=[('index', 1)], SELECT=['index', 'collection', 'field', 'type', 'sub_type'], TAKE=1000)
for model in models:
    modelColumns.append(model['field'])
    modelConverters[model['field']] = model['type']
    subtype = json.loads(model['sub_type'])
    if 'format' in subtype.keys():
        modelFormat[model['field']] = subtype['format']
    else:
        modelFormat[model['field']] = ''
filenameExtension = fileName.split('.')
# NOTE(review): [1] raises IndexError for dot-less names — TODO confirm
# upstream guarantees an extension.
if(filenameExtension[1] == 'csv'):
    # Semicolon-separated CSV; columns renamed to the model-defined fields.
    inputDataRaw = excel.getDataCSV(file_path=importLogInfo['file_path'], sep=';', header=0, names=modelColumns)
else:
    inputDataRaw = excel.getDataExcel(file_path=importLogInfo['file_path'], active_sheet='Sheet1', header=0, names=modelColumns, na_values='')
inputData = inputDataRaw.to_dict('records')
insertData = []
updateDate = []  # NOTE(review): likely a typo for updateData — confirm before renaming
errorData = []
temp = {}
countList = 0
# Rows without a contract_no are skipped. Fragment truncated: the per-cell
# loop body is not visible.
for idx, row in enumerate(inputData):
    temp = {}
    if row['contract_no'] not in ['', None]:
        for cell in row:
# NOTE(review): mangled fragment of a SIBS/ZACCF import script; indentation
# reconstructed from syntax. Reads the import-log record identified by the
# CLI argument, loads the ZACCF column model, then ingests selected CSV
# columns and validates each record.
collection = common.getSubUser(subUserType, 'Sibs')
importLogId = sys.argv[1]  # import-log ObjectId passed on the command line
sibsColumns = []
sibsConverters = {}
try:
    importLogInfo = mongodb.getOne(MONGO_COLLECTION=common.getSubUser(subUserType, 'Import'), WHERE={'_id': ObjectId(importLogId)})
    modelsSibs = _mongodb.get(MONGO_COLLECTION='Model', WHERE={'collection': collection}, SORT=[('index', 1)], SELECT=['index', 'collection', 'field', 'type'])
    for model in modelsSibs:
        sibsColumns.append(model['field'])
        # Force string-typed model fields to be read as str by pandas.
        if(model['type'] == 'string'):
            sibsConverters[model['field']] = str
    # Only columns 5, 6, 7, 116 and 122 of the raw file are loaded —
    # presumably account_no / cif / cus_name / advance etc.; TODO confirm
    # the mapping against the ZACCF layout.
    zaccfs = excel.getDataCSV(file_path=importLogInfo['file_path'], header=None, names=sibsColumns, usecols=[5, 6, 7, 116, 122], converters=sibsConverters)
    zaccfList = zaccfs.to_dict('records')
    insertData = []
    updateData = []
    errorData = []
    temp = {}
    countList = 0
    for idx, zaccf in enumerate(zaccfList):
        # Require the three key fields to be present before processing.
        if zaccf['account_no'] not in (None, '') and zaccf['cif'] not in (None, '') and zaccf['cus_name'] not in (None, ''):
            result = True
            # Existence check by account_no decides insert vs update later
            # (presumably — truncated before the decision is visible).
            checkSibs = mongodb.getOne(MONGO_COLLECTION=collection, WHERE={'account_no': zaccf['account_no']}, SELECT=['account_no'])
            zaccf['import_id'] = importLogId
            # Fragment truncated: the except clause for this try is not
            # visible beyond this point.
            try:
                zaccf['advance'] = float(zaccf['advance'])
# NOTE(review): mangled fragment; indentation reconstructed. The leading
# `else:` belongs to an `if` that starts before this fragment — TODO confirm.
else:
    modelPosition[model['field']] = ''
    modelPosition1.append('')
# Default the extension to 'txt' when the file name contains no dot.
filenameExtension = fileName.split('.')
if len(filenameExtension) < 2:
    filenameExtension.append('txt')
if filenameExtension[1] in ['csv', 'xlsx']:
    if (filenameExtension[1] == 'csv'):
        # UTF-16 encoded, quote-delimited source; `sep` comes from the
        # surrounding (not visible) scope.
        inputDataRaw = excel.getDataCSV(
            file_path=importLogInfo['file_path'],
            header=0,
            dtype=object,
            sep=sep,
            names=modelColumns,
            na_values='',
            encoding='utf-16',
            quotechar='"')
    else:
        inputDataRaw = excel.getDataExcel(
            file_path=importLogInfo['file_path'],
            header=0,
            names=modelColumns,
            na_values='',
            encoding='utf-16',
            quotechar='"')
    # One dict per row, keyed by the model-defined column names.
    inputData = inputDataRaw.to_dict('records')
# NOTE(review): mangled fragment, truncated at the start — the first line is
# the tail of an update-key check inside a `for model in models:` loop that
# begins before this fragment; reconstruction of the dedent is a best guess.
updateKey.append(model['field'])
if 'check_null_key' in subtype.keys():
    checkNullKey.append(model['field'])
filenameExtension = ftpInfo['filename'].split('.')
# The FTP config stores the header spec as text: the literal string 'None'
# means headerless, otherwise it is converted element-wise to ints.
# NOTE(review): if ftpInfo['header'] is a plain string like '01' this
# iterates its characters — confirm it is a list/sequence of numbers.
if ftpInfo['header'] == 'None':
    header = None
else:
    header = [int(x) for x in ftpInfo['header']]
if (filenameExtension[1] == 'csv'):
    inputDataRaw = excel.getDataCSV(file_path=importLogInfo['file_path'], sep=',', header=header, names=modelColumns, encoding='ISO-8859-1', low_memory=False)
else:
    inputDataRaw = excel.getDataExcel(file_path=importLogInfo['file_path'], header=header, names=modelColumns, na_values='', encoding='ISO-8859-1')
inputData = inputDataRaw.to_dict('records')
insertData = []
updateDate = []  # NOTE(review): likely a typo for updateData — confirm before renaming
errorData = []
# NOTE(review): mangled fragment that begins mid-call — the first line is the
# trailing keyword arguments of a user query (presumably
# `users = _mongodb.get(...` or similar) started before this fragment.
SELECT=['extension', 'agentname'], SORT=([('id', 1)]))
users = list(users)
# Per-extension counters, both initialised to zero — presumably used for
# round-robin / random assignment further down (truncated) — TODO confirm.
arr = {}
# NOTE(review): `random` shadows the stdlib module name in this scope.
random = {}
for user in users:
    arr[user['extension']] = 0
    random[user['extension']] = 0
# fileName = importLogInfo['file_name']
# filenameExtension = fileName.split('.')
# The import file is always read as a headered, Latin-1 CSV; the extension
# dispatch below was disabled and kept as commented-out history.
dataLibrary = excel.getDataCSV(file_path=importLogInfo['file_path'], header=0, names=None, index_col=None, usecols=None, dtype=object, converters=None, skiprows=None, na_values=None, encoding='ISO-8859-1')
# if(filenameExtension[1] == 'csv'):
#     dataLibrary = excel.getDataCSV(file_path=importLogInfo['file_path'], dtype=object, sep='\t', lineterminator='\r', header=None, names=None, na_values='')
# else:
#     dataLibrary = excel.getDataExcel(file_path=importLogInfo['file_path'], header=0, names=None, na_values='')
# Iterate the raw numpy rows; headers (loaded before this fragment) drive
# the per-column handling. Fragment truncated: loop body not visible.
listDataLibrary = dataLibrary.values
for key, listCol in enumerate(listDataLibrary):
    temp = {}
    checkErr = False
    header_index = 0
    for idx, header in enumerate(headers):
# NOTE(review): mangled fragment truncated at the start — these first
# statements operate on `model`, so they presumably sit inside a
# `for model in models:` loop beginning before this fragment; the dedent
# boundary below is reconstructed.
modelConverters[model['field']] = model['type']
subtype = json.loads(model['sub_type'])
if 'format' in subtype.keys():
    modelFormat[model['field']] = subtype['format']
else:
    modelFormat[model['field']] = ''
filenameExtension = ftpInfo['filename'].split('.')
# Literal string 'None' in the FTP config means headerless input.
if ftpInfo['header'] == 'None':
    header = None
else:
    header = [ int(x) for x in ftpInfo['header'] ]
if(filenameExtension[1] == 'csv'):
    # Separator and (for Excel) sheet name come from the FTP config record.
    inputDataRaw = excel.getDataCSV(file_path=importLogInfo['file_path'], dtype='object', sep=ftpInfo['sep'], header=header, names=modelColumns)
else:
    inputDataRaw = excel.getDataExcel(file_path=importLogInfo['file_path'], dtype='object', active_sheet=ftpInfo['sheet'], header=header, names=modelColumns, na_values='')
inputData = inputDataRaw.to_dict('records')
insertData = []
updateDate = []  # NOTE(review): likely a typo for updateData — confirm before renaming
errorData = []
temp = {}
countList = 0
# Rows need both a cif and an acc value to be processed. Fragment truncated:
# the per-cell loop body is not visible.
for idx, row in enumerate(inputData):
    temp = {}
    if row['cif'] not in ['', None] and row['acc'] not in ['', None]:
        for cell in row:
# NOTE(review): mangled fragment; indentation reconstructed. The leading
# `else:` belongs to an `if` that starts before this fragment — TODO confirm.
else:
    modelPosition[model['field']] = ''
    modelPosition1.append('')
# Default the extension to 'txt' when the file name contains no dot.
filenameExtension = fileName.split('.')
if len(filenameExtension) < 2:
    filenameExtension.append('txt')
# Full refresh: the target collection is emptied before re-import.
mongodb.remove_document(MONGO_COLLECTION=collection)
if filenameExtension[1] in ['csv', 'xlsx']:
    if (filenameExtension[1] == 'csv'):
        # Only raw columns 5, 6, 7, 116, 122 are loaded and mapped onto the
        # model-defined field names — TODO confirm against the file layout.
        inputDataRaw = excel.getDataCSV(
            file_path=importLogInfo['file_path'],
            dtype=object,
            header=None,
            names=modelColumns,
            usecols=[5, 6, 7, 116, 122])
    else:
        inputDataRaw = excel.getDataExcel(
            file_path=importLogInfo['file_path'],
            header=None,
            names=modelColumns,
            na_values='')
    inputData = inputDataRaw.to_dict('records')
    # Rows require account_no, cif and cus_name to be present. Fragment
    # truncated: the per-cell loop body is not visible.
    for idx, row in enumerate(inputData):
        temp = {}
        result = True
        if row['account_no'] not in (None, '') and row['cif'] not in (
                None, '') and row['cus_name'] not in (None, ''):
            for cell in row:
# NOTE(review): mangled fragment; indentation reconstructed. Script prologue
# that wires up helpers, then seeds the 'Model' metadata for LO_ZACCF from a
# hard-coded header spreadsheet.
_mongodb = Mongodb("_worldfone4xs")
excel = Excel()
config = Config()
ftp = Ftp()
common = Common()
base_url = config.base_url()
now = datetime.now()  # NOTE(review): naive local time — confirm intended tz
subUserType = 'LO'
collection = common.getSubUser(subUserType, 'SBV')
# Hard-coded path to the ZACCF header definition workbook.
url = '/var/www/html/worldfone4xs_ibm/cronjob/python/Loan/ZACCF_header.xlsx'
filename = 'ZACCF_header.xlsx'
filenameExtension = filename.split('.')
# With the constant .xlsx filename above, the CSV branch is effectively dead.
if (filenameExtension[1] == 'csv'):
    inputDataRaw = excel.getDataCSV(file_path=url, sep=',', header=0, low_memory=False)
else:
    inputDataRaw = excel.getDataExcel(file_path=url, header=0)
inputData = inputDataRaw.to_dict('records')
importData = []
# The first three data rows of the header sheet describe each column:
# row 0 = ordering index, row 1 = display title, row 2 = source column letter
# (presumably — TODO confirm against the workbook).
for key in inputData[0].keys():
    temp = {}
    temp['index'] = inputData[0][key]
    temp['collection'] = 'LO_ZACCF'
    temp['field'] = key
    temp['title'] = inputData[1][key]
    temp['type'] = 'string'
    # Hand-built JSON blob; fragment truncated before temp is appended.
    temp['sub_type'] = '{"column": ' + '"' + inputData[2][key] + '"' + '}'
# NOTE(review): mangled fragment; indentation reconstructed. Loads the
# column model for this collection, reads the import file as a Latin-1 CSV,
# and walks each cell against the model headers.
insertData = []
resultData = []
errorData = []
headers = _mongodb.get(MONGO_COLLECTION='Model', WHERE={"collection": collection}, SELECT=['index', 'field', 'type'], SORT=([('index', 1)]))
headers = list(headers)
file_path = importLogInfo['file_path']
dataLibrary = excel.getDataCSV(file_path=file_path, header=0, names=None, index_col=None, usecols=None, dtype=object, converters=None, skiprows=None, na_values=None, encoding='latin-1')
listDataLibrary = dataLibrary.values
for key, listCol in enumerate(listDataLibrary):
    temp = {}
    checkErr = False
    for idx, header in enumerate(headers):
        err = {}
        # Header index 27 is skipped entirely; the pprint looks like
        # leftover debug output — NOTE(review): consider removing it.
        if header['index'] == 27:
            pprint(header)
            continue
        # pandas renders missing cells as NaN; compared via str() here.
        # Fragment truncated: the NaN-handling branch is not visible.
        if str(listDataLibrary[key][idx]) == 'nan':
# NOTE(review): mangled fragment; indentation reconstructed. `models` is
# defined before this fragment. Builds the column list, field->type map and
# field->format map, then reads the import file with header row 1.
for model in models:
    modelColumns.append(model['field'])
    modelConverters[model['field']] = model['type']
    subtype = json.loads(model['sub_type'])
    if 'format' in subtype.keys():
        modelFormat[model['field']] = subtype['format']
    else:
        modelFormat[model['field']] = ''
filenameExtension = fileName.split('.')
# NOTE(review): [1] raises IndexError for dot-less names — TODO confirm
# upstream guarantees an extension.
if (filenameExtension[1] == 'csv'):
    # Semicolon-separated; header=1 skips the first physical row and uses
    # the second as the header before renaming to modelColumns.
    inputDataRaw = excel.getDataCSV(file_path=importLogInfo['file_path'], dtype=object, sep=";", header=1, names=modelColumns)
else:
    inputDataRaw = excel.getDataExcel(file_path=importLogInfo['file_path'], dtype=object, active_sheet="Sheet1", header=1, names=modelColumns, na_values='')
inputData = inputDataRaw.to_dict('records')
insertData = []
updateDate = []  # NOTE(review): likely a typo for updateData — confirm before renaming
errorData = []