def insertNthChild(params, data, is_last=False):
    po = fn.getNestedElement(params, 'po.po')
    gk = fn.getNestedElement(params, 'po.gk')
    naming_keymap = fn.getNestedElement(params, 'po.naming_keymap')
    info = data[data[po] == gk]
    if is_last:
        obj_ = {
            'id': fn.convertToSnakecase(gk),
            'name': info['item_desc'].values[0],
            'code': info['item_code'].values[0],
            'quantity': int(info['available_quantity'].values[0]),
            'pku': round(float(info['pku'].values[0]), 2),
        }
    else:
        obj_ = {
            'id': fn.convertToSnakecase(gk),
            'name': info[naming_keymap[po]].unique().tolist()[0],
            'code': info[po].unique().tolist()[0],
        }
    result = {
        'obj_': obj_,
        'info': info,
    }
    return result
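# Hypothetical usage sketch (names and values below are illustrative, not from this module).
# Assumes `summation_df` is a pandas DataFrame carrying `item_code`, `item_desc`,
# `available_quantity` and `pku` columns, and that `naming_keymap` maps each
# process-order column to its display-name column.
#
#   child = insertNthChild(params={
#       'po': {
#           'po': 'item_code',        # column used to filter the dataframe
#           'gk': 'A0001',            # group key value to match in that column
#           'naming_keymap': naming_keymap,
#       },
#   }, data=summation_df, is_last=True)
#   child['obj_']  # -> {'id': ..., 'name': ..., 'code': ..., 'quantity': ..., 'pku': ...}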
def addIntegrityData(data, category):
    global date_count
    row = fn.getNestedElement(data, 'row')
    result = fn.getNestedElement(data, 'to_update')
    category_data_map = {
        'state': {
            'code': str(fn.getNestedElement(row, 'state_code')),
            'name': fn.getNestedElement(row, 'state_name'),
            'updated_at': fn.getNestedElement(row, 'state_updated_at'),
            'unique_code': fn.convertToSnakecase(fn.getNestedElement(row, 'state_code')),
        },
        'facility': {
            'code': str(fn.getNestedElement(row, 'facility_code')),
            'name': fn.getNestedElement(row, 'facility_name'),
            'updated_at': fn.getNestedElement(row, 'facility_updated_at'),
            'unique_code': fn.convertToSnakecase('_'.join([
                str(fn.getNestedElement(row, 'state_code')),
                str(fn.getNestedElement(row, 'facility_code')),
            ])),
        },
    }
    code = fn.getNestedElement(category_data_map, '{0}.code'.format(category))
    name = fn.getNestedElement(category_data_map, '{0}.name'.format(category))
    updated_at = fn.getNestedElement(category_data_map, '{0}.updated_at'.format(category))
    unique_code = fn.getNestedElement(category_data_map, '{0}.unique_code'.format(category))
    if code not in result:
        result[code] = {
            'id': unique_code,
            'name': name,
            'code': code,
            'count': 1,
        }
    else:
        result[code]['count'] += 1
    if updated_at not in result[code]:
        result[code][updated_at] = 0
        if category == 'state':
            date_count += 1  # limit loop data / show data in N days
    result[code][updated_at] += 1
    return result
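# Hypothetical usage sketch (row values are made up). `date_count` must already exist as a
# module-level global, and `to_update` accumulates one entry per state/facility code.
#
#   to_update = {}
#   row = {'state_code': '01', 'state_name': 'JOHOR', 'state_updated_at': '2020-01-01'}
#   to_update = addIntegrityData(data={'row': row, 'to_update': to_update}, category='state')
#   to_update['01']['count']  # -> 1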
def getTableHeader(data):
    thead = data.thead.find_all('th')
    result = []
    for th in thead:
        result.append(fn.convertToSnakecase(th.get_text()))
    return result
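# Hypothetical usage sketch (assumes the table markup is parsed with BeautifulSoup and that
# `fn.convertToSnakecase` lowercases and underscores the header text):
#
#   from bs4 import BeautifulSoup
#   html = '<table><thead><tr><th>Item Code</th><th>Item Name</th></tr></thead></table>'
#   getTableHeader(BeautifulSoup(html, 'html.parser'))  # -> ['item_code', 'item_name']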
def getIntegrity(params, data):
    dbManager = SharedMemoryManager.getInstance()
    db = dbManager.query()
    check_data = fn.getNestedElement(data, 'check_data')
    facility = ModelFacility.getActiveFacility()
    filter_key = fn.getNestedElement(params, 'filter_key')
    durations = fn.getNestedElement(params, 'durations')
    result = fn.getNestedElement(data, 'to_update')
    state_data = fn.getNestedElement(data, 'state')
    facility_data_by_state = fn.getNestedElement(data, 'facility')
    data_list = getFacilityByState(params=params, data=check_data)
    for key in data_list:
        row = fn.getNestedElement(data_list, key)
        count = getTotalCount(params={
            'filter_key': filter_key,
            'key': key,
        }, data={
            'row': row,
            'facility': facility,
        })
        obj_ = {
            'id': fn.convertToSnakecase(fn.getNestedElement(row, 'id')),
            'name': fn.getNestedElement(row, 'name'),
            'code': fn.getNestedElement(row, 'code'),
            'data': [],
        }
        for idx in range(len(durations) - 1, -1, -1):
            date = durations[idx]
            previous_date = DateTime.toString(DateTime.getDaysAgo(1, datefrom=date))
            if filter_key:
                date_count = fn.getNestedElement(
                    facility_data_by_state,
                    '{0}.{1}.{2}'.format(filter_key, key, date), 0)
                if not date_count:
                    date_count = 0
            else:
                # do not include the positive ones; count only the missing facility quantity
                date_count = 0
            if filter_key:
                val = date_count - count
            else:
                val = 0
            obj_['data'].append({
                # negative value means data is missing, 0 means complete,
                # positive value means data not found in the user-uploaded facility data
                previous_date: val,
            })
        if not filter_key:
            # top-level (state) call: recurse once to fill per-facility integrity under this state
            obj_['facility'] = []
            obj_['facility'] = getIntegrity(params={
                'filter_key': key,
                'durations': durations,
            }, data={
                'state': state_data,
                'facility': facility_data_by_state,
                'to_update': obj_['facility'],
                'check_data': check_data,
            })
        result.append(obj_)
    return result
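# Illustrative shape of the list returned by getIntegrity (keys and values are made up):
#
#   [
#       {
#           'id': 'johor', 'name': 'JOHOR', 'code': '01',
#           'data': [{'2020-01-01': -3}, {'2020-01-02': 0}, ...],
#           'facility': [ ...same structure, one level deeper, one entry per facility... ],
#       },
#       ...
#   ]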
def calculateData(params, data):
    global naming_keymap, crawl_folder
    item_key_to_show = fn.getNestedElement(params, 'item_key_to_show')
    process_order = fn.getNestedElement(params, 'process_order')
    key_to_join = fn.getNestedElement(params, 'key_to_join')
    number_of_month = fn.getNestedElement(params, 'number_of_month', 1)
    start_month = fn.getNestedElement(params, 'start_month')
    custom_params = copy.deepcopy(params)
    result = {}
    main_po = {}
    df = pd.DataFrame(data[:]).astype(str)
    df = preprocessDataFrame(data=df)
    Logger.v('start_month', start_month)
    month_range = getMonthRange(params=custom_params)
    # aggregation rules: sum the issue quantity, keep the first value of every descriptive column
    functions = {
        'issue_quantity': sum,
        'facility_name': 'first',
        'requester_name': 'first',
        'item_name': 'first',
        'sub_group_name': 'first',
        'item_category_name': 'first',
        'pku_name': 'first',
        'drug_nondrug_name': 'first',
        'requester_group_name': 'first',
        'ptj_name': 'first',
        'approved_year_month': 'first',
        'issue_type': 'first',
        'item_group_name': 'first',
    }
    table = pd.pivot_table(df, values=list(functions.keys()), index=key_to_join, aggfunc=functions)
    new_table = table.reset_index(level=key_to_join)
    for index, row in new_table.iterrows():
        unique_list = []
        for ktj in key_to_join:
            unique_list.append(row[ktj])
        unique_id = '|'.join(unique_list)
        codename = {
            'state': {
                'name': row['state'],
                'code': row['state'],
            },
            'facility_code': {
                'name': row['facility_name'],
                'code': row['facility_code'],
            },
            'requester_group_code': {
                'name': row['requester_group_name'],
                'code': row['requester_group_code'],
            },
            'drug_nondrug_code': {
                'name': row['drug_nondrug_name'],
                'code': row['drug_nondrug_code'],
            },
            'item_code': {
                'name': row['item_name'],
                'code': row['item_code'],
            },
        }
        for idx in range(0, len(process_order)):
            po = process_order[idx]
            if idx == 0:
                code = codename[po]['code']
                name = codename[po]['name']
                if code not in result:
                    result[code] = {}
                temp_result = result[code]
                temp_result.update({
                    'id': fn.convertToSnakecase(code),
                    'name': name,
                    'code': code,
                })
            elif idx == len(process_order) - 1:
                code = row['item_code']
                name = row['item_name']
                if po not in temp_result:
                    temp_result.update({
                        po: {},
                    })
                if unique_id not in temp_result[po]:
                    temp_result[po][unique_id] = {}
                temp_result = temp_result[po][unique_id]
                temp_result.update({
                    'id': fn.convertToSnakecase(unique_id),
                    'name': name,
                    'code': code,
                    'quantity': row['issue_quantity'],
                    'quantity_by_month': {},
                })
                for mr in month_range:
                    if mr == row['approved_year_month']:
                        monthly_quantity = row['issue_quantity']
                    else:
                        monthly_quantity = 0
                    temp_result['quantity_by_month'].update({
                        mr: monthly_quantity,
                    })
                for item_show in item_key_to_show:
                    temp_result.update({
                        item_show: row[item_show],
                    })
            else:
                code = codename[po]['code']
                name = codename[po]['name']
                if po not in temp_result:
                    temp_result.update({
                        po: {},
                    })
                if code not in temp_result[po]:
                    temp_result[po][code] = {}
                temp_result = temp_result[po][code]
                temp_result.update({
                    'id': fn.convertToSnakecase(code),
                    'name': name,
                    'code': code,
                })
    return result
def calculateData(params, data):
    processed_df = preprocessData(params, data)
    grouped_df = preprocessData(params=params, data=processed_df)
    report_name = Report.getReportName(params)
    key_to_join = fn.getNestedElement(global_key_to_join, report_name)
    joined_key = []
    joined_ = []
    joined_columns_list = [key_to_join[0]]
    for idx in range(0, len(key_to_join)):
        ktj = key_to_join[idx]
        joined_key.append(ktj)
        if idx > 0:
            joined_.append(['_'.join(joined_key[:-1]), ktj])
            columns = joined_[idx - 1]
            joined_columns = '_'.join(columns)
            joined_columns_list.append(joined_columns)
    last_key = joined_columns_list[-1]
    result = {}
    for index, row in grouped_df.iterrows():
        unique_value = row[last_key]
        reference = {
            'value': {},
            'po': {},
            'name': {
                0: row['facility_type'],
                1: row['facility_name'],
                2: row['drug_name'],
                3: row['drug_name'],
            },
        }
        for idx in range(0, len(global_process_order[report_name])):
            po = global_process_order[report_name][idx]
            reference['value'][idx] = row[po]
            reference['po'][idx] = po
            value_ref = reference['value']
            po_ref = reference['po']
            name_ref = reference['name']
            if idx > 0:
                keys = []
                for idx1 in range(0, idx):
                    keys += [po_ref[idx1], value_ref[idx1]]
                key = '.'.join(keys)
                check_temp_result = fn.getNestedElement(result, key)
            else:
                check_temp_result = result
            if po_ref[idx] not in check_temp_result:
                check_temp_result[po_ref[idx]] = {}
            if value_ref[idx] not in check_temp_result[po_ref[idx]]:
                if idx == len(global_process_order[report_name]) - 1:
                    obj_ = {
                        'id': fn.convertToSnakecase(value_ref[idx]),
                        'name': name_ref[idx],
                        'code': value_ref[idx],
                        'min_unit_price': float(row['min_unit_price']),
                        'max_unit_price': float(row['max_unit_price']),
                        'e_p_approved_quantity': float(row['e_p_approved_quantity']),
                        'purchase_amount': float(row['purchase_amount']),
                        'item_packaging_name': row['item_packaging_name'],
                        'item_packaging_seq_no': row['item_packaging_seq_no'],
                    }
                else:
                    obj_ = {
                        'id': fn.convertToSnakecase(value_ref[idx]),
                        'name': name_ref[idx],
                        'code': value_ref[idx],
                    }
                check_temp_result[po_ref[idx]][value_ref[idx]] = obj_
    return result
def calculateData(params, data):
    Debug = DebugManager.DebugManager()
    Debug.start()
    Debug.trace('start')
    global naming_keymap, crawl_folder
    item_key_to_show = fn.getNestedElement(params, 'item_key_to_show')
    process_order = fn.getNestedElement(params, 'process_order')
    custom_params = copy.deepcopy(params)
    result = {}
    main_po = {}
    df = pd.DataFrame(data[:]).astype(str)
    df = preprocessDataframe(params=custom_params, data=df)
    summation_df = groupDataframe(params=custom_params, data=df)
    # to check the summation result:
    # output_file = '{0}/output/find_result.xlsx'.format(crawl_folder)
    # df.to_excel(output_file)
    Debug.trace('dataframe process')
    for idx in range(0, len(process_order)):
        main_po[idx] = {
            'po': process_order[idx],
            'group_po': [],
        }
        for idx1 in range(0, idx + 1):
            main_po[idx]['group_po'].append(process_order[idx1])
    for idx in range(0, len(process_order)):
        po = main_po[idx]['po']
        group_po = main_po[idx]['group_po']
        custom_params['po'] = {
            'po': po,
            'naming_keymap': naming_keymap,
        }
        grouped_df = df.groupby(group_po).groups
        Logger.v('len', idx, 'th', len(grouped_df.keys()))
        if idx == 0:
            for gk in grouped_df.keys():
                code = gk.split('|')[-1]
                name = df[df[po] == gk][naming_keymap[po]].unique().tolist()[0]
                result[gk] = {
                    'id': fn.convertToSnakecase(gk),
                    'name': name,
                    'code': code,
                }
        else:
            for gk in grouped_df.keys():
                level = gk[-2]
                code = gk[-1]
                if idx == 1:
                    temp_result = result
                elif idx == 2:
                    temp_result = fn.getNestedElement(
                        result, '{0}.{1}'.format(gk[0], main_po[idx - 1]['po']))
                elif idx == 3:
                    temp_result = fn.getNestedElement(
                        result, '{0}.{1}.{2}.{3}'.format(gk[0], main_po[idx - 2]['po'],
                                                         gk[1], main_po[idx - 1]['po']))
                if po not in temp_result[level]:
                    temp_result[level][po] = {}
                if code not in temp_result[level][po]:
                    temp_result[level][po][code] = {}
                custom_params['po'].update({
                    'gk': code,
                })
                # when this is the last element in process_order
                if process_order[-1] == po:
                    last_child_data = insertNthChild(params=custom_params, data=summation_df, is_last=True)
                    info = last_child_data['info']
                    temp_result[level][po][code] = last_child_data['obj_']
                    # add extra info by group_by
                    for ik in item_key_to_show:
                        temp_result[level][po][code].update({
                            ik: info[ik].values[0],
                        })
                else:
                    last_child_data = insertNthChild(params=custom_params, data=summation_df)
                    temp_result[level][po][code] = last_child_data['obj_']
        Debug.trace('{0}th'.format(idx))
    Debug.end()
    Debug.show('Model.Stock.calculateData')
    return result
def calculateData(params, data):
    processed_df = preprocessData(params, data)
    grouped_df = groupData(params=params, data=processed_df)
    report_name = Report.getReportName(params)
    key_to_join = fn.getNestedElement(global_key_to_join, report_name)
    result = {}
    joined_key = []
    joined_ = []
    joined_columns_list = [key_to_join[0]]
    for idx in range(0, len(key_to_join)):
        ktj = key_to_join[idx]
        joined_key.append(ktj)
        if idx > 0:
            joined_.append(['_'.join(joined_key[:-1]), ktj])
            columns = joined_[idx - 1]
            joined_columns = '_'.join(columns)
            joined_columns_list.append(joined_columns)
    last_key = joined_columns_list[-1]
    for index, row in grouped_df.iterrows():
        unique_value = row[last_key]
        reference = {
            'value': {},
            'po': {},
            'name': {
                0: row['state_code'],
                1: row['facility_name'],
                2: row['budget_type_name'],
                3: row['object_name'],
                4: row['item_group_name'],
                5: row['item_group_name'],
            },
        }
        for idx in range(0, len(global_process_order[report_name])):
            po = global_process_order[report_name][idx]
            reference['value'][idx] = row[po]
            reference['po'][idx] = po
            value_ref = reference['value']
            po_ref = reference['po']
            name_ref = reference['name']
            if idx > 0:
                keys = []
                for idx1 in range(0, idx):
                    keys += [po_ref[idx1], value_ref[idx1]]
                key = '.'.join(keys)
                check_temp_result = fn.getNestedElement(result, key)
            else:
                check_temp_result = result
            if po_ref[idx] not in check_temp_result:
                check_temp_result[po_ref[idx]] = {}
            if po_ref[idx] == 'state_name':
                name = value_ref[idx]
                code = name_ref[idx]
            else:
                name = name_ref[idx]
                code = value_ref[idx]
            if value_ref[idx] not in check_temp_result[po_ref[idx]]:
                if idx == len(global_process_order[report_name]) - 1:
                    obj_ = {
                        'id': fn.convertToSnakecase(code),
                        'name': name,
                        'code': code,
                        'first_allocation': float(row['first_allocation']),
                        'additional_allocation': float(row['additional_allocation']),
                        'pending_amount': float(row['pending_amount']),
                        'utilized_amount': float(row['utilized_amount']),
                        'liablity_amount': float(row['liablity_amount']),
                        'trans_in_amount': float(row['trans_in_amount']),
                        'trans_out_amount': float(row['trans_out_amount']),
                        'deduction_amount': float(row['deduction_amount']),
                        'current_actual_amount': float(row['current_actual_amount']),
                        'total_allocation': float(row['total_allocation']),
                        'balance_amount': float(row['balance_amount']),
                    }
                else:
                    obj_ = {
                        'id': fn.convertToSnakecase(code),
                        'name': name,
                        'code': code,
                    }
                check_temp_result[po_ref[idx]][value_ref[idx]] = obj_
    return result
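# Illustrative shape of the nested result built above. The process-order column names
# ('state_name', 'facility_code') and all values are assumptions for illustration only;
# the real keys come from global_process_order[report_name].
#
#   {
#       'state_name': {
#           'JOHOR': {
#               'id': 'johor', 'name': 'JOHOR', 'code': '01',
#               'facility_code': {
#                   '0101': {'id': '0101', 'name': 'HOSPITAL X', 'code': '0101', ...},
#               },
#           },
#       },
#   }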