def downday_str_parser(string, year, week_number):
    """Parse a '<weekday> HH:MM' string into a localized datetime.

    The weekday name plus the supplied ``year`` and ISO-style
    ``week_number`` pin the calendar date; the HH:MM part supplies the
    time of day.  Returns ``None`` when ``string`` does not match the
    expected pattern.

    NOTE(review): '%a' in strptime expects an abbreviated weekday name
    (e.g. 'Mon') — presumably callers pass that form; verify upstream.
    """
    pattern = re.compile(r'(?P<weekday>[a-zA-Z]+)\s*(?P<hour>[0-9]+):(?P<min>[0-9]+)')
    match = pattern.match(string)
    if not match:
        return None

    fields = match.groupdict()
    # Assemble a single timestamp string that strptime can resolve via
    # year + week number + weekday name.
    stamp = '-'.join([
        str(year),
        str(week_number),
        fields['weekday'],
        fields['hour'],
        fields['min'],
    ])
    return UDatetime.localize(dt.strptime(stamp, '%Y-%W-%a-%H-%M'))
def report_lost_time_detail_processor(cls):
    """Import 'ReportLostTimeDetail' CSV documents into the database.

    For every ``Documents`` row with status 'new' and file_type
    'ReportLostTimeDetail': read the CSV (headerless), pull the fixed
    column positions 27-41 into named fields, normalize line/shift
    labels and currency strings, then upsert each row into
    ``ReportLostTimeDetail``.  Progress is streamed to the client via
    ``cls.percent`` / ``cls.update_process`` and each consumed document
    is flagged 'loaded'.

    Returns:
        An empty ``JsonResponse``.

    Raises:
        ValueError: if 'SalesDate' matches neither known date format.
    """
    files = Documents.objects.filter(status__exact='new',
                                     file_type__exact='ReportLostTimeDetail')
    cls.file_num = files.count()
    cls.percent = 0

    if files.exists():
        for doc in files:
            path = BASE_DIR + doc.document.url
            if os.path.exists(path):
                csv_data = pd.read_csv(path, header=None)
                if not csv_data.empty:
                    # Map the fixed CSV column positions (27..41) onto
                    # the named columns, preserving this order.
                    columns = ['SalesDate', 'Line', 'Shift', 'OracleType',
                               'MIPS_Description', 'LostTimeMinutes',
                               'LostTimeValue', 'Occ', 'OracleCategory',
                               'Category', 'Area', 'Cause', 'Comments',
                               'LineStop', 'Init']
                    data = pd.DataFrame(
                        {name: csv_data[idx]
                         for name, idx in zip(columns, range(27, 42))},
                        columns=columns)

                    # Normalize verbose line/shift labels to short codes.
                    mapping = {
                        'Line 1': '1', 'Line 2': '2', 'Line 7': '7',
                        'Line 8': '8', 'Line 9': '9',
                        'Shift 1': '1', 'Shift 2': '2', 'Shift 3': '3',
                        'ZZZZ': 'N',
                    }
                    data.replace(mapping, inplace=True)

                    # Strip currency formatting before the float cast.
                    # regex=False is required: with the regex default, '$'
                    # is the end-of-string anchor, the dollar sign is never
                    # removed, and astype('float64') fails.
                    data['LostTimeValue'] = (
                        data['LostTimeValue']
                        .str.replace('$', '', regex=False)
                        .str.replace(',', '', regex=False)
                        .astype('float64')
                    )

                    # Sales dates arrive with either 2- or 4-digit years;
                    # let the second attempt's error propagate unchanged
                    # instead of masking it with a bare `raise Exception`.
                    try:
                        data['SalesDate'] = pd.to_datetime(
                            data['SalesDate'], format='%m/%d/%y')
                    except (ValueError, TypeError):
                        data['SalesDate'] = pd.to_datetime(
                            data['SalesDate'], format='%m/%d/%Y')
                    data['SalesDate'] = data['SalesDate'].apply(UDatetime.localize)

                    data_count = len(data)
                    rows_since_push = 0
                    for _, row in data.iterrows():
                        ReportLostTimeDetail.objects.update_or_create(
                            SalesDate=row['SalesDate'],
                            Line=row['Line'],
                            Shift=row['Shift'],
                            MIPS_Description=row['MIPS_Description'],
                            LostTimeMinutes=row['LostTimeMinutes'],
                            defaults={
                                'LostTimeValue': row['LostTimeValue'],
                                'Occ': row['Occ'],
                                'OracleCategory': row['OracleCategory'],
                                'Category': row['Category'],
                                'Area': row['Area'],
                                'Cause': row['Cause'],
                                'Comments': row['Comments'],
                                'LineStop': row['LineStop'],
                                'Init': row['Init'],
                            })
                        cls.percent += 1 / (data_count * cls.file_num)
                        rows_since_push += 1
                        # Push progress to the client every 20 rows.
                        if rows_since_push == 20:
                            cls.update_process(cls.percent)
                            rows_since_push = 0

                # Mark the document consumed so it is not re-imported.
                Documents.objects.filter(id=doc.id).update(status='loaded')

    cls.percent = 1
    cls.update_process(cls.percent)
    return JsonResponse({})
def tasks_data_processor(cls, data_new):
    """Sync Somax task rows into the ``Tasks`` table.

    Diffs ``data_new`` against existing ``Tasks`` records (keyed by
    work_order / current_status_somax), normalizes the date columns,
    resolves the related Equipment / AOR / SomaxAccount / PMs rows,
    and upserts one ``Tasks`` record per work order.

    Raises:
        ValueError: if a date column matches none of the known formats.
    """
    def parse_dates(series, formats):
        # Try each date format in order; re-raise the last failure so
        # the real parse error surfaces instead of a bare `except:`.
        last_err = None
        for fmt in formats:
            try:
                return pd.to_datetime(series, format=fmt)
            except (ValueError, TypeError) as err:
                last_err = err
        raise last_err

    data_old = pd.DataFrame.from_records(
        Tasks.objects.all().values('work_order', 'current_status_somax'))
    data = cls.get_new_and_update_data(data_old, data_new)
    if data.empty:
        return

    # Source exports mix several date layouts; try each known one.
    # 'Scheduled' now gets the same '%m/%d/%y' fallback as the other
    # two columns, which it was previously (inconsistently) missing.
    date_formats = ('%Y/%m/%d', '%m/%d/%Y', '%m/%d/%y')
    for col in ('Created', 'Scheduled', 'Actual Finish'):
        data[col] = parse_dates(data[col], date_formats).apply(UDatetime.localize)

    # Drop non-ASCII characters from descriptions.
    data['Description'] = data['Description'].apply(
        lambda text: text.encode('ascii', errors='ignore').decode())

    for _, row in data.iterrows():
        # QuerySet.first() returns the matching object or None in a
        # single query — replaces the filter()/exists()/[0] pattern.
        equipment = Equipment.objects.filter(
            equipment_id__exact=row['Charge To']).first()
        aor = AOR.objects.filter(
            equip_id__equipment_id__exact=row['Charge To']).first()
        creator = SomaxAccount.objects.filter(
            user_name__exact=row['Creator']).first()
        assigned = SomaxAccount.objects.filter(
            user_name__exact=row['Assigned']).first()

        pms = PMs.objects.filter(description__exact=row['Description']).first()
        # Estimated hours come from the matching PM; default to zero.
        est = pms.duration if pms else timedelta(hours=0)

        Tasks.objects.update_or_create(
            work_order=row['Work Order'],
            defaults={
                'description': row['Description'],
                'work_type': row['Type'],
                'current_status': row['Status'],
                'line': None,
                'shift': row['Shift'],
                'priority': row['Priority'],
                'create_date': row['Created'],
                'current_status_somax': row['Status'],
                'schedule_date_somax': row['Scheduled'],
                'actual_date_somax': row['Actual Finish'],
                'estimate_hour': est,
                'fail_code': row['Fail Code'],
                'completion_comments': row['Completion Comments'],
                'equipment': equipment,
                'AOR': aor,
                'creator': creator,
                'assigned': assigned,
                'PMs': pms,
                'parts_location': None,
            })