def app():
    """Render the tick-division employee performance dashboard (Streamlit).

    Loads cleaned revenue, hours, production and drive-time CSVs, filters
    them to the sidebar date range, and renders:
      * median/mean time, distance and speed to first and in-between services,
      * per-employee 10-day rolling speed trend charts,
      * unfinished-account counts per employee,
      * average weekly hours, profit/day and a composite rank score.

    NOTE(review): depends on module-level names defined elsewhere in the
    project (st, pd, np, px, go, dt, timedelta, make_subplots, get_clean_data,
    timezz, tick_employees, reverse_rank_metrics, tick_composite_cols,
    get_average_over_time, get_average_weekly_hours, get_tick_employee_results,
    get_tick_product_data, get_profit_per_service, get_average, download_link,
    make_property_size_histogram).
    """
    # Which revenue column feeds all revenue-based stats.
    adjust_rev = st.sidebar.checkbox('Adjust Vineyard Revenue?')
    if adjust_rev:
        rev_col = 'GrossSalesAdjusted'
    else:
        rev_col = 'GrossSalesAmount'

    # Load Data
    df_tick = get_clean_data('data/clean/revenue/clean_revenue.csv',
                             low_memory=False)
    df_tick['DoneDateFormatted'] = pd.to_datetime(
        df_tick['DoneDateFormatted'], format='%Y-%m-%d')
    csv = pd.read_csv('data/clean/employee_hours/employee_hours.csv')
    df_hours = pd.DataFrame(csv)
    df_hours['ReportDateFormatted'] = pd.to_datetime(
        df_hours['ReportDateFormatted'], format='%m/%d/%Y')
    csv2 = pd.read_csv('data/clean/production/tick_production_analysis.csv')
    df_prod = pd.DataFrame(csv2)

    # Sidebar controls: reporting window and which statistic to aggregate by.
    time_frame = st.sidebar.selectbox('Time Frame:', list(timezz.keys()))
    start = st.sidebar.date_input('Start:', dt.date(2021, 1, 1))
    end = st.sidebar.date_input('End:', dt.date.today())
    stat_choice = st.selectbox('Mean or Median?', ['Median', 'Mean'])

    # First-service drive stats, limited to the window and positive speeds.
    csv4 = pd.read_csv('data/clean/employee_hours/tick_first_services.csv')
    df_first_services = pd.DataFrame(csv4)
    df_first_services = df_first_services.rename(
        columns={'AverageSpeedToFirstService': 'SpeedToFirstService'})
    df_first_services['Date'] = pd.to_datetime(
        df_first_services['Date'], format='%Y-%m-%d')
    mask1 = (df_first_services['Date'] >= pd.to_datetime(start, format='%Y-%m-%d')) &\
        (df_first_services['Date'] <= pd.to_datetime(end, format='%Y-%m-%d')) &\
        (df_first_services['SpeedToFirstService'] > 0)
    df_first_services = df_first_services.loc[mask1]
    # Discard implausible speeds (outside 0-60 MPH).
    df_first_services = df_first_services.loc[
        (df_first_services['SpeedToFirstService'] > 0)
        & (df_first_services['SpeedToFirstService'] < 60)]

    # In-between-service drive stats for the same window.
    csv5 = pd.read_csv('data/clean/employee_hours/tick_inbetween_services.csv')
    df_inbetween_services = pd.DataFrame(csv5)
    df_inbetween_services['Date'] = pd.to_datetime(
        df_inbetween_services['Date'], format='%Y-%m-%d')
    mask2 = (df_inbetween_services['Date'] >= pd.to_datetime(start, format='%Y-%m-%d')) &\
        (df_inbetween_services['Date'] <= pd.to_datetime(end, format='%Y-%m-%d')) &\
        (df_inbetween_services['SpeedToNextService'] > 0)
    df_inbetween_services = df_inbetween_services[mask2]
    df_inbetween_services = df_inbetween_services.rename(
        columns={
            'MilesToNextAddress': 'MilesToNextService',
            'TimeToNextService': 'HoursToNextService'
        })
    df_inbetween_services = df_inbetween_services.drop(columns=['Unnamed: 0'])

    # One aggregation callable instead of duplicating the Median/Mean
    # branches. stat_choice is 'Median' or 'Mean', so its lowercase form is
    # also a valid pandas Rolling method name.
    agg_func = pd.Series.median if stat_choice == 'Median' else pd.Series.mean
    roll_stat = stat_choice.lower()

    df_first_services_group = df_first_services.groupby(
        by=['EmployeeName']).agg(agg_func)
    df_inbetween_services_group = df_inbetween_services.groupby(
        by=['EmployeeName']).agg(agg_func)
    df_time_services = df_first_services_group.merge(
        df_inbetween_services_group, left_index=True, right_index=True)
    df_time_services = df_time_services[[
        'SpeedToFirstService', 'SpeedToNextService', 'MilesToFirstService',
        'MilesToNextService', 'HoursToFirstService', 'HoursToNextService'
    ]]
    st.markdown(f''' ** {stat_choice} Time, Distance and Speed to First and Next Properties (July-Current 2021)** ''')
    st.dataframe(df_time_services)
    st.write(
        f'''Correlation Between Distance and Speed (First Services): {np.corrcoef(df_first_services['SpeedToFirstService'], df_first_services['MilesToFirstService'])[0,1]}'''
    )
    st.write(
        f'''Correlation Between Distance and Speed (In Between Services): {np.corrcoef(df_inbetween_services['SpeedToNextService'], df_inbetween_services['MilesToNextService'])[0,1]}'''
    )

    # Per-employee rolling speed trends.
    employee_choice = st.selectbox('Select Employee', tick_employees)
    employee_first_df = df_first_services.loc[
        df_first_services['EmployeeName'] == employee_choice]
    # Drop leftover CSV index columns in one call (was a side-effect
    # list comprehension over inplace drops).
    employee_first_df = employee_first_df.drop(
        columns=[c for c in employee_first_df.columns if 'Unnamed' in c])
    employee_first_df['FirstPropSpeedToDate'] = employee_first_df.apply(
        lambda row: get_average_over_time(row, 'SpeedToFirstService',
                                          employee_first_df, agg_func),
        axis=1)
    employee_first_df['10DayFirstPropSpeed'] = getattr(
        employee_first_df['SpeedToFirstService'].rolling(window=10),
        roll_stat)()
    employee_next_df = df_inbetween_services.loc[
        df_inbetween_services['EmployeeName'] == employee_choice]\
        .groupby(by=['Date']).agg(agg_func).reset_index()
    employee_next_df['NextPropSpeedToDate'] = employee_next_df.apply(
        lambda row: get_average_over_time(row, 'SpeedToNextService',
                                          employee_next_df, agg_func),
        axis=1)
    employee_next_df['10DayNextPropSpeed'] = getattr(
        employee_next_df['SpeedToNextService'].rolling(window=10),
        roll_stat)()
    employee_next_df = employee_next_df.drop(
        columns=[c for c in employee_next_df.columns if 'Unnamed' in c])

    st.markdown(
        f'** {employee_choice} 10-Day {stat_choice} Speed to First and Next Properties Over Time **'
    )
    fig2 = make_subplots(
        rows=1,
        cols=2,
        subplot_titles=(f'10-Day {stat_choice} Speeds (7/1-to-Date)',
                        f'Speeds by Day'))
    fig2.add_trace(go.Scatter(x=employee_first_df['Date'],
                              y=employee_first_df['10DayFirstPropSpeed'],
                              mode='lines+markers',
                              name=f'10-Day {stat_choice} First Service Speed',
                              legendgroup='First'),
                   row=1,
                   col=1)
    fig2.add_trace(go.Scatter(x=employee_next_df['Date'],
                              y=employee_next_df['10DayNextPropSpeed'],
                              mode='lines+markers',
                              name=f'10-Day {stat_choice} In-between Speed',
                              legendgroup='Inbetween'),
                   row=1,
                   col=1)
    fig2.add_trace(go.Scatter(x=employee_first_df['Date'],
                              y=employee_first_df['SpeedToFirstService'],
                              mode='lines+markers',
                              name='Daily First Speeds',
                              legendgroup='First'),
                   row=1,
                   col=2)
    fig2.add_trace(go.Scatter(x=employee_next_df['Date'],
                              y=employee_next_df['SpeedToNextService'],
                              mode='lines+markers',
                              name='Daily In-between Speeds',
                              legendgroup='Inbetween'),
                   row=1,
                   col=2)
    fig2.update_xaxes(tickangle=45, title_text='Date')
    fig2.update_yaxes(title_text='Speed (MPH)')
    st.plotly_chart(fig2)

    # Accounts left unfinished at end-of-day, per employee.
    csv_EODs = pd.read_csv('data/clean/employee_hours/missed_accounts.csv')
    df_EODs = pd.DataFrame(csv_EODs).set_index('index')
    df_EODs = df_EODs.drop(
        columns=[c for c in df_EODs.columns if 'Unnamed' in c])
    st.markdown(f'''**Accounts Unfinished by Employee**''')
    st.dataframe(df_EODs)
    fig3 = px.bar(df_EODs, x=df_EODs.index, y=df_EODs['Accounts/Week'])
    fig3.update_layout(title='Number of Accounts Missed / Week (Mon - Fri)',
                       xaxis_title='Employee Name',
                       yaxis_title='Accounts per Week')
    st.plotly_chart(fig3)

    # Production summary per employee within the window.
    df_prod = df_prod[[
        'RouteCodeDescription', 'ProductionDays', 'AverageRevenuePerDay',
        'AverageSizePerDay', 'TotalOfDistinctProductionDays'
    ]]
    df_prod = df_prod.loc[df_prod['RouteCodeDescription'].isin(tick_employees)]
    df_prod = df_prod.set_index('RouteCodeDescription')
    df_tick = df_tick.loc[
        (df_tick['DoneDateFormatted'] >= pd.to_datetime(start, format='%Y-%m-%d'))
        & (df_tick['DoneDateFormatted'] <= pd.to_datetime(end, format='%Y-%m-%d'))]
    df_prod_sum = df_tick.loc[df_tick['EmployeeName'].isin(
        tick_employees)].groupby(
            by=['EmployeeName', 'DoneDateFormatted']).sum()
    # "Full production day" = a day with more than $500 of earned revenue.
    df_prod_sum = df_prod_sum.loc[
        df_prod_sum['GrossSalesAmount'] > 500].reset_index()
    df_prod_production_days = df_prod_sum.groupby(by=['EmployeeName']).agg(
        {'GrossSalesAmount': pd.Series.count}).rename(
            columns={'GrossSalesAmount': 'NumFullProdDays'})
    # (A dead commented-out block computing TotalProdDays / %DaysFull via
    # get_percent_fullDays was removed here.)

    # On a weekday, end the hours report at the last completed week (the
    # Sunday before this week) unless that would fall before `start`.
    if end.isoweekday() in range(1, 6):
        first_of_week = end - timedelta(days=end.weekday())
        if first_of_week < start:
            end_alt = end
        else:
            end_alt = first_of_week - dt.timedelta(days=1)
    else:
        end_alt = end

    # Get average weekly hours per employee + results bar chart
    df_hours = get_average_weekly_hours(df_hours, start, end_alt,
                                        tick_employees)
    # $19/hour base rate, time-and-a-half for overtime.
    df_hours['Avg. Weekly Pay'] = (df_hours['RegularHours'] * 19) + (
        df_hours['OvertimeHours'] * (19 * 1.5))
    df_hours = df_hours.sort_values(by=['TotalHours'], ascending=False)
    st.plotly_chart(
        px.bar(
            df_hours,
            x=df_hours.index,
            y=['OvertimeHours', 'TotalHours'],
            title=
            f'''Average Num. Hours Worked per Week ({start.strftime('%m/%d/%Y')} - {end_alt.strftime('%m/%d/%Y')})''',
            barmode='group').add_hline(y=40,
                                       annotation_text='40 Hours',
                                       annotation_font_size=16))

    tick_results_production = get_tick_employee_results(df_tick,
                                                        rev_col=rev_col,
                                                        start=start,
                                                        end=end)
    results_prodUsage = get_tick_product_data(time_frame)

    # Combine Dataframes
    df_combine = tick_results_production.join(df_prod, how='outer').join(
        results_prodUsage, how='outer').join(df_hours, how='outer')
    df_combine = df_combine.join(df_prod_production_days)
    df_combine['Avg. Profit/Day'] = df_combine.apply(get_profit_per_service,
                                                     axis=1)
    df_combine['AvgServicesPerDay'] = df_combine[
        'Number of Services'] / df_combine['NumFullProdDays']
    df_combine['AverageRevenuePerDay'] = df_combine[
        'AvgServicesPerDay'] * df_combine['Median Revenue/Service ($)']

    # Rank every stat; metrics listed in reverse_rank_metrics rank
    # high-to-low (rank 1 = largest value).
    df_tick_ranks = pd.DataFrame(index=df_combine.index)
    for col in df_combine.columns:
        ascending = col not in reverse_rank_metrics
        df_tick_ranks[f'{col} Rank'] = df_combine[col].rank(
            ascending=ascending)
    tick_select_stats = st.multiselect(
        'Choose Stats to Include in Composite Score:',
        list(df_combine.columns),
        default=tick_composite_cols)
    df_combine['CompositeScore'] = df_tick_ranks.apply(
        lambda row: get_average(row, tick_select_stats), axis=1)

    # Trick to move last column to first column position
    tick_cols = list(df_combine.columns)
    tick_cols = [tick_cols[-1]] + tick_cols[:-1]
    df_combine = df_combine[tick_cols]
    df_combine = df_combine.sort_values(by=['CompositeScore'],
                                        ascending=False)
    df_graph2 = df_combine.sort_values(by=['Treatment/Area'], ascending=False)
    df_graph3 = df_combine.sort_values(
        by=['Median Servicing Speed (Acres/Hour-on-Sight)'], ascending=False)
    df_graph5 = df_combine.sort_values(by=['Avg. Profit/Day'],
                                       ascending=False)

    # 2x2 summary: hours, treatment density, servicing speed, profit/day.
    fig = make_subplots(rows=2, cols=2)
    fig.add_trace(go.Bar(x=df_hours.index,
                         y=df_hours['OvertimeHours'],
                         name='Average Overtime Hours per Week'),
                  row=1,
                  col=1)
    fig.add_trace(go.Bar(x=df_hours.index,
                         y=df_hours['TotalHours'],
                         name='Average Total Hours per Week'),
                  row=1,
                  col=1)
    fig.add_trace(go.Bar(x=df_graph2.index,
                         y=df_graph2['Treatment/Area'],
                         name='Treatment Applied per Area (Gallons/Acre)',
                         marker_color='#FFA15A'),
                  row=1,
                  col=2)
    fig.add_trace(go.Bar(
        x=df_graph3.index,
        y=df_graph3['Median Servicing Speed (Acres/Hour-on-Sight)'],
        name='Median Servicing Speed (Acres/Hour-on-Sight)',
        marker_color='#00CC96'),
                  row=2,
                  col=1)
    fig.add_trace(go.Bar(x=df_graph5.index,
                         y=df_graph5['Avg. Profit/Day'],
                         name='Average Profit per Day ($ Profit/Day)',
                         marker_color='#AB63FA'),
                  row=2,
                  col=2)
    fig.add_hrect(y0=9,
                  y1=11,
                  row=1,
                  col=2,
                  annotation_text='Target Range',
                  annotation_position='top right',
                  annotation_font_size=16,
                  line_width=0,
                  fillcolor='green',
                  opacity=0.5)
    fig.add_hline(y=40,
                  line_dash='dot',
                  row=1,
                  col=1,
                  annotation_text='40 Hours',
                  annotation_font_size=16)
    fig.update_layout(
        title=
        f'''Employee Stats From {start.strftime('%m/%d')} to {end.strftime('%m/%d')}''',
        barmode='group')
    fig.update_xaxes(tickangle=45, tickfont_size=10.5)
    st.plotly_chart(fig)

    # Download results as CSV
    if st.button('Download Dataframe as CSV'):
        tmp_download_link = download_link(
            df_combine.reset_index(),
            f"employee_stats_{''.join(filter(str.isalpha, time_frame.lower()))}.csv",
            'Download data.')
        st.markdown(tmp_download_link, unsafe_allow_html=True)
    st.dataframe(df_combine.sort_values(by=['CompositeScore'],
                                        ascending=False))

    # Property-size distribution for selected employees; the helper raises
    # IndexError when no employee is selected.
    chosen_ones = st.multiselect('Choose Employees: ', sorted(tick_employees))
    show_curve = st.checkbox('Show histogram:')
    try:
        make_property_size_histogram(df_tick,
                                     start,
                                     end,
                                     chosen_ones,
                                     show_hist=show_curve)
    except IndexError:
        st.write('No employees chosen.')
# USER PROGRAMMER
# (The person who builds/runs this script is called the "user programmer".)
"""Command-line grade statistics.

Usage: python main.py <excel filename> <total_avg> [sd]

Reads raw scores from an Excel file, prints the average, variance and
standard deviation, then runs the evaluation (with an optional standard
deviation argument `sd`).
"""
import sys

# Helper implementations live in functions.py.
from functions import (get_raw_data, get_average, get_evaluation, get_scores,
                       get_std_dev, get_variance)

# Require: script name + filename + total_avg, with an optional sd argument.
if len(sys.argv) not in (3, 4):
    print("usage : python main.py <excel filename> <total_avg> <sd=20>")
    sys.exit(1)  # non-zero status signals abnormal termination

filename = sys.argv[1]
total_avg = float(sys.argv[2])
if len(sys.argv) == 4:
    sd = float(sys.argv[3])

raw_data = get_raw_data(filename)
scores = get_scores(raw_data)
avg = get_average(scores)
var = get_variance(scores, avg)
std_dev = get_std_dev(var)
# "평균" = average, "분산" = variance, "표준편차" = standard deviation.
print(f"평균 : {avg}, 분산 : {var}, 표준편차 : {std_dev}")

# Forward the optional sd only when it was supplied (argc is 3 or 4 here).
if len(sys.argv) == 4:
    get_evaluation(avg, total_avg, std_dev, sd)
else:
    get_evaluation(avg, total_avg, std_dev)
def app():
    """Render the turf-division employee performance dashboard (Streamlit).

    Loads employee, hours, production, revenue and product-cost CSVs, then
    renders product profitability (year-to-date), product-usage charts,
    per-employee production stats with a composite rank score, daily
    program breakdowns, and a single-day service inspector.

    NOTE(review): depends on module-level names defined elsewhere in the
    project (st, pd, np, px, go, dt, make_subplots, make_datetime, timezz,
    turf_employees, reverse_rank_metrics, turf_composite_cols, products,
    OTC_with_addons, stat_translations, make_listCol,
    get_revenue_by_programCode, get_turf_product_data,
    get_average_weekly_hours, get_turf_profit_perDay, get_average,
    get_employee_daily_programs, make_daily_program_graph, download_link).
    """
    # The 'Adjust Vineyard Revenue?' toggle was retired for this page;
    # always use the raw revenue column.
    rev_col = 'GrossSalesAmount'

    # Load Data
    csv = pd.read_csv('data/clean/employee_hours/turf_employees.csv')
    csv2 = pd.read_csv('data/clean/employee_hours/employee_hours.csv')
    csv3 = pd.read_csv('data/clean/production/turf_production_analysis.csv')
    csv4 = pd.read_csv('data/clean/revenue/clean_revenue.csv',
                       low_memory=False)
    csv5 = pd.read_csv('data/clean/product_usage/yearly_product_costs.csv')
    turf_results_production = pd.DataFrame(csv)
    df_hours = pd.DataFrame(csv2)
    df_prod = pd.DataFrame(csv3)
    df_rev = pd.DataFrame(csv4)
    df_product_costs = pd.DataFrame(csv5)

    df_prod = df_prod[[
        'RouteCodeDescription', 'ProductionCompleted', 'ProductionDays',
        'ProductionSize', 'AverageRevenuePerDay', 'AverageSizePerDay'
    ]]
    df_prod = df_prod.loc[df_prod['RouteCodeDescription'].isin(turf_employees)]
    df_prod = df_prod.set_index('RouteCodeDescription')

    # Sidebar controls. BUGFIX: the end-date widget was labelled 'Start:'
    # (copy-paste error; the tick dashboard labels it 'End:').
    time_frame = st.sidebar.selectbox('Time Frame:', list(timezz.keys()))
    start = st.sidebar.date_input('Start:', timezz['Current Year'][0])
    end = st.sidebar.date_input('End:', timezz['Current Year'][1])

    # Product profitability is always year-to-date, independent of the
    # selected start/end window.
    mask = (make_datetime(df_rev['DoneDateFormatted'], format='%Y-%m-%d') >=
            make_datetime(timezz['Current Year'][0])) & (make_datetime(
                df_rev['DoneDateFormatted'],
                format='%Y-%m-%d') <= make_datetime(timezz['Current Year'][1]))
    df_product_costs['ProductCode'] = df_product_costs.apply(
        lambda row: make_listCol(row, 'ProductCode'), axis=1)
    df_product_costs['TotalRevenue'] = df_product_costs.apply(
        lambda row: get_revenue_by_programCode(row, df_product_costs,
                                               'AssociatedService',
                                               df_rev.loc[mask],
                                               'GrossSalesAmount'),
        axis=1)
    df_product_costs['Profit'] = (df_product_costs['TotalRevenue'] -
                                  df_product_costs['Cost'])
    st.markdown(''' **Product Profitability Stats (Year-to-date)** ''')
    st.dataframe(df_product_costs)

    st.markdown(f''' **General Information by Product** ''')
    product_download = st.button('Download Dataframe as CSV',
                                 key='Product Data')
    if product_download:
        tmp_download_link = download_link(
            pd.DataFrame.from_dict(products, orient='index').sort_values(
                ['Name']).rename_axis('ProductCode').reset_index(),
            f"product_info.csv", 'Download data.')
        st.markdown(tmp_download_link, unsafe_allow_html=True)
    st.dataframe(
        pd.DataFrame.from_dict(products, orient='index').sort_values([
            'Name'
        ]).rename_axis('ProductCode').reset_index().set_index('Name'))

    df_use = get_turf_product_data(time_frame)

    # Average weekly hours per employee.
    df_hours = get_average_weekly_hours(df_hours, start, end, turf_employees)
    df_hours = df_hours.sort_values(by=['TotalHours'], ascending=False)
    st.plotly_chart(
        px.bar(
            df_hours,
            x=df_hours.index,
            y=['OvertimeHours', 'TotalHours'],
            title=
            f'''Average Num. Hours Worked per Week ({start.strftime('%m/%d/%Y')} - {end.strftime('%m/%d/%Y')})''',
            barmode='group'))

    # Product usage by technician.
    product_choice = st.selectbox('Select Product',
                                  df_use['ProductDescription'].unique())
    # Most frequent code observed for the chosen description.
    product_code = list(df_use['ProductCode'].loc[
        df_use['ProductDescription'] == product_choice].mode())[0]
    stat_choice = st.selectbox('Select Stat', [
        'AmountApplied', 'PropertySize', 'Treatment/Area',
        'Cost/Area ($/1000 Ft^2)'
    ])
    if stat_choice == 'AmountApplied':
        units = f" ({products[product_code]['Units']})"
    elif stat_choice == 'Treatment/Area':
        units = f" ({products[product_code]['Units']}/1000 Ft^2)"
    elif stat_choice == 'PropertySize':
        units = ' (000s Ft^2)'
    else:
        units = ''
    df_use_graph = df_use.loc[df_use['ProductDescription'] == product_choice]
    df_use_graph = df_use_graph.sort_values(by=[stat_choice], ascending=False)
    st.plotly_chart(
        px.bar(df_use_graph,
               x=df_use_graph['TechnicianName'],
               y=[stat_choice],
               title=f'''{stat_choice}{units} for {product_choice}'''))
    df_use_combine = df_use.drop(columns=['ProductDescription'])
    df_use_combine = df_use_combine.groupby(by='TechnicianName').sum()
    st.dataframe(turf_results_production)

    # Restrict the raw production rows to the selected window.
    mask = (make_datetime(turf_results_production['DoneDateFormatted'],
                          format='%Y-%m-%d') >= pd.to_datetime(start)) & (
                              make_datetime(
                                  turf_results_production['DoneDateFormatted'],
                                  format='%Y-%m-%d') <= make_datetime(end))
    turf_results_production = turf_results_production.loc[mask]

    # Per-employee NaN-safe averages over the filtered rows.
    results = {}
    for emp in turf_employees:
        temp_df = turf_results_production.loc[
            turf_results_production['EmployeeName'] == emp]
        results[emp] = [
            np.nanmean(temp_df[rev_col]),
            np.nanmean(temp_df['CustomerSize']),
            np.nanmean(temp_df['TotalManHours']),
            np.nanmean(temp_df['size_per_hour']),
            np.nanmean(temp_df['value_per_hour']),
            len(temp_df['IndividualServiceNumber'].unique())
        ]
    turf_results_production = pd.DataFrame.from_dict(
        results,
        orient='index',
        columns=[
            'Avg. Revenue/Service ($)',
            'Avg. Property Size (Acres)',
            'Avg. Time/Service (Minutes)',
            'Avg. Servicing Speed (Acres/Hour-on-Sight)',
            'Avg. Revenue/Hour of Servicing ($/Hour-on-Sight)',
            'Number of Services',
        ])
    turf_results_production = turf_results_production.join(
        df_prod, how='outer').join(df_use_combine,
                                   how='outer').join(df_hours, how='outer')
    turf_results_production[
        'AverageProfitPerDay ($/Day)'] = turf_results_production.apply(
            lambda row: get_turf_profit_perDay(row), axis=1)

    # Rank every stat; metrics listed in reverse_rank_metrics rank
    # high-to-low (rank 1 = largest value).
    df_turf_ranks = pd.DataFrame(index=turf_results_production.index)
    for col in turf_results_production.columns:
        ascending = col not in reverse_rank_metrics
        df_turf_ranks[f'{col} Rank'] = turf_results_production[col].rank(
            ascending=ascending)
    turf_select_stats = st.multiselect(
        'Choose Stats to Include in Composite Score:',
        list(turf_results_production.columns),
        default=turf_composite_cols)
    turf_results_production['CompositeScore'] = df_turf_ranks.apply(
        lambda row: get_average(row, turf_select_stats), axis=1)
    # Move CompositeScore (last column) to the front.
    turf_cols = list(turf_results_production.columns)
    turf_cols = [turf_cols[-1]] + turf_cols[:-1]
    turf_results_production = turf_results_production[turf_cols]

    choice = st.selectbox('Choose Stat:', turf_results_production.columns)
    df_graph = turf_results_production.sort_values(by=[choice],
                                                   ascending=False)
    fig = make_subplots(rows=1, cols=1)
    fig.add_trace(go.Bar(x=df_graph.index,
                         y=df_graph[choice],
                         name=f'{choice}',
                         marker_color='#A777F1'),
                  row=1,
                  col=1)
    fig.update_layout(showlegend=True,
                      title=f'''Employee Stats for {choice}''')
    st.plotly_chart(fig)
    st.dataframe(
        turf_results_production.sort_values(by=['CompositeScore'],
                                            ascending=False))

    # Daily program breakdown for one employee (or all).
    chosen_emp = st.selectbox('Choose Employee:', turf_employees + ['All'])
    summary_stat_choice = st.selectbox(
        'Choose Stat:', ['Number Services', 'Square Feet (000s)', 'Revenue'])
    df_programs, unique_progs = get_employee_daily_programs(
        df=df_rev,
        start=start,
        end=end,
        employee=chosen_emp,
        choice=stat_translations[summary_stat_choice][0],
        func=summary_stat_choice)
    st.dataframe(df_programs)
    if not df_programs.empty:
        st.plotly_chart(
            make_daily_program_graph(
                df=df_programs,
                unique_programs=unique_progs,
                all_stats=True,
                title=
                f'''Daily {summary_stat_choice} for {chosen_emp} ({start.strftime('%m/%d/%Y')} - {end.strftime('%m/%d/%Y')})''',
                colors=px.colors.qualitative.Light24,
                stat=summary_stat_choice))

    # Program picker, excluding the synthetic ALL/OTHER columns.
    options = [
        f"{column.split('_')[0].upper()}: {OTC_with_addons[column.split('_')[0].upper()]}"
        for column in df_programs.columns
        if column.split('_')[0].upper() not in ['ALL', 'OTHER']
    ]
    choice = st.selectbox('Choose Program:', options)
    st.dataframe(df_programs)
    # Column suffix built from the stat name, e.g.
    # 'Number Services' -> 'NumberServices'.
    stat_key = ''.join(summary_stat_choice.split(' '))
    df_programs[f'Other_{stat_key}'] = df_programs[
        f'All_{stat_key}'] - df_programs[
            f"{choice.split(': ')[0].upper()}_{stat_key}"]
    st.plotly_chart(
        make_daily_program_graph(
            df=df_programs,
            unique_programs=unique_progs,
            all_stats=False,
            title=f'''Daily Count Split by {choice.split(': ')[0]}''',
            choice=choice,
            stat=summary_stat_choice))

    # Per-program summary over days with non-zero production.
    unique_progs.append('All')
    statzz = {
        'ProgramCode': [],
        f'Total {summary_stat_choice}': [],
        f'Average Daily {summary_stat_choice}': [],
        f'Median Daily {summary_stat_choice}': [],
        f'Max Daily {summary_stat_choice}': [],
        'Num. Production Days': []
    }
    for prog in unique_progs:
        prog_col = df_programs[f'{prog}_{stat_key}']
        non_zero_stats = prog_col.loc[prog_col != 0]
        if len(non_zero_stats) == 0:
            continue
        statzz['ProgramCode'].append(prog)
        statzz[f'Total {summary_stat_choice}'].append(
            round(np.sum(non_zero_stats), 0))
        statzz[f'Average Daily {summary_stat_choice}'].append(
            f'{round(np.mean(non_zero_stats), 2):,.0f}')
        statzz[f'Median Daily {summary_stat_choice}'].append(
            round(np.quantile(non_zero_stats, 0.5), 0))
        statzz[f'Max Daily {summary_stat_choice}'].append(
            f'{np.max(non_zero_stats):,.0f}')
        statzz['Num. Production Days'].append(f'{len(non_zero_stats):,.0f}')
    df_allstuff = pd.DataFrame.from_dict(statzz)
    df_allstuff = df_allstuff.set_index('ProgramCode')

    stats_download = st.button('Download Dataframe as CSV',
                               key=f'{chosen_emp} Stats')
    if stats_download:
        tmp_download_link = download_link(
            df_allstuff.reset_index(),
            f'''{chosen_emp.replace(' ', '_')}_{summary_stat_choice.lower().replace(' ', '_')}_stats_{start.strftime('%m%d%Y')}_{end.strftime('%m%d%Y')}.csv''',
            'Download data.')
        st.markdown(tmp_download_link, unsafe_allow_html=True)
    st.markdown(
        f'''**Daily {summary_stat_choice} Statistics by Service for {chosen_emp} ({start.strftime('%m/%d/%Y')} - {end.strftime('%m/%d/%Y')})**'''
    )
    st.dataframe(df_allstuff)
    st.markdown(
        f'''*NOTE: All calculations only take into account days where technician: {chosen_emp} did 1 or more services of said service.*'''
    )
    st.markdown(
        f'''**{summary_stat_choice} Stats by Day for {chosen_emp} ({start.strftime('%m/%d/%Y')} - {end.strftime('%m/%d/%Y')})**'''
    )
    st.dataframe(df_programs)

    # Single-day inspector. (Two dead commented-out groupby variants were
    # removed here.)
    single_day_choice = st.date_input('Inspect Particular Day:',
                                      dt.date(2021, 4, 1))
    df_single_day = df_rev.loc[
        (make_datetime(df_rev['DoneDateFormatted'], format='%Y-%m-%d') ==
         make_datetime(single_day_choice))
        & (df_rev['EmployeeName'].isin(turf_employees))]
    st.dataframe(df_single_day[[
        'EmployeeName', 'ProgramCode', 'GrossSalesAmount', 'Size', 'Address',
        'City'
    ]].set_index(['EmployeeName', 'ProgramCode']).sort_index())

    # All vs Top Dressing (TD) daily comparison.
    fig = make_subplots(rows=2,
                        cols=2,
                        subplot_titles=('# Services: All', '# Services: TD',
                                        'Square Feet (000s): All',
                                        'Square Feet (000s): TD'))
    fig.add_trace(go.Bar(x=df_programs.index,
                         y=df_programs['All_NumberServices'],
                         name='All: # Services',
                         legendgroup='All',
                         marker_color='#AB63FA'),
                  row=1,
                  col=1)
    fig.add_trace(go.Bar(x=df_programs.index,
                         y=df_programs['TD_NumberServices'],
                         name='TD: # Services',
                         legendgroup='TD',
                         marker_color='#FFA15A'),
                  row=1,
                  col=2)
    fig.add_trace(go.Bar(x=df_programs.index,
                         y=df_programs['All_SquareFeet(000s)'],
                         name='All: Square Feet (000s)',
                         legendgroup='All',
                         marker_color='#00CC96'),
                  row=2,
                  col=1)
    fig.add_trace(go.Bar(x=df_programs.index,
                         y=df_programs['TD_SquareFeet(000s)'],
                         name='TD: Square Feet (000s)',
                         legendgroup='TD',
                         marker_color='#AB63FA'),
                  row=2,
                  col=2)
    fig.update_layout(
        title=
        f'''**Daily Service Stats Split by All & Top Dressing for {chosen_emp} ({start.strftime('%m/%d/%Y')} - {end.strftime('%m/%d/%Y')})**''',
        barmode='group')
    st.plotly_chart(fig)
# Explosion calculations # predict the ratio of the velocities of the gliders using masses v_ratio = mass_g2 / mass_g1 # Calculate velocites if necessary if flag_length > 0: g1_explode_velocities = [ f.calc_velocity(flag_length, time) for time in g1_explode_times ] g2_explode_velocities = [ f.calc_velocity(flag_length, time) for time in g2_explode_times ] # Get explosion velocities and uncertainties g1_explode_vel_ave = f.get_average(g1_explode_velocities) g2_explode_vel_ave = f.get_average(g2_explode_velocities) g1_explode_vel_uncert = f.calc_uncertainty(g1_explode_velocities) g2_explode_vel_uncert = f.calc_uncertainty(g2_explode_velocities) ave_explode_vel_ratio = g1_explode_vel_ave / g2_explode_vel_ave # calculate KE before and after explosion explode_ke_0 = [f.calc_ke(mass_g1, vel) for vel in g1_explode_velocities] explode_ke_1 = [f.calc_ke(mass_g2, vel) for vel in g2_explode_velocities] explode_ke_tot = [sum(x) for x in zip(explode_ke_0, explode_ke_1)] # Elastic collision calculations # Calculate velocites if necessary if flag_length > 0: g1_elastic_vels_0 = [
# best fit data # input after creating the graph x1 = 1 # to avoid div by 0 y1 = 0 x2 = 0 y2 = 0 ################ # END DATA INPUT ################ # Calculate values radius_values = [radius_min, radius_max] omega_ave = functions.calc_omega(time_1) radius_ave = functions.get_average(radius_values) weight = functions.calc_weight(mass_w) slope = functions.calc_slope(x1, y1, x2, y2) # Calculate uncertainties omega_d = functions.calc_omega_uncertainty(time_1, time_d) # Run full rotational calculations rot_force = functions.calc_rotational_force(mass_h, omega_ave, radius_ave) rot_force_uncert = functions.calc_rotational_force_uncertainty( mass_h, omega_ave, radius_ave, mass_d, omega_d, radius_d) # Output values print("Mass: " + str(mass_h) + " +/- " + str(mass_d)) print("Omega: " + str(omega_ave) + " +/- " + str(omega_d)) print("Radius: " + str(radius_ave) + " +/- " + str(radius_d))
# USER PROGRAMMER
"""Command-line grade statistics and evaluation.

Usage: python main.py <excel filename> <total_avrg> [sd]

Reads raw scores from an Excel file, computes the average, variance and
standard deviation via the functions.py helpers, and runs the evaluation
with an optional standard deviation argument `sd` (helper default: 20).
"""
import sys

# Helper implementations live in functions.py.
from functions import (get_raw_data, get_average, get_variance, get_scores,
                       get_std_dev, get_evaluation)

# Require: script name + filename + total average, with an optional sd.
if len(sys.argv) not in (3, 4):
    print("usage : python main.py <excel filename> <total_avrg> <sd = 20>")
    # A non-zero status tells the caller this was an abnormal termination.
    sys.exit(1)

filename = sys.argv[1]
total_avrg = float(sys.argv[2])

raw_data = get_raw_data(filename)
scores = get_scores(raw_data)
avrg = get_average(scores)
variance = get_variance(scores, avrg)
std_dev = get_std_dev(variance)

# Forward the optional sd only when it was supplied (argc is 3 or 4 here).
if len(sys.argv) == 4:
    sd = float(sys.argv[3])
    get_evaluation(avrg, variance, std_dev, total_avrg, sd)
else:
    get_evaluation(avrg, variance, std_dev, total_avrg)