Example #1
    
    net.eval()

    """
    Make prediction
    """
    inputs = X_all_tensor
    label = Y_all_tensor

    prediction = net(inputs)

    # undo the output normalization to recover mobility values in original units
    label_origin = label.numpy() * Y_norm_std + Y_norm_mean
    prediction_origin = prediction.detach().numpy() * Y_norm_std + Y_norm_mean

    df3 = pd.DataFrame(prediction_origin, columns=['Retail and Recreation', 'Grocery and Pharmacy', 'Parks',
                                                   'Workplaces', 'Apple State Mobility Predict'])
    df3['date'] = pd_all['date'].iloc[plot_index].to_numpy()
    df3['State Name'] = pd_all['State Name'].iloc[plot_index].to_numpy()
    df3['WeekDay'] = pd_all['WeekDay'].iloc[plot_index].to_numpy()

    createFolder('./Mobility projection')
    df3.to_excel('./Mobility projection/Mobility_Projection_' + model_mark + '_YYG' + YYG_projection_Date +
                 '_MLmodel_' + ML_Model + '_' + case + isopen + '.xlsx')
    PODA_Model['Google_Apple_Mobility_Projection_' + case] = df3

np.save(("./PODA_Model_"+today+".npy"), PODA_Model)
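For reference, the dictionary written by np.save above can be restored later in the same way other parts of the project do (see the np.load call in Example #2); a minimal sketch, assuming today and case are still in scope:

import numpy as np

# reload the pickled model dictionary written above
PODA_Model = np.load("./PODA_Model_" + today + ".npy", allow_pickle=True).item()
df3_reloaded = PODA_Model['Google_Apple_Mobility_Projection_' + case]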
Example #2
import torch.nn as nn


class Net(nn.Module):
    # NOTE: the excerpt begins mid-constructor; the import above, the class name,
    # and this __init__ signature are reconstructed (assumed) from the code below.
    def __init__(self, n_feature, n_output, layers, nodes):
        super(Net, self).__init__()
        self.seq = nn.Sequential()
        self.seq.add_module('fc_1', nn.Linear(n_feature, nodes))
        self.seq.add_module('relu_1', nn.ReLU())

        for i in range(layers):
            self.seq.add_module('fc_' + str(i + 2), nn.Linear(nodes, nodes))
            self.seq.add_module('relu_' + str(i + 2), nn.ReLU())

        self.seq.add_module('fc_last', nn.Linear(nodes, n_output))

    def forward(self, input):

        return self.seq(input)
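
# Illustrative usage sketch (not from the source): the class name Net and the
# constructor signature follow the reconstruction above, the five outputs match
# the mobility columns in Example #1, and the hyperparameters mirror the
# commented layers_number=4 / nodes_number=20 below.
import torch

net = Net(n_feature=len(col_X_Name), n_output=5, layers=4, nodes=20)

net.eval()
with torch.no_grad():              # inference only, no gradient tracking
    prediction = net(X_all_tensor)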


createFolder('./ML Files')
#nn.ReLU() -> nn.Tanh()

# load raw data
pd_all = PODA_Model['ML_Data']

# layers_number=4
# nodes_number = 20

# Assign the ML inputs
col_X_Name = [
    'US Daily Confirmed', 'US Daily Confirmed Dfdt',
    'US Daily Confirmed_shifted_1', 'US Daily Confirmed_shifted_3',
    'US Daily Confirmed_shifted_7', 'US Daily Confirmed_shifted_10',
    'US Daily Confirmed Dfdt_shifted_1', 'US Daily Confirmed Dfdt_shifted_3',
    'US Daily Confirmed Dfdt_shifted_7', 'US Daily Confirmed Dfdt_shifted_10',
    # ... (remaining input-feature names are truncated in this excerpt)
]

    # plotting block; its enclosing loop/function header is not shown in this excerpt
    fig1 = plt.figure(figsize=(6, 5))
    ax1 = fig1.add_subplot(1, 1, 1)
    ax1.plot(x.index,
             x['Google Fuel Demand Predict'],
             '-',
             label='Google Mobility (Predicted)')
    ax1.plot(x.index,
             x['Apple Fuel Demand Predict'],
             '--g',
             label='Apple Mobility (Predicted)')
    ax1.plot(fuel_Demand_EIA.index - pd.DateOffset(days=day_Shift),
             fuel_Demand_EIA['Gasoline'],
             '--s',
             label='EIA Weekly Fuel Demand')
    ax1.set_xlabel('Date')
    ax1.set_ylabel('Daily Motor Gasoline Demand (thousand BPD)')
    ax1.set_ylim(4000, 10000)
    ax1.set_title('Fuel Demand: ' + model_mark + case + ' YYG:' +
                  YYG_projection_Date + ' MLmodel:' + ML_Model + isopen)
    ax1.legend()

    # if (case == 'mean') & (isopen ==''):
    #     data_save.to_excel('C:/Users/hexx/Box Sync/Energy-COVID-19/Data for Website/Mobility_State_'+YYG_projection_Date+case+'.xlsx')

np.save(("./PODA_Model_" + today + ".npy"), PODA_Model)

createFolder('./PODA_Model')
copyfile('./PODA_Model_' + today + '.npy',
         './PODA_Model/PODA_Model_' + today + '.npy')
# df_StateName_Code is loaded from a state-name / state-code lookup table; the
# call is truncated in this excerpt and only its trailing `header=0)` argument survives.

PODA_Model['StateName_StateCode'] = df_StateName_Code
'''
Get Apple and Google Mobility Data
'''
# Get Apple mobility data
print('Read Apple Data')
'''
Need to check Apple Website to find the right url path
'''
df_Apple_Mobility = pd.read_csv(
    "https://covid19-static.cdn-apple.com/covid19-mobility-data/2016HotfixDev16/v3/en-us/applemobilitytrends-"
    + Apple_File_Date + ".csv")

createFolder('./Mobility Google-Apple')
df_Apple_Mobility.to_csv('./Mobility Google-Apple/applemobilitytrends-' +
                         Apple_File_Date + '.csv')

apple_US = df_Apple_Mobility[
    (df_Apple_Mobility['region'] == 'United States')
    & (df_Apple_Mobility['transportation_type'] == 'driving')].transpose()
apple_US = apple_US.iloc[6:, ]
apple_US.rename(columns={apple_US.columns[0]: "Apple US"}, inplace=True)
apple_US.index = pd.to_datetime(apple_US.index)
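
# Quick, illustrative check of the transposed Apple series (not part of the source):
# after transpose() the single column has object dtype, so coercing it to numeric
# is a common follow-up before using the values.
apple_US['Apple US'] = pd.to_numeric(apple_US['Apple US'])
print(apple_US.head())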

############################################################################################################################
# get Google Mobility data
print('Read Google Mobility Data')
df_Google = pd.read_csv(
    'https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv?cachebust=911a386b6c9c230f')
# (any further arguments to this read_csv call are truncated in the excerpt)


def min_func(factor):  # objective passed to minimize() below; argument name assumed
    # ... body truncated in this excerpt ...
    retu = EIA_fuel['least_square'].sum()
    return retu


x0 = [1, 1, 1, 1, 1, 1, 1, 1, 0.5, 0, 0]
bounds = Bounds([0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0, 0, 0],
                [1.2, 1.2, 1.2, 1.2, 1.2, 1, 1.3, 1.3, 1, 0.05, 10])

# res = minimize(min_func, x0, method='trust-constr', options={'verbose': 1, 'disp': True}, bounds=bounds)
res = minimize(min_func, x0, method='SLSQP', bounds=bounds)
print(res.x)
print(res.fun)

a = res.x

createFolder('./Fuel Demand Projection')
np.savetxt('./Fuel Demand Projection/Fuel_mobility_factor' +
           google_Mobility_Day + '.csv',
           a,
           delimiter=",")

PODA_Model['Google_Mobility_EIA_Factor'] = a
np.save(("./PODA_Model_" + today + ".npy"), PODA_Model)
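
# The fitted factor vector saved above can be read back with numpy when needed;
# a minimal sketch, assuming the same google_Mobility_Day string:
a_reloaded = np.loadtxt('./Fuel Demand Projection/Fuel_mobility_factor' +
                        google_Mobility_Day + '.csv',
                        delimiter=",")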

# fig1 = plt.figure(figsize=(6, 5))
# ax1 = fig1.add_subplot(1, 1, 1)
# ax1.plot(x.index, x['fuel factor']*baseline/100, '-o', label=['pred'])
# ax1.plot(fuel_Demand_EIA.index-pd.DateOffset(days=day_Shift), fuel_Demand_EIA['Gasoline']*demand_factor, '--s', label='EIA')
# ax1.set_xlabel('Date')
# ax1.set_ylabel('Y')
# ax1.set_title('fuel demand: shift:'+str(day_Shift)+' days')
from sklearn import datasets, linear_model
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import copy
from myFunctions import def_add_datashift, createFolder

today_x = pd.to_datetime('today')
today = today_x.strftime("%Y-%m-%d")

# today = '2020-08-07'

'''
Please download the file from https://www.covidanalytics.io/projections
'''
MIT_file_name = 'MIT_covid_analytics_projections_'+today+'.csv'

createFolder('./Mobility projection')
        
scenario_cases = ['mean']  # other options: 'upper', 'lower'
# shiftDay = 16   #YYG model shift between new infected and confirmed

# today_x = pd.to_datetime('today')
# today =today_x.strftime("%Y-%m-%d")

PODA_Model = np.load(("./PODA_Model_" + today + ".npy"), allow_pickle=True).item()


MIT_Data = pd.read_csv(MIT_file_name, header=0)

MIT_Data = MIT_Data[(MIT_Data['Country'] == 'US') & 
                    (MIT_Data['Province'] != 'District of Columbia')]
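
# For illustration only: a sketch of how the filtered MIT table and the
# scenario_cases list defined above might be used next; the loop body is
# assumed, not taken from the source.
print(MIT_Data['Province'].nunique(), 'US states retained after filtering')

for case in scenario_cases:        # ['mean'] in this excerpt
    # per-scenario projection steps would follow here (assumed)
    print('Processing scenario:', case)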