Example #1
def funTest():
    root_path = Common_func.UsePlatform()
    station_list = pandas.read_csv(os.path.join(root_path, 'HHHstations.csv'))
    station_stage_list = pandas.read_csv(os.path.join(root_path, 'stations_cx_back.txt'), sep=' ')
    result_file = os.path.join(root_path, 'stations_heat_stress_hours.txt')
    for index, lonlatdata in station_list.iterrows():
        lon = float(lonlatdata['Longitude'])
        lat = float(lonlatdata['Latitude'])
        stationID = lonlatdata['StationID']
        station_name = lonlatdata['StationName']
        for i in range(2001, 2014):
            year = str(i)
            stage_rows = station_stage_list[(station_stage_list['year'] == i) &
                                            (station_stage_list['stationID'] == stationID)]
            stage_start_day = int(stage_rows['cx_start'].values[0])
            stage_end_day = int(stage_rows['cx_end'].values[0])
            models.base_stations_data.get_heat_stress_hours_every_station(
                root_path, stationID, station_name, lon, lat, i,
                stage_start_day, stage_end_day, 340, result_file)
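The stage lookup above raises an IndexError whenever a station/year pair is missing from the table. A minimal sketch of a guarded helper, assuming the same year/stationID/cx_start/cx_end columns (the name lookup_stage is hypothetical, not part of the project):

import pandas as pd

def lookup_stage(stage_table, station_id, year):
    """Return (start_day, end_day) for one station and year, or None if absent.

    Assumes stage_table has columns: year, stationID, cx_start, cx_end.
    """
    rows = stage_table[(stage_table['year'] == year) &
                       (stage_table['stationID'] == station_id)]
    if rows.empty:
        return None
    return int(rows['cx_start'].values[0]), int(rows['cx_end'].values[0])

Returning None (and skipping that year in the caller) avoids wrapping the whole loop body in a try/except.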
Example #2
def funcTest():
    root_path = Common_func.UsePlatform()
    data_file = os.path.join(root_path, 'grid_station_night.txt')
    data = pd.read_csv(data_file, sep=',')
    data = data[(data['gridval'] > 0) & (data['stationval'] > 0)]
    X = data['gridval'].values
    y = data['stationval'].values
    stationID_list = data['stationID'].unique()
    title = '黄淮海地区2003-2018年夜间遥感-气象温度散点图'  # "Night-time remote-sensing vs. station temperature scatter plot, Huang-Huai-Hai region, 2003-2018"
    for stationID in stationID_list:
        lon,lat = stations.Station_ETL.get_lonlat_by_stationID(stationID)
        station_data = data[data['stationID'] == stationID]
        station_name = stations.Station_ETL.get_station_name_by_stationID(stationID)
        X = station_data['gridval'].values
        y = station_data['stationval'].values
        a, b, RMSE, R2 = multi_linear_fit(X,y)
        print(stationID, station_name, lon, lat, R2)
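The helper multi_linear_fit is defined elsewhere in the project. A minimal sketch of a compatible implementation, assuming an ordinary least-squares fit of station values against grid values (the real function may use a different model):

import numpy as np

def multi_linear_fit(X, y):
    """Fit y = a * X + b and report the RMSE and R2 of the fit."""
    a, b = np.polyfit(X, y, 1)                 # slope and intercept
    y_pred = a * X + b
    residuals = y - y_pred
    RMSE = float(np.sqrt(np.mean(residuals ** 2)))
    ss_res = float(np.sum(residuals ** 2))
    ss_tot = float(np.sum((y - np.mean(y)) ** 2))
    R2 = 1.0 - ss_res / ss_tot if ss_tot > 0 else float('nan')
    return a, b, RMSE, R2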
Example #3
def count_percent():
    im_geotrans = ''
    im_proj = ''
    amount_data = np.zeros((1221, 2224))
    data_path = os.path.join(Common_func.UsePlatform(), 'results', 'nights')
    # Sum every yearly raster found under the nights folder.
    for root, dirs, files in os.walk(data_path):
        for file in files:
            im_data, im_geotrans, im_proj = Modis_IO.read_img(
                os.path.join(data_path, file), 1)
            amount_data = amount_data + im_data
    # Average over the 16 yearly rasters (2003-2018); negative sums become NaN.
    amount_data = np.where(amount_data >= 0,
                           (amount_data / 16).astype(float), np.nan)
    amount_data = np.around(amount_data, decimals=2)  # keep two decimals
    Modis_IO.write_img(os.path.join(data_path, '2003-2018.tif'), im_proj,
                       im_geotrans, amount_data)


#count_percent()
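The hard-coded division by 16 above averages what appear to be the 16 yearly rasters for 2003-2018. An equivalent formulation that stacks the rasters and uses np.nanmean (echoing the line commented out later in Example #6) might look like this; mean_of_rasters is a hypothetical helper, not part of the project:

import numpy as np

def mean_of_rasters(rasters):
    """Average a list of equally shaped 2-D arrays cell-wise, ignoring NaNs."""
    stacked = np.stack(rasters, axis=0)        # shape: (n_years, rows, cols)
    return np.around(np.nanmean(stacked, axis=0), decimals=2)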
Example #4
def RHF_cluster():
    root_path = Common_func.UsePlatform()
    im_proj = ''
    im_geotrans = ''
    data_path = os.path.join(root_path, 'results', 'days')
    amount_data = []
    im_data = []
    for i in range(2003, 2019):
        file = str(i) + '.tif'
        im_data, im_geotrans, im_proj = Modis_IO.read_img(
            os.path.join(data_path, file), 1)
        im_data = np.where(im_data > 0, im_data, np.nan)
        amount_data.append(im_data.flatten())
    #amount_data = np.array(amount_data).T
    #amount_data = np.nan_to_num(amount_data)
    data = pd.DataFrame(amount_data).add_prefix("col")
    data.profile_report(title='Pandas Profiling Report').to_file(
        output_file="output.html")
Example #5
def FuncTest():
    root_path = Common_func.UsePlatform()
    starttime = datetime.datetime.now()
    begin_year = 2003
    end_year = 2019
    im_proj = ''
    im_geotrans = ''
    for i in range(begin_year, end_year):
        year = str(i)
        # RHF(root_path, year)
        # EveryPoint(root_path, year)

    # computation functions
    # year = str('2005')
    # statistics and plotting
    # results(root_path, year)

    # relationship between station points and grid points
    #    every_station(root_path, year)

    # elapsed time
    endtime = datetime.datetime.now()
    print((endtime - starttime).seconds)
Example #6
    data.profile_report(title='Pandas Profiling Report').to_file(
        output_file="output.html")
    #amount_data  = np.nanmean(amount_data, axis=0).reshape(1221,2224)
    #amount_data = feature_cluster(im_data)
    #Modis_IO.write_img(os.path.join(data_path, '2003-2018_mean.tif'), im_proj, im_geotrans, amount_data)


#RHF_cluster()

#data = os.path.join(Common_func.UsePlatform(),'stations','hour-sum-mask.tif')
#im_data, im_geotrans, im_proj = Modis_IO.read_img(data, 1)
#im_data = np.where(im_data>0,im_data,np.nan)
#im_data = feature_cluster(im_data)
#Modis_IO.write_img(os.path.join(Common_func.UsePlatform(), 'stations','hour-sum-mask_cluster.tif'),im_proj,im_geotrans,im_data)

station_hours = os.path.join(Common_func.UsePlatform(), 'stations',
                             'hour-sum-mask_cluster_final.tif')
modis_hours = os.path.join(Common_func.UsePlatform(), 'results', 'RHD',
                           '2003-2018.tif')
im_data_S, im_geotrans, im_proj = Modis_IO.read_img(station_hours, 1)
im_data_M, im_geotrans, im_proj = Modis_IO.read_img(modis_hours, 1)
im_data_S = im_data_S.flatten()
im_data_M = im_data_M.flatten()
#im_data_S = np.where(im_data_S<-2,np.nan,im_data_S)
final_result = np.zeros(im_data_M.shape).flatten()
row = 1221
col = 2224
length = row * col
for i in range(length):
    #tem = im_data_M[i]
    if np.isnan(im_data_M[i]):
Example #7
                            round(stage_end_day)) + ' ' + str(
                                day_num) + ' ' + str(T_max) + ' ' + str(
                                    format(heat_stress_hours, '.2f'))
                print(row)
                Modis_IO.write_txt(result_file, row)
            except Exception:
                continue

    row = str(stationID) + ' ' + station_name + ' ' + str(lon) + ' ' + str(
        lat) + ' ' + str(year) + ' ' + str(sum_heat_days) + ' ' + str(
            format(sum_heat_hours, '.2f'))
    file = str(year) + '.txt'
    Modis_IO.write_txt(file, row)


root_path = Common_func.UsePlatform()


def Cal(root_path):
    for i in range(2010, 2019):
        year = i
        lonlatlist = pd.read_csv(os.path.join(root_path, 'HHHstations.csv'))
        for index, lonlatdata in lonlatlist.iterrows():
            try:
                lon = float(lonlatdata['Longitude'])
                lat = float(lonlatdata['Latitude'])
                stationID = lonlatdata['StationID']
                station_name = stations.Station_ETL.get_station_name_by_stationID(
                    stationID)
                cx_data = pd.read_csv(Common_func.cx, sep=' ')
                stage_start_day = 152
Example #8
def scatter_3D(cluster_feature):
    fig = plt.figure()
    ax = Axes3D(fig)

    ax.scatter(cluster_feature[:, 0],
               cluster_feature[:, 1],
               cluster_feature[:, 2] % 100,
               edgecolor='k')

    ax.set_xlabel('grid_value')
    ax.set_ylabel('station_value')
    ax.set_zlabel('day')
    fig.show()
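A brief usage note: scatter_3D expects an (n, 3) array whose columns are grid value, station value, and date, where the date appears to be an integer whose last two digits give the day (hence the % 100 above). A hypothetical call with illustrative values only:

import numpy as np

demo_feature = np.array([
    [32.5, 31.8, 20030706],   # gridval, stationval, date (illustrative only)
    [28.1, 27.4, 20030712],
    [35.0, 33.9, 20030725],
])
scatter_3D(demo_feature)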


data_path = os.path.join(Common_func.UsePlatform(), 'grid_station_day.txt')
orig_data = pandas.read_csv(data_path, sep=',')
# print(len(orig_data))
grid_station_data = orig_data[(orig_data['gridval'] > 0)
                              & (orig_data['stationval'] > 0)]
# print(len(grid_station_data), len(grid_station_data)/len(orig_data))
cluster_feature = grid_station_data[['gridval', 'stationval', 'date']].values

station_list = grid_station_data['stationID'].unique()

for stationid in station_list:

    lon, lat = stations.Station_ETL.get_lonlat_by_stationID(stationid)
    sum_days = len(orig_data[(orig_data['stationID'] == stationid)])
    val_days = len(
        grid_station_data[(grid_station_data['stationID'] == stationid)])