            if col_nameT2 not in data_Terra_Iterative.columns:
                # fill in the missing all-NaN column
                data_Terra_Iterative[col_nameT2] = np.nan
    else:
        data_Terra_Iterative.columns = data_Terra_N0.columns  # reset the column names
    for CCCOLT in data_Terra_Iterative.columns:  # drop the appended neighbour columns
        if 'add' in CCCOLT:
            del data_Terra_Iterative[CCCOLT]

    data_Terra_Iterative = pd.DataFrame(data_Terra_Iterative)
    data_Terra_Iterative = data_Terra_Iterative.set_index(data_Terra.index)
    data_Terra_Iterative.columns = ['NDVI_0']
    # data_Terra_Iterative["日期合并用"] = data_Terra_Iterative.index

    # turn zeros in the results back into np.nan
    data_Aqua_KNN.replace(0, np.nan, inplace=True)
    data_Terra_KNN.replace(0, np.nan, inplace=True)
    data_Aqua_ewm.replace(0, np.nan, inplace=True)
    data_Terra_ewm.replace(0, np.nan, inplace=True)
    data_Aqua_IDW.replace(0, np.nan, inplace=True)
    data_Terra_IDW.replace(0, np.nan, inplace=True)
    data_Aqua_Iterative.replace(0, np.nan, inplace=True)
    data_Terra_Iterative.replace(0, np.nan, inplace=True)
    """
    data_KNN = pd.merge(data_Terra_KNN,data_Aqua_KNN,how='right',on='日期合并用')
    data_ewm = pd.merge(data_Terra_ewm,data_Aqua_ewm,how='right',on='日期合并用')
    data_IDW = pd.merge(data_Terra_IDW,data_Aqua_IDW,how='right',on='日期合并用')
    data_Iterative = pd.merge(data_Terra_Iterative,data_Aqua_Iterative,how='right',on='日期合并用')
    """
    data_KNN = pd.concat([data_Terra_KNN, data_Aqua_KNN], axis=1, sort=True)
    data_ewm = pd.concat([data_Terra_ewm, data_Aqua_ewm], axis=1, sort=True)
                    res_output = np.sum(np.array(res_list))
                    try:
                        input_data.loc[indx, pollution] = res_output
                    except Exception as e:
                        print("Too many gaps, interpolation undefined:", e)
        return input_data

    data_pollution_IDW = get_IDW(data_pollution)

    # Spatial, global: iterative imputation; the feature with gaps is the target y, the others are predictors x
    data_pollution_Iterative = IterativeImputer(
        max_iter=10).fit_transform(data_pollution)
    data_pollution_Iterative = pd.DataFrame(data_pollution_Iterative)

    # turn zeros in the results back into np.nan
    data_pollution_KNN.replace(0, np.nan, inplace=True)
    data_pollution_ewm.replace(0, np.nan, inplace=True)
    data_pollution_IDW.replace(0, np.nan, inplace=True)
    data_pollution_Iterative.replace(0, np.nan, inplace=True)

    # align results of each method (restore the date index and column names)
    data_pollution_KNN = data_pollution_KNN.set_index(data_pollution.index)
    data_pollution_KNN.columns = data_pollution.columns
    # data_pollution_KNN["日期合并用"] = data_pollution_KNN.index
    data_pollution_ewm = data_pollution_ewm.set_index(data_pollution.index)
    data_pollution_ewm.columns = data_pollution.columns
    # data_pollution_ewm["日期合并用"] = data_pollution_ewm.index
    data_pollution_IDW = data_pollution_IDW.set_index(data_pollution.index)
    data_pollution_IDW.columns = data_pollution.columns
    # data_pollution_IDW["日期合并用"] = data_pollution_IDW.index
    data_pollution_Iterative = data_pollution_Iterative.set_index(
        data_pollution.index)
    data_pollution_Iterative.columns = data_pollution.columns

def get4method(xx152):
    # great-circle distance between two points
    def geo_distance(lng1_df, lat1_df, lng2_df, lat2_df):
        lng1_df, lat1_df, lng2_df, lat2_df = map(
            radians, [lng1_df, lat1_df, lng2_df, lat2_df])
        d_lon = lng2_df - lng1_df
        d_lat = lat2_df - lat1_df
        a = sin(d_lat / 2) ** 2 + cos(lat1_df) * \
            cos(lat2_df) * sin(d_lon / 2) ** 2
        dis = 2 * asin(sqrt(a)) * 6371.393 * 1000  # Earth radius in km
        return dis  # result is in metres
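
    # geo_distance implements the haversine formula; as a rough, illustrative check (the
    # coordinates below are not from the source data), geo_distance(116.40, 39.90, 121.47,
    # 31.23) returns about 1.07e6, i.e. the ~1,070 km Beijing-Shanghai great-circle distance.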

    # Spatial, local: hard to interpolate because a station and its neighbours often miss the same variable at the same time.
    def get_IDW(input_data):
        for darksky_weather in [
            'apparentTemperatureHigh',
            'apparentTemperatureLow',
            'apparentTemperatureMax',
            'apparentTemperatureMin',
            'cloudCover',
            'dewPoint',
            'humidity',
            'moonPhase',
            'ozone',
            'precipAccumulation',
            'precipIntensity',
            'precipIntensityMax',
            'pressure',
            'sunriseTime',
            'sunsetTime',
            'temperatureHigh',
            'temperatureLow',
            'temperatureMax',
            'temperatureMin',
            'uvIndex',
            'visibility',
            'windBearing',
            'windGust',
            'windSpeed',
            'apparentTemperature',
                'temperature']:  # weather features to impute
            for indx in input_data.index:  # walk the date index
                res_list = []
                weight_list = []
                if pd.isnull(input_data[darksky_weather][indx]):  # only fill missing cells
                    for item_idw in JCZ_info["监测站"]:  # first pass: distances of neighbours with data define the weights
                        if item_idw != name:
                            lng2 = JCZ_info[JCZ_info["监测站"] == item_idw]["经度"]
                            lat2 = JCZ_info[JCZ_info["监测站"] == item_idw]["纬度"]
                            dis_1 = geo_distance(
                                lng1, lat1, lng2, lat2)  # distance between the two stations
                            if dis_1 <= 50000:
                                data_to_add_in_1 = pd.read_excel(
                                    input_file_path_darksky_weather + item_idw + ".xlsx")
                                data_to_add_in_1 = data_to_add_in_1.set_index(
                                    "日期")  # index by date so the cell can be looked up below
                                if indx in data_to_add_in_1.index and pd.notnull(
                                        data_to_add_in_1[darksky_weather][indx]):
                                    weight_list.append(dis_1)
                    weight_sum = np.sum(np.array(weight_list))  # total distance, the weight denominator
                    for item_idw_2 in JCZ_info["监测站"]:  # second pass: apply the weights
                        if item_idw_2 != name:
                            lng2 = JCZ_info[JCZ_info["监测站"]
                                            == item_idw_2]["经度"]
                            lat2 = JCZ_info[JCZ_info["监测站"]
                                            == item_idw_2]["纬度"]
                            dis_1 = geo_distance(
                                lng1, lat1, lng2, lat2)  # distance between the two stations
                            if dis_1 <= 50000:
                                data_to_add_in = pd.read_excel(
                                    input_file_path_darksky_weather + item_idw_2 + ".xlsx")
                                data_to_add_in = data_to_add_in.set_index(
                                    "日期")  # index by date so the cell can be looked up below
                                if indx in data_to_add_in.index and pd.notnull(
                                        data_to_add_in[darksky_weather][indx]):
                                    res = (dis_1 / weight_sum) * \
                                        data_to_add_in[darksky_weather][indx]
                                    res_list.append(res)
                                    # print("cell interpolated:", res)
                    # if any term above is nan, this does not raise; it simply leaves the final value as nan
                    res_output = np.sum(np.array(res_list))
                    try:
                        input_data.loc[indx, darksky_weather] = res_output
                    except Exception as e:
                        print("Too many gaps, interpolation undefined:", e)
        print("[IDW]Finished.")
        return input_data
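
    # Note on get_IDW's weights: as written, each neighbour contributes (d_i / sum(d)) * x_i,
    # so the weight grows with distance. Textbook inverse-distance weighting would instead
    # use w_i = (1/d_i) / sum(1/d_j); a minimal sketch of that variant (hypothetical, not
    # what the code above does):
    #     inv = [1.0 / d for d in dists]
    #     value = sum((w / sum(inv)) * x for w, x in zip(inv, vals))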

    # monitoring stations
    jcz_152 = pd.read_excel(
        "D:\\毕业论文程序\\MODIS\\坐标\\站点列表-2018.11.08起_152.xlsx",
        sheet_name=xx152)
    jcz_152["监测站名称_152"] = jcz_152["城市"] + "-" + jcz_152["监测点名称"]
    error_list = []
    for input_file_name in jcz_152["监测站名称_152"]:
        input_file_name = input_file_name + ".xlsx"
        # if input_file_name in saved_list:
        # print("Already done:", input_file_name, xx152)
        # continue
        print("======== Processing %s ========" % input_file_name)
        try:
            # load the source data
            data_darksky_weather = pd.read_excel(
                input_file_path_darksky_weather + input_file_name)
            data_darksky_weather = data_darksky_weather.set_index('日期')
            # Temporal, local: KNN imputation; it uses the K most similar complete rows,
            # weighted by the mean squared difference of the other features, to fill each gap.
            data_darksky_weather_KNN = KNN(
                k=7).fit_transform(data_darksky_weather)
            data_darksky_weather_KNN = pd.DataFrame(data_darksky_weather_KNN)
            # Temporal, global: exponential smoothing (common for stock series); build a new
            # frame so the original data are not overwritten
            data_darksky_weather_ewm_mid = data_darksky_weather.ewm(
                com=0.5,
                ignore_na=True,
                adjust=True).mean()
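            # In pandas ewm, com=0.5 corresponds to a smoothing factor alpha = 1 / (1 + com)
            # = 2/3, so recent observations dominate the smoothed values used for filling.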
            data_darksky_weather_ewm = copy.deepcopy(
                data_darksky_weather)  # work on a copy so the original stays intact
            for columname in data_darksky_weather_ewm.columns:
                if data_darksky_weather[columname].count() != len(
                        data_darksky_weather):
                    loc = data_darksky_weather[columname][data_darksky_weather[columname].isnull(
                    ).values].index.tolist()
                    for nub in loc:
                        data_darksky_weather_ewm.loc[nub, columname] = \
                            data_darksky_weather_ewm_mid.loc[nub, columname]

            # Spatial methods
            data_darksky_weather_to_IDW = copy.deepcopy(data_darksky_weather)
            name = str(input_file_name).replace(".xlsx", "")  # variables for the current station
            lng1 = JCZ_info[JCZ_info["监测站"] == name]["经度"]
            lat1 = JCZ_info[JCZ_info["监测站"] == name]["纬度"]
            # Spatial, local: IDW (inverse-distance weighting)
            data_darksky_weather_IDW = get_IDW(data_darksky_weather_to_IDW)

            # Spatial, global: iterative regression; the feature with gaps is the target y, the others are predictors x
            merge_list = []  # one imputed column per weather feature for this station
            for darksky_weather_Iterative in [
                'apparentTemperatureHigh',
                'apparentTemperatureLow',
                'apparentTemperatureMax',
                'apparentTemperatureMin',
                'cloudCover',
                'dewPoint',
                'humidity',
                'moonPhase',
                'ozone',
                'precipAccumulation',
                'precipIntensity',
                'precipIntensityMax',
                'pressure',
                'sunriseTime',
                'sunsetTime',
                'temperatureHigh',
                'temperatureLow',
                'temperatureMax',
                'temperatureMin',
                'uvIndex',
                'visibility',
                'windBearing',
                'windGust',
                'windSpeed',
                'apparentTemperature',
                    'temperature']:
                # merging step: this station's column plus the same feature at other stations
                numb = 0
                data_darksky_weather_to_Iterative = copy.deepcopy(data_darksky_weather[[darksky_weather_Iterative]])
                data_darksky_weather_to_Iterative = data_darksky_weather_to_Iterative.reset_index()
                for item in JCZ_info["监测站"]:  # unlike the aerosol imputation method
                    if item != name:
                        # file of the station to append
                        data_to_add_in_to_Iterative = pd.read_excel(
                            input_file_path_darksky_weather + item + ".xlsx")
                        # rename the appended column
                        data_to_Iterative_concat = data_to_add_in_to_Iterative[[darksky_weather_Iterative, '日期']]
                        data_to_Iterative_concat.columns = [darksky_weather_Iterative + "_add%s" % numb,
                                                            '日期']  # with N neighbours the columns run _add0 .. _add(N-1)

                        data_darksky_weather_to_Iterative = pd.merge(data_darksky_weather_to_Iterative,
                                                                     data_to_Iterative_concat,
                                                                     how='left',
                                                                     on='日期')
                        data_darksky_weather_to_Iterative = data_darksky_weather_to_Iterative.set_index('日期')
                    numb += 1
                # iterative imputation step
                count_1 = 0
                for value_1 in data_darksky_weather_to_Iterative.sum():
                    if value_1 != 0:
                        count_1 += 1
                if count_1 > 1:  # at least two non-empty columns are needed to fit
                    data_darksky_weather_Iterative_to_merge = IterativeImputer(
                        max_iter=100).fit_transform(data_darksky_weather_to_Iterative)
                else:
                    data_darksky_weather_Iterative_to_merge = copy.deepcopy(
                        data_darksky_weather_to_Iterative)
                data_darksky_weather_Iterative_to_merge = pd.DataFrame(
                    data_darksky_weather_Iterative_to_merge)  # back to a DataFrame
                data_darksky_weather_Iterative_to_merge = data_darksky_weather_Iterative_to_merge.set_index(
                    data_darksky_weather_to_Iterative.index)  # restore the date index
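                # sklearn's IterativeImputer discards features that contain no observed values
                # at fit time, so the output can have fewer columns than the input; the branch
                # below restores those all-NaN columns.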
                if len(data_darksky_weather_Iterative_to_merge.columns) < len(data_darksky_weather_to_Iterative.columns):
                    reset_col_name_list = []  # name the non-NaN columns first
                    for col_name in data_darksky_weather_to_Iterative.columns:
                        if np.max(data_darksky_weather_to_Iterative[col_name]) > 0:
                            reset_col_name_list.append(col_name)
                    data_darksky_weather_Iterative_to_merge.columns = reset_col_name_list

                    for col_name in data_darksky_weather_to_Iterative.columns:  # re-add the dropped all-NaN columns
                        if col_name not in data_darksky_weather_Iterative_to_merge.columns:
                            # fill in the missing all-NaN column
                            data_darksky_weather_Iterative_to_merge[col_name] = np.nan
                else:
                    data_darksky_weather_Iterative_to_merge.columns = data_darksky_weather_to_Iterative.columns  # reset the column names
                for numb_del in range(numb):  # drop the appended neighbour columns again
                    if darksky_weather_Iterative + "_add%s" % numb_del not in data_darksky_weather_Iterative_to_merge.columns:
                        continue
                    else:
                        del data_darksky_weather_Iterative_to_merge[darksky_weather_Iterative +
                                                                    "_add%s" %
                                                                    numb_del]
                # the imputed single column of this weather feature for the station; the loop collects one per feature
                merge_list.append(data_darksky_weather_Iterative_to_merge)
            data_darksky_weather_Iterative_1 = pd.concat(
                merge_list, axis=1, sort=False)

            # turn zeros in the results back into np.nan
            data_darksky_weather_KNN.replace(0, np.nan, inplace=True)
            data_darksky_weather_ewm.replace(0, np.nan, inplace=True)
            data_darksky_weather_IDW.replace(0, np.nan, inplace=True)
            data_darksky_weather_Iterative_1.replace(0, np.nan, inplace=True)

            # align results of each method (restore the date index and column names)
            data_darksky_weather_KNN = data_darksky_weather_KNN.set_index(
                data_darksky_weather.index)
            data_darksky_weather_KNN.columns = data_darksky_weather.columns
            data_darksky_weather_ewm = data_darksky_weather_ewm.set_index(
                data_darksky_weather.index)
            data_darksky_weather_ewm.columns = data_darksky_weather.columns
            data_darksky_weather_IDW = data_darksky_weather_IDW.set_index(
                data_darksky_weather.index)
            data_darksky_weather_IDW.columns = data_darksky_weather.columns
            data_darksky_weather_Iterative = data_darksky_weather_Iterative_1.set_index(
                data_darksky_weather.index)
            data_darksky_weather_Iterative.columns = data_darksky_weather.columns

            # write all methods into one Excel file, one sheet per method

            sheet_name = ["KNN", "ewm", "IDW", "Iterative"]
            sheet_name_count = 0
            writer = pd.ExcelWriter(
                merge_output_file_path + '%s.xlsx' %
                (input_file_name.replace(
                    ".xlsx", "")))
            for methods_output in [
                    data_darksky_weather_KNN,
                    data_darksky_weather_ewm,
                    data_darksky_weather_IDW,
                    data_darksky_weather_Iterative]:
                methods_output.to_excel(
                    writer, sheet_name=sheet_name[sheet_name_count])
                sheet_name_count += 1
            writer.save()  # newer pandas versions drop ExcelWriter.save(); use writer.close() there

        except Exception as e:
            print(input_file_name, "raised an error:", e)
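
# Hypothetical driver (not part of the original example): get4method relies on module-level
# globals defined elsewhere in the script (JCZ_info, input_file_path_darksky_weather,
# merge_output_file_path, ...) and takes the name of a sheet in the station-list workbook, e.g.:
#     if __name__ == "__main__":
#         get4method("Sheet1")  # the sheet name here is a placeholder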

# Example #4

def get4method(xx152):
    # great-circle distance between two points
    def geo_distance(lng1_df, lat1_df, lng2_df, lat2_df):
        lng1_df, lat1_df, lng2_df, lat2_df = map(
            radians, [lng1_df, lat1_df, lng2_df, lat2_df])
        d_lon = lng2_df - lng1_df
        d_lat = lat2_df - lat1_df
        a = sin(d_lat / 2)**2 + cos(lat1_df) * cos(lat2_df) * sin(d_lon / 2)**2
        dis = 2 * asin(sqrt(a)) * 6371.393 * 1000  # Earth radius in km
        return dis  # result is in metres

    # Spatial, local: hard to interpolate because a station and its neighbours often miss the same pollutant at the same time.
    def get_IDW(input_data):
        for pollution in ["PM25", "PM10", "SO2", "NO2", "O3", "CO"]:  # pollutant columns to impute
            for indx in input_data.index:  # walk the date index
                res_list = []
                weight_list = []
                if pd.isnull(input_data[pollution][indx]):  # only fill missing cells
                    for item_idw in JCZ_info["监测站"]:  # first pass: distances of neighbours with data define the weights
                        if item_idw != name:
                            lng2 = JCZ_info[JCZ_info["监测站"] == item_idw]["经度"]
                            lat2 = JCZ_info[JCZ_info["监测站"] == item_idw]["纬度"]
                            dis_1 = geo_distance(lng1, lat1, lng2,
                                                 lat2)  # distance between the two stations
                            if dis_1 <= 50000:
                                data_to_add_in_1 = pd.read_excel(
                                    input_file_path_pollution + item_idw +
                                    ".xlsx")
                                data_to_add_in_1 = data_to_add_in_1.set_index(
                                    "日期")  # index by date so the cell can be looked up below
                                if indx in data_to_add_in_1.index and pd.notnull(
                                        data_to_add_in_1[pollution][indx]):
                                    weight_list.append(dis_1)
                    weight_sum = np.sum(np.array(weight_list))  # total distance, the weight denominator
                    for item_idw_2 in JCZ_info["监测站"]:  # second pass: apply the weights
                        if item_idw_2 != name:
                            lng2 = JCZ_info[JCZ_info["监测站"] ==
                                            item_idw_2]["经度"]
                            lat2 = JCZ_info[JCZ_info["监测站"] ==
                                            item_idw_2]["纬度"]
                            dis_1 = geo_distance(lng1, lat1, lng2,
                                                 lat2)  # distance between the two stations
                            if dis_1 <= 50000:
                                data_to_add_in = pd.read_excel(
                                    input_file_path_pollution + item_idw_2 +
                                    ".xlsx")
                                data_to_add_in = data_to_add_in.set_index(
                                    "日期")  # index by date so the cell can be looked up below
                                if indx in data_to_add_in.index and pd.notnull(
                                        data_to_add_in[pollution][indx]):
                                    res = (dis_1 / weight_sum
                                           ) * data_to_add_in[pollution][indx]
                                    res_list.append(res)
                                    # print("cell interpolated:", res)
                    res_output = np.sum(
                        np.array(res_list))  # if any term is nan this does not raise; the final value is simply nan
                    try:
                        input_data.loc[indx, pollution] = res_output
                    except Exception as e:
                        print("Too many gaps, interpolation undefined:", e)
        print("[IDW]Finished.")
        return input_data
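    # If no neighbour within 50 km has a value for a given date, res_list stays empty and
    # res_output is 0.0; the later replace(0, np.nan) step turns such cells back into NaN.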

    # monitoring stations
    jcz_152 = pd.read_excel("D:\\毕业论文程序\\MODIS\\坐标\\站点列表-2018.11.08起_152.xlsx",
                            sheet_name=xx152)
    jcz_152["监测站名称_152"] = jcz_152["城市"] + "-" + jcz_152["监测点名称"]
    for input_file_name in jcz_152["监测站名称_152"]:
        input_file_name = input_file_name + ".xlsx"
        if input_file_name in saved_list:
            print("Already done:", input_file_name, xx152)
            continue
        print("======== Processing %s ========" % input_file_name)
        # load the source data
        data_pollution = pd.read_excel(input_file_path_pollution +
                                       input_file_name)
        data_pollution = data_pollution.set_index('日期')
        # Temporal, local: KNN imputation; it uses the K most similar complete rows,
        # weighted by the mean squared difference of the other features, to fill each gap.
        data_pollution_KNN = KNN(k=7).fit_transform(data_pollution)
        data_pollution_KNN = pd.DataFrame(data_pollution_KNN)
        # Temporal, global: exponential smoothing (common for stock series); build a new
        # frame so the original data are not overwritten
        data_pollution_ewm_mid = data_pollution.ewm(com=0.5,
                                                    ignore_na=True,
                                                    adjust=True).mean()
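        # The exponentially weighted mean runs forward in time, so each filled cell draws only
        # on earlier observations; leading gaps before the first valid reading remain NaN.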
        data_pollution_ewm = copy.deepcopy(data_pollution)  # work on a copy so the original stays intact
        for columname in data_pollution_ewm.columns:
            if data_pollution[columname].count() != len(data_pollution):
                loc = data_pollution[columname][
                    data_pollution[columname].isnull().values].index.tolist()
                for nub in loc:
                    data_pollution_ewm.loc[
                        nub, columname] = data_pollution_ewm_mid[columname][nub]

        # Spatial methods
        data_pollution_to_IDW = copy.deepcopy(data_pollution)
        name = str(input_file_name).replace(".xlsx", "")  # variables for the current station
        lng1 = JCZ_info[JCZ_info["监测站"] == name]["经度"]
        lat1 = JCZ_info[JCZ_info["监测站"] == name]["纬度"]
        # Spatial, local: IDW (inverse-distance weighting)
        data_pollution_IDW = get_IDW(data_pollution_to_IDW)
        # Spatial, global: iterative regression; the pollutant with gaps is the target y, the others are predictors x
        merge_list = []  # one imputed column per pollutant for this station
        for pollution_Iterative in ["PM25", "PM10", "SO2", "NO2", "O3", "CO"]:
            concat_list = []  # the same pollutant collected from other stations
            numb = 0
            for item in JCZ_info["监测站"]:  # unlike the aerosol imputation method
                if item != name:
                    lng_2 = JCZ_info[JCZ_info["监测站"] == item]["经度"]
                    lat_2 = JCZ_info[JCZ_info["监测站"] == item]["纬度"]
                    dis_2 = geo_distance(lng1, lat1, lng_2, lat_2)  # distance between the two stations
                    if dis_2 <= 50000:  # only merge neighbouring stations within 50 km
                        data_to_add_in_to_Iterative = pd.read_excel(
                            input_file_path_pollution + item + ".xlsx")
                        data_to_add_in_to_Iterative = data_to_add_in_to_Iterative.set_index(
                            "日期")
                        data_to_Iterative_concat = data_to_add_in_to_Iterative[
                            pollution_Iterative]
                        data_to_Iterative_concat = pd.DataFrame(
                            data_to_Iterative_concat)
                        data_to_Iterative_concat.columns = [
                            pollution_Iterative + "_add%s" % numb
                        ]
                        concat_list.append(data_to_Iterative_concat)
                        numb += 1
            if len(concat_list) > 0:  # combine the station itself with its neighbours
                data_to_Iterative = pd.concat(concat_list, axis=1, sort=False)
                data_to_Iterative = pd.concat(
                    [data_pollution[pollution_Iterative], data_to_Iterative],
                    axis=1,
                    sort=False)
            else:
                data_to_Iterative = data_pollution[pollution_Iterative].copy()
                data_to_Iterative = pd.DataFrame(data_to_Iterative)
                data_to_Iterative.columns = [pollution_Iterative]  # the station itself only
            data_pollution_Iterative_to_merge = IterativeImputer(
                max_iter=10).fit_transform(data_to_Iterative)
            data_pollution_Iterative_to_merge = pd.DataFrame(
                data_pollution_Iterative_to_merge)
            data_pollution_Iterative_to_merge = data_pollution_Iterative_to_merge.set_index(
                data_to_Iterative.index)
            data_pollution_Iterative_to_merge.columns = data_to_Iterative.columns
            for numb_del in range(numb):
                del data_pollution_Iterative_to_merge[pollution_Iterative +
                                                      "_add%s" % numb_del]
            merge_list.append(data_pollution_Iterative_to_merge)
        data_pollution_Iterative = pd.concat(merge_list, axis=1, sort=False)
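        # IterativeImputer models each column that has gaps as a regression on the remaining
        # columns and cycles through them for several rounds, so each pollutant here is
        # predicted from the same pollutant recorded at the nearby stations.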

        # turn zeros in the results back into np.nan
        data_pollution_KNN.replace(0, np.nan, inplace=True)
        data_pollution_ewm.replace(0, np.nan, inplace=True)
        data_pollution_IDW.replace(0, np.nan, inplace=True)
        data_pollution_Iterative.replace(0, np.nan, inplace=True)

        # align results of each method (restore the date index and column names)

        data_pollution_KNN = data_pollution_KNN.set_index(data_pollution.index)
        data_pollution_KNN.columns = data_pollution.columns
        data_pollution_ewm = data_pollution_ewm.set_index(data_pollution.index)
        data_pollution_ewm.columns = data_pollution.columns
        data_pollution_IDW = data_pollution_IDW.set_index(data_pollution.index)
        data_pollution_IDW.columns = data_pollution.columns
        data_pollution_Iterative = data_pollution_Iterative.set_index(
            data_pollution.index)
        data_pollution_Iterative.columns = data_pollution.columns

        # write all methods into one Excel file, one sheet per method
        sheet_name = ["KNN", "ewm", "IDW", "Iterative"]
        sheet_name_count = 0
        writer = pd.ExcelWriter(merge_output_file_path + '%s.xlsx' %
                                (input_file_name.replace(".xlsx", "")))
        for methods_output in [
                data_pollution_KNN, data_pollution_ewm, data_pollution_IDW,
                data_pollution_Iterative
        ]:
            methods_output.to_excel(writer,
                                    sheet_name=sheet_name[sheet_name_count])
            sheet_name_count += 1
        writer.save()  # use writer.close() on pandas >= 2.0

# Example #5

# import DATASET 1
MissingData = pandas.read_csv('MissingData1.txt',
                              sep="\t",
                              header=None,
                              engine='python')
# Replace the missing-value sentinel with NaN, then fill with KNN (k=3)
MissingData = MissingData.replace(1.0000000000000001e+99, numpy.NaN)
MissingData = KNN(k=3).complete(MissingData)  # .complete() is the older fancyimpute API; newer releases use .fit_transform()
prediction = pandas.DataFrame(MissingData).to_csv('MissingData1Filled.txt',
                                                  index=False,
                                                  sep="\t",
                                                  header=None)

#import DATASET 2
MissingData = pandas.read_csv('MissingData2.txt',
                              sep="\t",
                              header=None,
                              engine='python')
# Replace the missing-value sentinel with NaN, then fill with KNN (k=3)
MissingData = MissingData.replace(1.0000000000000001e+99, numpy.NaN)
MissingData = KNN(k=3).complete(MissingData)
prediction = pandas.DataFrame(MissingData).to_csv('MissingData2Filled.txt',
                                                  index=False,
                                                  sep="\t",
                                                  header=None)

#import DATASET 3
MissingData = pandas.read_csv('MissingData3.txt',
                              sep="\t",
                              header=None,
                              engine='python')
# Replace the missing-value sentinel with NaN, then fill with KNN (k=3)
MissingData = MissingData.replace(1.0000000000000001e+99, numpy.NaN)
MissingData = KNN(k=3).complete(MissingData)
prediction = pandas.DataFrame(MissingData).to_csv('MissingData3Filled.txt',
                                                  index=False,
                                                  sep="\t",
                                                  header=None)
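
# The three blocks above repeat the same recipe; a compact equivalent (a sketch, assuming the
# same file naming) would be:
#     for i in (1, 2, 3):
#         df = pandas.read_csv('MissingData%d.txt' % i, sep="\t", header=None, engine='python')
#         df = df.replace(1.0000000000000001e+99, numpy.NaN)
#         pandas.DataFrame(KNN(k=3).complete(df)).to_csv(
#             'MissingData%dFilled.txt' % i, index=False, sep="\t", header=None)
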
    # Temporal, global: exponential smoothing (common for stock series)
    data_input_ewm = data_input.ewm(com=0.6,
                                    ignore_na=False,
                                    adjust=True).mean()  # parameter settings differ from the other scripts

    # Spatial, local: IDW
    data_input_IDW = get_IDW(data_input)

    # Spatial, global: iterative imputation; the feature with gaps is the target y, the others are predictors x
    data_input_Iterative = IterativeImputer(
        max_iter=10).fit_transform(data_input)
    data_input_Iterative = pd.DataFrame(data_input_Iterative)

    # turn zeros in the results back into np.nan
    data_input_KNN.replace(0, np.nan, inplace=True)
    data_input_ewm.replace(0, np.nan, inplace=True)
    data_input_IDW.replace(0, np.nan, inplace=True)
    data_input_Iterative.replace(0, np.nan, inplace=True)

    # align results of each method (restore the date index and column names)
    data_input_KNN = data_input_KNN.set_index(data_input.index)
    data_input_KNN.columns = data_input.columns
    data_input_KNN["日期合并用"] = data_input_KNN.index
    data_input_ewm = data_input_ewm.set_index(data_input.index)
    data_input_ewm.columns = data_input.columns
    data_input_ewm["日期合并用"] = data_input_ewm.index
    data_input_IDW = data_input_IDW.set_index(data_input.index)
    data_input_IDW.columns = data_input.columns
    data_input_IDW["日期合并用"] = data_input_IDW.index
    data_input_Iterative = data_input_Iterative.set_index(data_input.index)
    data_input_Iterative.columns = data_input.columns
    data_input_Iterative["日期合并用"] = data_input_Iterative.index