Code example #1
def restore_power_input(input_fmt,
                        power_or_path,
                        fmt='on',
                        normalize=True,
                        clip=True):
    if isinstance(power_or_path, str):
        power = Power(fmt=fmt)
        power.load_power(power_or_path, fmt=fmt)
        power.extend_lp_info(real_only=False)
    else:
        power = power_or_path
    if normalize:
        input_fmt['value'] = mm_denormalize(input_fmt['value'].values,
                                            input_fmt['min'].values,
                                            input_fmt['max'].values)
    if clip:
        input_fmt['value'] = np.clip(input_fmt['value'], input_fmt['min'],
                                     input_fmt['max'])
    groups = input_fmt.groupby(['etype', 'dtype'], sort=False)
    for (e, d), sub in groups:
        values = sub[['name', 'value']].set_index('name')
        values = values.reindex(power.data[e]['name'])
        na_idx = np.isnan(values['value'])
        values.loc[na_idx, 'value'] = power.data[e][d].values[na_idx]  # keep current values where the input has no entry
        set_values(power.data, e, d, values['value'].values)
    return power
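
Usage note (not from the original repository): restore_power_input expects input_fmt to be a pandas DataFrame whose rows name the quantities to write back into the Power instance. A minimal sketch of that table, assuming the column meanings implied by the code above; the element names, limits and path are placeholders.

import pandas as pd

# Hypothetical input_fmt: 'etype'/'dtype' select power.data[etype][dtype],
# 'name' is matched against power.data[etype]['name'], and 'min'/'max' are
# the bounds used by mm_denormalize / np.clip when normalize/clip are True.
input_fmt = pd.DataFrame({
    'etype': ['generator', 'load'],
    'dtype': ['p0', 'p0'],
    'name': ['G1', 'L1'],
    'value': [0.8, 0.5],
    'min': [0., 0.],
    'max': [600., 300.],
})
# power = restore_power_input(input_fmt, 'path/to/case', fmt='on')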
Code example #2
File: env.py Project: zll302/PowerAI
def load_trend(path, fmt, inputs):
    """ 重新加载一个episode中每个step的潮流信息、评分和稳定结果,用于事后分析。

    :param path: str. Episode目录。
    :param fmt: str. 数据格式类型。
    :param inputs: {etype: [str]}. 强化学习模型的输入量,以及'score'和'res'。
    :return: {etype: np.array}.
    """
    data = {}
    for i in range(len(os.listdir(path))):
        sub_path = os.path.join(path, str(i))
        if not os.path.exists(os.path.join(sub_path, 'LF.L1')):
            continue
        if fmt is not None:
            power = Power(fmt)
            power.load_power(sub_path, fmt=fmt, st=False, station=False)
            for etype, columns in inputs.items():
                if etype not in power.data:
                    continue
                df = power.data[etype][columns]
                for col in columns:
                    name = '_'.join((etype, col))
                    dict_list_append(data, name, df[col])
        score, _, res = OpEnv.make_assessment(os.path.join(path, str(i)))
        if 'score' in inputs:
            dict_list_append(data, 'score', pd.Series([score]))
        if 'res' in inputs:
            dict_list_append(data, 'res', res)
    for k in data:
        data[k] = pd.concat(data[k], axis=1)
        data[k].columns = list(range(data[k].shape[1]))
    return data
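
A hedged usage sketch for load_trend; the import path, episode directory and inputs dict are assumptions, not taken from the source.

# from env import load_trend  # assumed import; load_trend is defined in env.py
inputs = {'generator': ['p', 'q'], 'load': ['p', 'q'], 'score': [], 'res': []}
# trend = load_trend('work/ep000001', fmt='on', inputs=inputs)
# trend['generator_p']  # DataFrame: rows = elements, one column per step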
Code example #3
File: action.py Project: sdy99/PowerAI
def load_actions(power, base_path, out_path, files, fmt='on', st=True, wmlf=True):
    """ 从文件加载并执行修改动作

    :param power: Power. Power实例。
    :param base_path: str. 基础数据目录。
    :param out_path: str. 输出目录,输入的动作文件也在这里。
    :param files: [str]. 文件列表。
    :param fmt: str. 数据格式类型。
    :param st: bool. 是否输出ST*文件。
    :param wmlf: bool. 是否计算潮流。
    :return dict. 每个文件对应目录的输出是否成功(潮流是否收敛)
    """
    ret = {}
    if not power:
        power = Power(fmt=fmt)
        power.load_power(base_path, fmt=fmt, lp=False, st=st)
        power.set_index(idx='name')
    for f in files:
        actions = pd.read_csv(os.path.join(out_path, f), encoding='gbk', index_col=False)
        for _, etype, idx, dtype, value in actions.itertuples():
            set_values(power.data, etype, dtype, {idx: value})
        name = f[:f.index('.')] if '.' in f else f  # fall back to the full file name
        path = os.path.join(out_path, name)
        power.save_power(path, fmt=power.fmt, lp=False, st=st)
        shutil.copy(os.path.join(base_path, 'LF.L0'), path)
        if st:
            shutil.copy(os.path.join(base_path, 'ST.S0'), path)
        if wmlf:
            call_wmlf(path)
            ret[name] = check_lfcal(path)
        else:
            ret[name] = True
    return ret
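
A hedged sketch of the action-file layout and a call; the column order is inferred from the itertuples() unpacking above, and all paths and file names are placeholders.

# action_001.csv (gbk encoded), columns: etype,idx,dtype,value, e.g.
#   generator,G1,p0,350.0
#   load,L5,q0,-20.0
files = ['action_001.csv']  # hypothetical file name under out_path
# ret = load_actions(None, 'data/base', 'data/out', files, fmt='on', st=True, wmlf=True)
# ret -> {'action_001': True} when the power flow converges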
Code example #4
def run_online(online_path, nets, input_fmts, lmd_indices):
    results = {'cctResult': [], 'cctLMD': []}
    power = Power(fmt='on')
    power.load_power(online_path, st=False)
    for i in range(len(nets)):
        input_data = load_power_input(input_fmts[i], power, 'on')
        input_data = input_data.astype(np.float32)[np.newaxis, :]
        output_size = len(output_fmts[i])
        pre = nets[i].pre_model.predict(input_data)
        grads = run_gradient(nets[i].pre_model, input_data)
        for j in range(output_size):
            results['cctResult'].append({'lineName': output_fmts[i][j],
                                         'cctValue': pre[0][j]})
            res = []
            for idx in np.abs(grads[j][0]).argsort()[::-1]:
                if idx not in lmd_indices[i]:
                    continue
                name = '_'.join(input_fmts[i].loc[idx, ['name', 'dtype']])
                res.append({'lineName': output_fmts[i][j],
                            'genName': name,
                            'cctValue': grads[j][0, idx].numpy()})
                if len(res) >= 10:
                    break
            results['cctLMD'].extend(res)
    return results
Code example #5
def load_power_input(input_fmt, power_or_path, fmt='on', normalize=True):
    if isinstance(power_or_path, str):
        power = Power(fmt=fmt)
        power.load_power(power_or_path, fmt=fmt)
        power.extend_lp_info(real_only=False)
    else:
        power = power_or_path
    values = np.full(input_fmt.shape[0], np.nan, dtype=np.float32)
    for i, (idx, etype, dtype, name) in \
            enumerate(input_fmt[['etype', 'dtype', 'name']].itertuples()):
        if etype == 'ed':
            # TODO: deal with ed
            continue
        else:
            try:
                v = power.data[etype].loc[power.data[etype]['name'] == name,
                                          dtype]
                if len(v) > 0:
                    values[i] = v.values[0]
                # values.append(v.values[0] if len(v) > 0 else np.nan)
            except KeyError:
                # print(etype, dtype, name)
                # values.append(np.nan)
                continue
    if normalize:
        values = mm_normalize(values, input_fmt['min'].values,
                              input_fmt['max'].values)
    values[np.isnan(values)] = -1.
    return values
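
load_power_input is the read-side counterpart of restore_power_input above: it collects the values named in input_fmt from a Power instance (or case directory) and min-max normalizes them. A hedged call sketch, reusing the hypothetical input_fmt from example #1; the path is a placeholder.

# x = load_power_input(input_fmt, 'path/to/case', fmt='on', normalize=True)
# x is a float32 vector of length len(input_fmt); entries that cannot be
# resolved are set to -1. after normalization.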
Code example #6
File: test_yz.py Project: sdy99/PowerAI
def test_calc_ed():
    path = '../dataset/wepri36'
    fmt = 'off'
    power = Power(fmt)
    power.load_power(path, fmt=fmt, lp=False, st=False)
    ed = calc_ed_from_power(power, island=0, node_type='bus', x_only=False)
    assert ed[1][1] == 0.
    assert ed[1][36] == 0.07283931941329119


# x = ed_map_tsne(ed)
# groups = group_kmeans(ed, 10)
Code example #7
File: test_topo.py Project: sdy99/PowerAI
def test_topo():
    path = '../dataset/wepri36'
    fmt = 'off'
    power = Power(fmt)
    power.load_power(path, fmt=fmt, lp=False, st=False, station=True)
    graph1 = PowerGraph(power,
                        graph_type='single',
                        node_type='station',
                        on_only=True)
    islands1 = graph1.get_islands(min_num=5)
    print(islands1)
    graph2 = PowerGraph(power,
                        graph_type='multi',
                        node_type='bus',
                        on_only=False,
                        edge_columns=['x'])
    islands2 = graph2.get_islands(min_num=10)
    print(islands2)
Code example #8
def restore_power_input(input_fmt, power_or_path, fmt='on', normalize=True):
    if isinstance(power_or_path, str):
        power = Power(fmt=fmt)
        power.load_power(power_or_path, fmt=fmt)
        power.extend_lp_info(real_only=False)
    else:
        power = power_or_path
    if normalize:
        input_fmt['value'] = mm_denormalize(input_fmt['value'].values,
                                            input_fmt['min'].values,
                                            input_fmt['max'].values)
    group = input_fmt.groupby(['etype', 'dtype'], sort=False)
    for (e, d), idx in group.indices.items():
        values = power.data[e][[d, 'name']].copy()
        indices = values.index
        values.set_index('name', inplace=True)
        values = input_fmt.loc[idx, ['name', 'value']].set_index('name')
        values.index = indices
        set_values(power.data, e, d, values['value'])
    return power
Code example #9
def random_generate(base_path, fmt, size, out_path,
                    min_p=None, max_p=None, gl_ratio=0.9,
                    random_q0=True, random_open=False, open_prob=[0.8]):
    power = Power(fmt=fmt)
    power.load_power(base_path, fmt=fmt)
    generators_bak = power.data['generator'].copy()
    loads_bak = power.data['load'].copy()
    if random_open:
        aclines_bak = power.data['acline'].copy()
    min_p = np.sum(generators_bak['pmin']) if not min_p else min_p
    max_p = np.sum(generators_bak['pmax']) if not max_p else max_p
    p0 = np.sum(generators_bak['p0'])
    shutil.rmtree(out_path, ignore_errors=True)
    os.mkdir(out_path)
    conv_count = 0
    for i in range(size):
        generators = power.data['generator'] = generators_bak.copy()
        loads = power.data['load'] = loads_bak.copy()
        if random_open:
            power.data['acline'] = aclines_bak.copy()
        p = min_p + (max_p - min_p) * np.random.rand()
        distribute_generators_p(generators, p - p0, sigma=0.2)
        gen_p = np.sum(generators['p0'])
        load_p = np.sum(loads['p0'])
        distribute_loads_p(loads, gl_ratio * gen_p - load_p,
                           p_sigma=0.2, keep_factor=False)
        if random_q0:
            random_load_q0(loads, sigma=None)
        if random_open:
            open_num = np.sum(np.random.rand(1) > open_prob)
            random_open_acline(power, num=open_num)
        path = os.path.join(out_path, '%08d' % i)
        power.save_power(path, fmt, lf=True, lp=False, st=True)
        shutil.copy(os.path.join(base_path, 'LF.L0'), path)
        shutil.copy(os.path.join(base_path, 'ST.S0'), path)
        call_wmlf(path)
        if check_lfcal(path):
            conv_count += 1
    print('Random generate done: %d / %d' % (conv_count, size))
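
A hedged call sketch; the directory names are placeholders, and the convergence summary is printed by the function itself.

# random_generate('data/base', fmt='on', size=100, out_path='data/random',
#                 gl_ratio=0.9, random_q0=True, random_open=True,
#                 open_prob=[0.8, 0.95])
# Each sample is written to data/random/%08d; convergence is checked with check_lfcal.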
Code example #10
File: test_power.py Project: sdy99/PowerAI
def test_power(real_data=False):
    # set_format_version({'mdc': 2.3})
    path = 'dataset/wepri36'
    fmt = 'off'
    power = Power(fmt)
    power.load_power(path, fmt=fmt)
    if real_data:
        path = os.path.join(os.path.expanduser('~'), 'data', 'gd',
                            '2019_12_23T12_15_00')
        fmt = 'on'
        power = Power(fmt)
        power.load_power(path, fmt=fmt)
        power.save_power(path + '/out', fmt=fmt)
        path = 'C:/PSASP_Pro/2020国调年度/冬低731'
        fmt = 'off'
        power = Power(fmt)
        power.load_power(path, fmt=fmt, lp=False)
        power.save_power(path + '/out', fmt=fmt, lp=False)
Code example #11
File: dataset.py Project: sdy99/PowerAI
def pack_data(path,
              file_name,
              fmt='on',
              types=None,
              combine_model=True,
              model_columns=None,
              restore_columns=None,
              ori_order=True):
    format_keys = format_key()
    if not types:
        types = format_keys['types']
    if not restore_columns:
        restore_columns = restore_column()
    types = [t for t in types if t in restore_columns]
    all_columns = get_all_column(types, file_format()[fmt])
    name_indices = name_index_dict()
    if combine_model:
        if not model_columns:
            model_columns = model_column()
        models = {}
    for t in types:
        miss = [col for col in restore_columns[t] if col not in all_columns[t]]
        restore_columns[t] = [
            col for col in restore_columns[t]
            if col in all_columns[t] and col not in name_indices[t]
        ]
        restore_columns[t] = restore_columns[t] + ['flag']
        if combine_model:
            miss.extend(
                [col for col in model_columns[t] if col not in all_columns[t]])
            model_columns[t] = [
                col for col in model_columns[t]
                if col in all_columns[t] and col not in name_indices[t]
            ]
        if miss:
            print('[%s] data miss: ' % t, miss)

    data = dict([(t, {}) for t in types])
    for d in os.listdir(path):
        if not os.path.exists(os.path.join(path, d, 'LF.L1')):
            continue
        power = Power(fmt=fmt)
        power.load_power(os.path.join(path, d), fmt=fmt, station=False)
        for t in types:
            power.data[t].set_index(name_indices[t], inplace=True)
            if ori_order and 'ori_order' in power.data[t]:
                data[t][d] = power.data[t][restore_columns[t] + ['ori_order']]
            else:
                data[t][d] = power.data[t][restore_columns[t]]
            if combine_model:
                if t in models:
                    idx = power.data[t].index.difference(models[t].index)
                    models[t] = pd.concat(
                        [models[t], power.data[t].loc[idx, model_columns[t]]])
                else:
                    models[t] = power.data[t][model_columns[t]]

    package = {}
    for t in types:
        package[t] = pd.concat(data[t].values(), keys=data[t].keys())
        if combine_model and t in models:
            package['model_' + t] = models[t]
    hdf = pd.HDFStore(file_name, 'w', complevel=9, complib='blosc')
    for k in package:
        hdf.put(key=k, value=package[k])
    hdf.close()
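
pack_data writes a single HDF5 store holding one DataFrame per element type (indexed by sample directory and element name) plus optional 'model_<type>' tables. A hedged sketch of writing and reading it back; the paths are placeholders.

import pandas as pd

# pack_data('data/samples', 'data/samples.h5', fmt='on', combine_model=True)
# with pd.HDFStore('data/samples.h5', 'r') as hdf:
#     generators = hdf['generator']        # per-sample restore columns
#     gen_model = hdf['model_generator']   # deduplicated model columns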
Code example #12
def run_trend(trend_path, nets, input_fmts, lmd_indices):
    results = {'cctResultTrend': [], 'cctLMDTrend': [], 'cctRangeTrend': []}
    paths = [p for p in os.listdir(trend_path)
             if 'LF.L1' in os.listdir(os.path.join(trend_path, p))]
    paths.sort()
    for p in paths:
        power = Power(fmt='on')
        power.load_power(os.path.join(trend_path, p), st=False)
        for i in range(len(nets)):
            input_data = load_power_input(input_fmts[i], power, 'on')
            input_data = input_data.astype(np.float32)[np.newaxis, :]
            output_size = len(output_fmts[i])
            grad_data = np.zeros((4, input_data.shape[1]), dtype=np.float32)
            grads = run_gradient(nets[i].pre_model, input_data)
            random_size = 1000
            random_data = np.random.rand(random_size, input_data.shape[1])
            random_data = random_data.astype(np.float32) * 0.1 - 0.05
            random_data += input_data
            for j in range(output_size):
                grad_data[0, :] = input_data + np.sign(grads[j]) * 0.02
                grad_data[1, :] = input_data - np.sign(grads[j]) * 0.02
                key_indices = []
                key_names = []
                res = []
                for idx in np.abs(grads[j][0]).argsort()[::-1]:
                    if idx not in lmd_indices[i]:
                        continue
                    name = '_'.join(input_fmts[i].loc[idx, ['name', 'dtype']])
                    res.append(
                        {
                            'lineName': output_fmts[i][j],
                            'genName': name,
                            'value': grads[j][0, idx].numpy(),
                            'time': p
                        }
                    )
                    key_indices.append(idx)
                    key_names.append(name)
                    if len(res) >= 10:
                        break
                results['cctLMDTrend'].extend(res)
                key_onehot = np.zeros((1, input_data.shape[1]), dtype=np.float32)
                key_onehot[0, key_indices] = 0.2
                grad_data[2, :] = input_data + np.sign(grads[j]) * key_onehot
                grad_data[3, :] = input_data - np.sign(grads[j]) * key_onehot
                test_data = np.vstack([input_data, grad_data, random_data])
                test_data = np.clip(test_data, -1., 1.)
                pre = nets[i].pre_model.predict(test_data)
                results['cctResultTrend'].append(
                    {
                        'lineName': output_fmts[i][j], 'cctValue': pre[0, j],
                        'cctMax': np.max(pre[:, j]), 'cctMin': np.min(pre[:, j]),
                        'time': p
                    }
                )
                stable_valid = np.all(pre >= 0.1, axis=1)
                stable_data = test_data[stable_valid]
                min_data = np.min(stable_data, axis=0)
                max_data = np.max(stable_data, axis=0)
                range_data = mm_denormalize(np.vstack([min_data, max_data]),
                                            input_fmts[i]['min'].values, input_fmts[i]['max'].values)
                for ii, idx in enumerate(key_indices):
                    results['cctRangeTrend'].append(
                        {
                            'lineName': output_fmts[i][j], 'genName': key_names[ii],
                            'rangeMin': range_data[0, idx], 'rangeMax': range_data[1, idx],
                            'time': p
                        }
                    )
    return results
Code example #13
File: high_dg.py Project: sdy99/PowerAI
class HighDG(ADG):
    def __init__(self, work_path, fmt, res_type, nn_size):
        super().__init__(work_path, fmt)
        self.mode = 'one'
        self.generated_num = 0
        self.nn_size = nn_size
        self.base_path = os.path.join(work_path, 'net')
        self.base_power = Power(fmt=fmt)
        self.base_power.load_power(self.base_path, fmt=fmt)
        self.input_fmt = load_input_fmt(os.path.join(work_path, 'cct', 'predict', 'input.txt'),
                                        input_mode=True)
        self.feature_model = load_model(os.path.join(work_path, res_type, 'feature'),
                                        '', suffix='tf')
        self.data_set = GHData(work_path, '', '')
        self.data_set.load_data(work_path)
        self.features = pd.DataFrame(
            self.feature_model.predict(self.data_set.input_data.values),
            index=self.data_set.input_data.index)
        self.init_assess = self.distribution_assess(nn_size)

    def distribution_assess(self, nn_size=-1):
        dists = pairwise_distances(self.features)
        if nn_size < 1:
            return np.average(dists) / np.max(dists)
        return np.average(np.partition(dists, nn_size + 1)[:, 1:nn_size + 1])

    def choose_samples(self, size=1):
        idx = np.arange(self.features.shape[0])
        np.random.shuffle(idx)
        return self.features.index[idx[:size]]

    def generate_one(self, power, idx, out_path):
        print(idx, out_path)
        if not power:
            self.input_fmt['value'] = self.data_set.ori_data.loc[idx].values
            power = restore_power_input(self.input_fmt, self.base_power, normalize=False)
            print('old=', self.input_fmt['value'])
        # TODO
        self.input_fmt['value'] *= np.random.normal(loc=1.0, scale=0.1,
                                                    size=self.input_fmt.shape[0])
        power = restore_power_input(self.input_fmt, power, normalize=False)
        print('new=', self.input_fmt['value'])
        shutil.rmtree(out_path, ignore_errors=True)
        power.save_power(out_path, fmt=self.fmt, lp=False)
        shutil.copy(os.path.join(self.base_path, 'LF.L0'), out_path)
        shutil.copy(os.path.join(self.base_path, 'ST.S0'), out_path)
        call_wmlf(out_path)
        ret = check_lfcal(out_path)
        if ret:
            idx = os.path.split(out_path)[-1]
            new_data = load_power_input(self.input_fmt, power)
            new_data = self.data_set.normalize_it(new_data)
            new_feature = self.feature_model.predict(new_data[np.newaxis, :])
            self.features.loc[idx] = new_feature.reshape(-1)
            self.generated_num += 1
        return ret

    def remove_samples(self):
        pass

    def done(self):
        assess = self.distribution_assess(self.nn_size)
        print('num=%d, init=%.6f, now=%.6f' % (self.generated_num, self.init_assess, assess))
        return self.generated_num > 0
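
A hedged sketch of one generation round with HighDG; the driving loop actually lives in the ADG base class (not shown here), so this is only an illustration and the arguments are placeholders.

# import os
# dg = HighDG(work_path='~/data/wepri36/gen', fmt='off', res_type='cct', nn_size=16)
# for idx in dg.choose_samples(size=4):
#     out_path = os.path.join(dg.work_path, 'new', str(dg.generated_num))
#     dg.generate_one(None, idx, out_path)
# dg.done()  # prints the initial vs. current distribution assessment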
Code example #14
def get_model_name(model_type, model_no):
    model_type = model_type.lower()
    if model_type == "avr":
        if model_no in range(3, 11):
            return 'avr310' if 'avr310' in datalib_columns else None
        elif model_no in range(11, 13):
            return 'avr1112' if 'avr1112' in datalib_columns else None
    if model_type in datalib_columns:
        return model_type
    elif model_type + str(model_no) in datalib_columns:
        return model_type + str(model_no)
    return None


if __name__ == '__main__':
    from core.power import Power
    path = 'D:/PSASP_Pro/2020国调年度/冬低731'
    fmt = 'off'
    power = Power(fmt)
    power.load_power(path, fmt=fmt, lp=False, st=True, station=True)
    with timer('Load datalib'):
        datalib = load_datalib(path+'/DATALIB.DAT')
    iwant = [('gen', 3, 11),
             ('avr', 12, 18),
             ('gov', 7, 3),
             ('pss', 2, 7)]
    for t, m, p in iwant:
        name = get_model_name(t, m)
        if name is not None:
            print(datalib[name].loc[p])
Code example #15
File: env.py Project: zll302/PowerAI
class OpEnv(object):
    def __init__(self, base_path, work_path, inputs, fmt='on'):
        """ 初始化。

        :param base_path: str. 初始断面存放目录。
        :param work_path: str. Env工作目录。
        :param inputs: {etype: [str]}. 强化学习模型的输入量。
        :param fmt: str. 数据格式类型。
        """
        self.power = Power(fmt)
        self.fmt = fmt
        self.base_path = base_path
        self.work_path = work_path
        self.episode = 0
        self.step = 0
        self.assessments = []
        self.state0 = None
        self.min_max = None
        self.init_load_p = 0.
        self.inputs = inputs

    def get_ep_path(self, ep=None, step=None):
        """ 获取指定episode和step的工作目录。

        :param ep: int. 指定episode;
                   or None. 使用当前episode。
        :param step: int. 指定step;
                     or None. 返回episode对应目录。
        :return: str.
        """
        if ep is None:
            ep = self.episode
        if step is None:
            return os.path.join(self.work_path, 'ep%06d' % ep)
        return os.path.join(self.work_path, 'ep%06d' % ep, str(step))

    def reset(self, random=True, load_path=None):
        """ 重置潮流,并进行评估。

        :param random: bool. 是否随机初始化潮流。
        :param load_path: str. 初始断面目录;
                          or None. 用self.base_path作为初始断面。
        :return: bool. 是否重置成果(not done)
        """
        if load_path is None:
            self.power.load_power(self.base_path, fmt=self.fmt)
        else:
            self.power.load_power(load_path, fmt=self.fmt)
        self.power.data['generator']['p0'] = self.power.data['generator']['p']
        self.episode += 1
        path = self.get_ep_path()
        if os.path.exists(path):
            shutil.rmtree(path)
        os.mkdir(path)
        self.step = -1
        self.assessments = []
        if random:
            generators = self.power.data['generator']
            loads = self.power.data['load']
            max_p, gen_p = np.sum(generators[['pmax', 'p']])
            p = max_p * 0.4 + max_p * 0.5 * np.random.rand()  # 40% ~ 90%
            distribute_generators_p(generators, p - gen_p, sigma=0.2)
            generators['p0'] = np.clip(generators['p0'],
                                       generators['pmin'], generators['pmax'])
            gen_p = np.sum(generators['p0'])
            load_p = np.sum(loads['p'])
            distribute_loads_p(loads, 0.9 * gen_p - load_p, p_sigma=0.1, keep_factor=False)
            random_load_q0(loads, sigma=None)
        self.min_max = None
        self.init_load_p = 0.
        self.state0, _, done = self.run_step()
        return not done

    def get_state(self, normalize=True):
        """ 获取当前输入量数值

        :param normalize: bool. True返回归一化数据;False返回原始数据。
        :return: np.array.
        """
        state = []
        for etype, columns in self.inputs.items():
            state.append(self.power.data[etype][columns].values.T.reshape(-1))
        state = np.concatenate(state)
        if normalize:
            state = (state - self.min_max[:, 0]) \
                    / (self.min_max[:, 1] - self.min_max[:, 0] + EPS)
        return state

    def load_init_info(self):
        """ 获取初始状态,包括输入量的上下限和总负荷功率。

        """
        values = []
        for etype, columns in self.inputs.items():
            for col in columns:
                if col == 'p0':
                    values.append(self.power.data[etype][['pmin', 'pmax']].values)
                elif col == 'q0':
                    values.append(self.power.data[etype][['qmin', 'qmax']].values)
                else:
                    continue
        self.min_max = np.concatenate(values)
        loads = self.power.data['load']
        self.init_load_p = np.sum(loads.loc[loads['mark'] == 1, 'p0'])

    @staticmethod
    def make_assessment(path):
        """ 对断面稳定结果进行打分。

        :param path: str. 指定断面目录,包含*.res结果文件。
        :return: (float, bool, np.array). 评分、结束标志、稳定结果。
        """
        headers = {'CCTOUT': 'no desc name cct gen1 gen2 times tmp1 tmp2'}
        update_table_header(path, 'res', headers)
        iwant = {'CCTOUT': ['name', 'cct']}
        results = []
        for file_name in os.listdir(path):
            if file_name.endswith('.res'):
                cct = read_efile(os.path.join(path, file_name), iwant.keys(), iwant)
                results.append(cct['CCTOUT']['cct'])
        results = pd.concat(results)
        values = results.values.reshape(-1,)
        values = values[~np.isnan(values)]
        values = values[values > 0.]
        if len(values) == 0 or np.min(values) < 0.1:
            return ST_UNSTABLE_REWARD, True, results
        # thrs = [0.3, 0.5]
        thrs = [1.0]
        for thr in thrs:
            values_lt = values[values < thr]
            if len(values_lt) > 0:
                return np.average(values_lt), False, results
        return thrs[-1], False, results

    def run_step(self, actions=None):
        """ 按照给定actions运行一步。

        :param actions: str. “random",随机初始化,仅用于测试;
                        or dict. 动作集合,由load_action函数执行动作。
        :return: (np.array, float, bool). 状态量、回报值、结束标志。
        """
        self.step += 1
        path = self.get_ep_path(step=self.step)
        if actions == 'random':  # just for test
            distribute_generators_p(self.power.data['generator'], 1., sigma=0.1)
            distribute_loads_p(self.power.data['load'], 1., p_sigma=0.1,
                               keep_factor=True, factor_sigma=0.1)
        elif actions is not None:
            self.load_action(actions)
        self.power.save_power(path, self.fmt, lf=True, lp=False, st=True)
        shutil.copy(os.path.join(self.base_path, 'LF.L0'), path)
        shutil.copy(os.path.join(self.base_path, 'ST.S0'), path)
        call_wmlf(path)
        if check_lfcal(path):
            self.power.drop_data(self.fmt, 'lp')
            self.power.load_power(path, self.fmt, lf=False, lp=True, st=False)
            self.power.data['generator']['p0'] = self.power.data['generator']['p']
            if self.step == 0:
                self.load_init_info()
            state = self.get_state()
            if os.name != 'nt':
                call_psa(path)
                assess, done, _ = self.make_assessment(path)
            else:
                assess = np.random.rand()
                done = (assess < 0.1)
        else:
            state = []
            assess = PF_NOTCONV_REWARD
            done = True
        self.assessments.append(assess)
        if self.step == 0:
            reward = 0.
        else:
            reward = self.assessments[-1] - self.assessments[-2]
            if not done:
                reward *= CCT_CHANGE_RATIO
                loads = self.power.data['load']
                load_p = np.sum(loads.loc[loads['mark'] == 1, 'p0'])
                if abs(load_p - self.init_load_p) / self.init_load_p >= LOAD_CHANGE_THR:
                    reward += PF_LOADFULL_REWARD
                    done = True
            else:
                pass
                # reward = assess
        return state, reward, done

    def load_action(self, actions):
        """ 加载潮流修改动作,但不计算潮流。

        :param actions: dict. 动作字典:'load_ratio_p'~按比例调整负荷有功;
                                       'generator_ratio_p'~按比例调整机组有功。
        :return: np.array. 归一化的状态量。
        """
        for k in actions:
            if k == 'load_ratio_p':
                set_gl_p0(self.power.data['load'],
                          self.power.data['load']['p0'] * actions[k],
                          keep_factor=False, clip=False)
            elif k == 'generator_ratio_p':
                set_gl_p0(self.power.data['generator'],
                          self.power.data['generator']['p0'] * actions[k],
                          keep_factor=False, clip=True)
        return self.get_state()

    def print_info(self, state=True, assessment=True):
        """ 打印每步信息。

        :param state: bool. 是否打印状态量。
        :param assessment: bool. 是否打印评分值。
        """
        print('episode = %d, step = %d' % (self.episode, self.step))
        if state:
            print('state =', self.get_state())
        if assessment:
            print('assessment =', self.assessments)
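
A hedged sketch of an interaction loop with OpEnv; the inputs dict and the constant-ratio "policy" are placeholders, not part of the source.

# inputs = {'generator': ['p0'], 'load': ['p0', 'q0']}
# env = OpEnv('data/base', 'work', inputs, fmt='on')
# if env.reset(random=True):
#     done = False
#     while not done:
#         actions = {'generator_ratio_p': 1.02, 'load_ratio_p': 1.01}
#         state, reward, done = env.run_step(actions)
#         env.print_info(state=False)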
Code example #16
File: topo.py Project: zll302/PowerAI
    def get_bridges(self):
        res = []
        if self.G.is_multigraph():
            raise NotImplementedError("Multi Graph not supported.")
        else:
            for b in nx.bridges(self.G):
                if self.G.edges[b].get('amount', 0) == 1:
                    res.append(b)
        return res


if __name__ == '__main__':
    from core.power import Power
    path = 'D:/PSA_src/psa/localdata/0913/data'
    fmt = 'on'
    power = Power(fmt)
    power.load_power(path, fmt=fmt, lp=False, st=False)
    with timer("graph"):
        graph1 = PowerGraph(power,
                            graph_type='single',
                            node_type='station',
                            on_only=True)
        islands1 = graph1.get_islands(5)
        graph2 = PowerGraph(power,
                            graph_type='multi',
                            node_type='bus',
                            on_only=False,
                            edge_columns=['x'])
        islands2 = graph2.get_islands(10)
Code example #17
class OpEnv(object):
    def __init__(self, base_path, work_path, inputs, fmt='on'):
        """ 初始化。

        :param base_path: str. 初始断面存放目录。
        :param work_path: str. Env工作目录。
        :param inputs: {etype: [str]}. 强化学习模型的输入量。
        :param fmt: str. 数据格式类型。
        """
        self.power = Power(fmt)
        self.fmt = fmt
        self.base_path = base_path
        self.work_path = work_path
        self.episode = 0
        self.step = 0
        self.assessments = []
        self.state0 = None
        self.min_max = None
        self.init_load_p = 0.
        self.inputs = inputs

    def get_ep_path(self, ep=None, step=None):
        """ 获取指定episode和step的工作目录。

        :param ep: int. 指定episode;
                   or None. 使用当前episode。
        :param step: int. 指定step;
                     or None. 返回episode对应目录。
        :return: str.
        """
        if ep is None:
            ep = self.episode
        if step is None:
            return os.path.join(self.work_path, 'ep%06d' % ep)
        return os.path.join(self.work_path, 'ep%06d' % ep, str(step))

    def reset(self, random=True, load_path=None, error='raise'):
        """ 重置潮流,并进行评估。

        :param random: bool. 是否随机初始化潮流。
        :param load_path: str. 初始断面目录;
                          or None. 用self.base_path作为初始断面。
        :param errpr: str. 初始化失败则raise exception.
        :return: bool. 是否重置成果(not done)
        """
        load_path = self.base_path if load_path is None else load_path
        self.episode += 1
        self.min_max = None
        self.init_load_p = 0.
        path = self.get_ep_path()
        shutil.rmtree(path, ignore_errors=True)
        os.mkdir(path)
        self.step = -1
        self.assessments = []
        self.power.load_power(load_path, fmt=self.fmt)
        if random:
            generators = self.power.data['generator']
            loads = self.power.data['load']
            generators['p0'] = generators['p']
            # gl_rate = np.sum(generators['p']) / np.sum(loads['p'])
            max_p, p0 = np.sum(generators[['pmax', 'p0']])
            p = max_p * (0.4 + 0.5 * np.random.rand())  # 40% ~ 90%
            distribute_generators_p(generators, p - p0, sigma=0.2)
            # dp = np.sum(generators['p0']) - p0
            gen_p = np.sum(generators['p0'])
            load_p = np.sum(loads['p0'])
            distribute_loads_p(loads, 0.9 * gen_p - load_p, p_sigma=0.1, keep_factor=False)
            # distribute_loads_p(loads, dp / gl_rate, p_sigma=0.1, keep_factor=False)
            random_load_q0(loads, sigma=None)
        self.state0, _, done = self.run_step()
        if done and error == 'raise':
            raise ValueError
        return not done

    def get_state(self, normalize=True):
        """ 获取当前输入量数值

        :param normalize: bool. True返回归一化数据;False返回原始数据。
        :return: np.array.
        """
        state = []
        for etype, columns in self.inputs.items():
            state.append(self.power.data[etype][columns].values.T.reshape(-1))
        state = np.concatenate(state)
        if normalize:
            state = (state - self.min_max[:, 0]) \
                    / (self.min_max[:, 1] - self.min_max[:, 0] + EPS)
        return state

    def load_init_info(self):
        """ 获取初始状态,包括输入量的上下限和总负荷功率。

        """
        values = []
        for etype, columns in self.inputs.items():
            for col in columns:
                if col == 'p0':
                    values.append(self.power.data[etype][['pmin', 'pmax']].values)
                elif col == 'q0':
                    values.append(self.power.data[etype][['qmin', 'qmax']].values)
                else:
                    continue
        self.min_max = np.concatenate(values)
        loads = self.power.data['load']
        self.init_load_p = np.sum(loads.loc[loads['mark'] == 1, 'p0'])

    @staticmethod
    def make_assessment(path, method='min', **kwargs):
        """ 对断面稳定结果进行打分。

        :param path: str. 指定断面目录,包含*.res结果文件。
        :param method: str. 'min'取最小值;'avg'取平均值;'grade'取分档平均值。
        :param kwargs: dict. 相关参数字典。
        :return: (float, bool, np.array). 评分、结束标志、稳定结果。
        """
        headers = {'CCTOUT': 'no desc name cct gen1 gen2 times tmp1 tmp2'}
        update_table_header(path, 'res', headers)
        iwant = {'CCTOUT': ['name', 'cct']}
        results = []
        for file_name in os.listdir(path):
            if file_name.endswith('.res'):
                cct = read_efile(os.path.join(path, file_name), iwant.keys(), iwant)
                if 'CCTOUT' in cct:
                    results.append(cct['CCTOUT']['cct'])
        results = pd.concat(results)
        values = results.values.reshape(-1, )
        values = values[~np.isnan(values)]
        values = values[values >= 0.]
        if len(values) == 0 or np.min(values) < 0.1:
            return ST_UNSTABLE_REWARD, True, results
        if method == 'min':
            min_n = kwargs.get('min_n', 1)
            values.sort()
            return np.average(values[:min_n]), False, results
        elif method == 'avg':
            return np.average(values), False, results
        elif method == 'grade':
            thrs = kwargs.get('grades', [0.3, 0.5])
            for thr in thrs:
                values_lt = values[values < thr]
                if len(values_lt) > 0:
                    return np.average(values_lt), False, results
            return thrs[-1], False, results
        else:
            raise NotImplementedError('Method =', method)

    def run_step(self, actions=None):
        """ 按照给定actions运行一步。

        :param actions: str. “random",随机初始化,仅用于测试;
                        or dict. 动作集合,由load_action函数执行动作。
        :return: (np.array, float, bool). 状态量、回报值、结束标志。
        """
        self.step += 1
        path = self.get_ep_path(step=self.step)
        if actions == 'random':  # just for test
            distribute_generators_p(self.power.data['generator'], 1., sigma=0.1)
            distribute_loads_p(self.power.data['load'], 1., p_sigma=0.1,
                               keep_factor=True, factor_sigma=0.1)
        elif actions is not None:
            self.load_action(actions)
        shutil.rmtree(path, ignore_errors=True)
        self.power.save_power(path, self.fmt, lf=True, lp=False, st=True)
        shutil.copy(os.path.join(self.base_path, 'LF.L0'), path)
        shutil.copy(os.path.join(self.base_path, 'ST.S0'), path)
        call_wmlf(path)
        if check_lfcal(path):
            self.power.drop_data(self.fmt, 'lp')
            self.power.load_power(path, self.fmt, lf=False, lp=True, st=False)
            self.power.data['generator']['p0'] = self.power.data['generator']['p']
            if self.step == 0:
                self.load_init_info()
            state = self.get_state()
            if os.name != 'nt':
                call_psa(path)
                assess, done, _ = self.make_assessment(path, method='min', min_n=3)
            else:
                assess = np.random.rand()
                done = (assess < 0.1)
        else:
            state = []
            assess = PF_NOTCONV_REWARD
            done = True
        self.assessments.append(assess)
        if self.step == 0:
            reward = 0.
        else:
            reward = self.assessments[-1] - self.assessments[-2] + STEP_REWARD
            # reward = assess
            if not done:
                # reward *= CCT_CHANGE_RATIO
                loads = self.power.data['load']
                load_p = np.sum(loads.loc[loads['mark'] == 1, 'p0'])
                if abs(load_p - self.init_load_p) / self.init_load_p >= LOAD_CHANGE_THR:
                    reward += PF_LOADFULL_REWARD
                    done = True
            else:
                reward = assess
        return state, reward, done

    def load_action(self, actions):
        """ 加载潮流修改动作,但不计算潮流。

        :param actions: dict. 动作字典:'load_ratio_p'~按比例调整负荷有功;
                                      'generator_ratio_p'~按比例调整机组有功。
        :return: np.array. 归一化的状态量。
        """
        for k in actions:
            if k == 'load_ratio_p':
                set_gl_p0(self.power.data['load'],
                          self.power.data['load']['p0'] * actions[k],
                          keep_factor=False, clip=False)
            elif k == 'generator_ratio_p':
                set_gl_p0(self.power.data['generator'],
                          self.power.data['generator']['p0'] * actions[k],
                          keep_factor=False, clip=True)
        return self.get_state()

    def print_info(self, state=True, assessment=True):
        """ 打印每步信息。

        :param state: bool. 是否打印状态量。
        :param assessment: bool. 是否打印评分值。
        """
        print('episode = %d, step = %d' % (self.episode, self.step))
        if state:
            print('state =', self.get_state())
        if assessment:
            print('assessment =', self.assessments)

    @staticmethod
    def random_generate(base_path, fmt, size, out_path,
                        min_p=None, max_p=None, gl_ratio=0.9,
                        random_q0=True, random_open=False, open_prob=[0.8]):
        power = Power(fmt=fmt)
        power.load_power(base_path, fmt=fmt)
        generators_bak = power.data['generator'].copy()
        loads_bak = power.data['load'].copy()
        if random_open:
            aclines_bak = power.data['acline'].copy()
        min_p = np.sum(generators_bak['pmin']) if not min_p else min_p
        max_p = np.sum(generators_bak['pmax']) if not max_p else max_p
        p0 = np.sum(generators_bak['p0'])
        shutil.rmtree(out_path, ignore_errors=True)
        os.mkdir(out_path)
        conv_count = 0
        for i in range(size):
            generators = power.data['generator'] = generators_bak.copy()
            loads = power.data['load'] = loads_bak.copy()
            if random_open:
                power.data['acline'] = aclines_bak.copy()
            p = min_p + (max_p - min_p) * np.random.rand()
            distribute_generators_p(generators, p - p0, sigma=0.2)
            gen_p = np.sum(generators['p0'])
            load_p = np.sum(loads['p0'])
            distribute_loads_p(loads, gl_ratio * gen_p - load_p,
                               p_sigma=0.2, keep_factor=False)
            if random_q0:
                random_load_q0(loads, sigma=None)
            if random_open:
                open_num = np.sum(np.random.rand(1) > open_prob)
                random_open_acline(power, num=open_num)
            path = os.path.join(out_path, '%08d' % i)
            power.save_power(path, fmt, lf=True, lp=False, st=True)
            shutil.copy(os.path.join(base_path, 'LF.L0'), path)
            shutil.copy(os.path.join(base_path, 'ST.S0'), path)
            call_wmlf(path)
            if check_lfcal(path):
                conv_count += 1
        print('Random generate done: %d / %d' % (conv_count, size))
Code example #18
File: load_ana.py Project: sdy99/PowerAI
import os
import pandas as pd
import numpy as np

from core.power import Power

# Aggregate offline loads
base_path = 'C:/PSASP_Pro/2020国调年度'
index = []
loads_sum = []
for sub in os.listdir(base_path):
    path = os.path.join(base_path, sub)
    if not os.path.isdir(path):
        continue
    index.append(sub)
    power = Power('off')
    power.load_power(path, 'off', lf=True, lp=False, st=False, station=False)
    power.data['load']['province'] = power.data['load']['name'].str[0]
    loads = power.data['load']
    # loads = loads[loads['province'].isin(['黑', '吉', '辽'])]
    loads = loads[loads['mark'] == 1]
    loads_sum.append(loads.groupby('province', sort=False).agg({'p0': 'sum'}))
    print(sub, 'loaded.')

loads_sum = pd.concat(loads_sum, axis=1).T
loads_sum.index = index

# Aggregate online loads
file_name = 'C:/Users/sdy/data/db/2019_09_12/data/station_pl.npz'
arch = np.load(file_name, allow_pickle=True)
loads = pd.DataFrame(arch['data'], index=arch['times'], columns=arch['elems'])
loads.fillna(0., inplace=True)
Code example #19
        43: 'Hunan',
        46: 'Jiangxi',
        51: 'Shaanxi',
        52: 'Gansu',
        53: 'Qinghai',
        54: 'Ningxia',
        55: 'Xinjiang',
        84: 'Chongqing',
        85: 'Sichuan',
        86: 'Xizang'
    }
    path = os.path.join(os.path.expanduser('~'), 'data', 'gd', '2020_09_10',
                        'net')

    power = Power(fmt='on')
    power.load_power(path, fmt='on', lp=False, st=False, station=True)
    islands = list(set(power.data['bus']['island']))
    islands = [i for i in islands if 0 <= i < 5]
    st_info = load_station_info("%s/st_info.dat" % path)
    st_info["area"].replace(12, 11, inplace=True)
    st_info["area"].replace(18, 11, inplace=True)

    layer1, layer2, layer3 = {}, {}, {}
    for island in islands:
        areas = list(set(st_info.loc[st_info['island'] == island, 'area']))
        print(island, areas)
        areas.sort()
        ed = calc_ed_from_power(power,
                                island,
                                node_type='station',
                                on_only=False,
Code example #20
File: mlp.py Project: gintian/PowerAI
    y = layers.Dense(shapes[-1], activation=last_activation)(x)
    all_layers.append(y)
    model = Model(inputs, y, name=name)
    return model, all_layers


if __name__ == '__main__':
    path = os.path.join(os.path.expanduser('~'), 'data', 'wepri36', 'gen')
    net_path = os.path.join(path, 'net')
    res_type = 'cct'
    res_path = os.path.join(path, res_type)
    input_dic = {'generator': ['p'],
                 'load': ['p', 'q']}
    fmt = 'off'
    power = Power(fmt=fmt)
    power.load_power(net_path, fmt, lf=True, lp=False, st=False, station=True)
    input_layer = []
    for etype in input_dic:
        for dtype in input_dic[etype]:
            t = '_'.join((etype, dtype))
            input_layer.extend([(t, n) for n in power.data[etype]['name']])

    data_set = GHData(path, net_path, input_layer)
    data_set.load_x(x_ratio_thr=-1.0, dt_idx=False)
    data_set.load_y(res_type)
    data_set.normalize()
    data_set.column_valid = np.ones((data_set.input_data.shape[1], ), dtype=bool)
    """
    y_columns = list(range(data_set.y.shape[1]))
    column_names = data_set.y.columns[y_columns]
    print("targets:", column_names)