示例#1
0
def check_consistency_uids_gids(is_uvps, uids, gids):
    """Verify that uids and gids are mutually consistent index mappings.

    For every position flagged in ``is_uvps``, the uids entry at that
    position must equal the running UVP counter, and the gids entry at
    that counter must point back at the position.
    """
    uvp_count = 0
    for idx, flag in enumerate(is_uvps):
        if not flag:
            continue
        equal(uids[idx], uvp_count)
        equal(gids[uvp_count], idx)
        uvp_count += 1
示例#2
0
def test_opf():
    """Run LOPF on the opf-storage-hvdc example and compare the generator
    dispatch against stored reference results.

    Reference values were produced with GLPK; other solvers may differ
    slightly, hence the loose decimal=2 comparison.
    """
    example_folder = os.path.join(os.path.dirname(__file__), "..", "examples",
                                  "opf-storage-hvdc", "opf-storage-data")

    n = pypsa.Network(example_folder)

    reference_path = os.path.join(example_folder, "results", "generators-p.csv")
    reference_gen_p = pd.read_csv(reference_path, index_col=0)

    # pyomo-based formulation
    for solver_name in solvers:
        n.lopf(solver_name=solver_name, pyomo=True)
        equal(n.generators_t.p.reindex_like(reference_gen_p),
              reference_gen_p, decimal=2)

    # non-pyomo formulation is exercised only on Python 3
    if sys.version_info.major >= 3:
        for solver_name in solvers:
            status, cond = n.lopf(solver_name=solver_name, pyomo=False)
            assert status == 'ok'
            equal(n.generators_t.p.reindex_like(reference_gen_p),
                  reference_gen_p, decimal=2)
示例#3
0
def test_circum_center_radius():
    '''
    circum_center_radius(): boundary, near lon=0, big triangle
    '''
    from numpy import pi
    from circumcircle import circum_center_radius
    from sphere import angle
    from convert_coord.cart_ll import latlon2xyz

    def to_xyz(lls):
        # convert a list of (lat, lon) pairs to Cartesian points
        return [latlon2xyz(*ll) for ll in lls]

    # boundary: triangle symmetric about lon=pi/2
    pts = to_xyz([(0, pi/3), (0, 2/3*pi), (pi/6, pi/2)])
    center, radius = circum_center_radius(*pts)
    equal(center, (0, 1, 0))

    # near lon=0: every vertex must be one radius from the center
    pts = to_xyz([(pi/5, 2*pi - pi/6), (0, pi/7), (pi/7, pi/6)])
    center, radius = circum_center_radius(*pts)
    for pt in pts:
        aa_equal(angle(center, pt), radius, 15)

    # big triangle covering an octant
    pts = to_xyz([(pi/2, 0), (0, 0), (0, pi/2)])
    center, radius = circum_center_radius(*pts)
    aa_equal(center, latlon2xyz(0.61547970867038737, pi/4), 15)
示例#4
0
def main():
    """Entry point: test fibonacci() and sum_fibonacci_under(), then print
    the answer for the 4,000,000 problem."""

    # fibonacci() generator test: collect terms up to and including 89
    fib_list = []
    for value in fibonacci():
        if value > 89:
            break
        fib_list.append(value)
    a_equal(fib_list, [1, 2, 3, 5, 8, 13, 21, 34, 55, 89])

    # sum_fibonacci_under() test: even terms not exceeding 89
    equal(sum_fibonacci_under(89), sum([2, 8, 34]))

    # solve the actual problem
    summed = sum_fibonacci_under(4000000)
    print('짝수이면서 4백만 이하인 피보나치 수열의 합?', summed)
示例#5
0
def test_make_parameter_header_c():
    '''
    make_parameter_header(): c

    Generates C parameter headers from build.yaml and compares them
    against the expected '#define' text verbatim.
    '''
    import yaml
    from build import make_parameter_header

    code_type = 'c'

    dpath = join(current_dpath, 'src')
    # yaml.load() without an explicit Loader is deprecated and raises
    # TypeError since PyYAML 6; the build file is plain data, so
    # safe_load is sufficient.
    with open(join(dpath, 'build.yaml'), 'r') as f:
        build_dict = yaml.safe_load(f)

    for target_name, src_dict in build_dict['param_header'].items():
        header = make_parameter_header(src_dict, code_type, dpath)

        if target_name == 'param1':
            expect = '''
// Automatically generated by build.py
// Caution: Do Not Modify Manually

#define KK  2
#define LLL 3.5
'''
            equal(header, expect)

        elif target_name == 'param2':
            expect = '''
// Automatically generated by build.py
// Caution: Do Not Modify Manually

#define MM -1.7
'''
            equal(header, expect)
示例#6
0
 def test_scarr(self):
     """scarr(): sig95 columns agree; whitened col 0 stays below col 1."""
     data = np.array([np.arange(100), np.arange(100, 200)]).reshape(100, 2)
     sig = scarr(data, "sig95")
     whitened = scarr(data, "whiten")
     aequal(sig[:, 0], sig[:, 1])
     equal(sig[1, 1], 0.0105715990067)
     assert (whitened[:, 0] - whitened[:, 1] < 0).all()
 def test_scarr(self):
     # scale the same two-column ramp with both algorithms
     arr = np.array([np.arange(100),
                     np.arange(100, 200)]).reshape(100, 2)
     scaled95 = scarr(arr, "sig95")
     scaledwt = scarr(arr, "whiten")
     # sig95 output: both columns identical, one pinned reference value
     aequal(scaled95[:, 0], scaled95[:, 1])
     equal(scaled95[1, 1], 0.0105715990067)
     # whitened output: column 0 strictly below column 1 everywhere
     assert (scaledwt[:, 0] - scaledwt[:, 1] < 0).all()
示例#8
0
 def test_dsig(self):
     """dsig(): output bounded in [0.5, 1.0); wider span gives lower values."""
     xs = range(100)
     res_half = dsig(xs, 0, 0, 50)
     assert min(res_half) >= 0.5
     assert max(res_half) < 1.0
     res_full = dsig(xs, 0, 0, 99)
     assert res_full[10] - res_half[10] < 0
     equal(res_full[5], 0.52523106, 6)
 def test_dsig(self):
     # sigmoid over 0..99 with midpoint parameter 50
     values = range(100)
     narrow = dsig(values, 0, 0, 50)
     # all outputs stay within [0.5, 1.0)
     assert min(narrow) >= 0.5
     assert max(narrow) < 1.0
     # midpoint 99 lowers the curve at index 10
     wide = dsig(values, 0, 0, 99)
     assert wide[10] - narrow[10] < 0
     equal(wide[5], 0.52523106, 6)
示例#10
0
def test_make_signature_c():
    '''
    make_signature_c(): generate an f2py signature from annotated C source.

    The C source carries "// name :: size, intent" comments describing the
    array arguments; the generated signature must match ref_sig_c exactly.
    '''
    from source_module import make_signature_c

    # C input: two functions with f2py annotation comments
    src_c = '''
void add(int nx, double *a, double *b, double *c) {
    // size and intent of array arguments for f2py
    // a :: nx, in
    // b :: nx, in
    // c :: nx, inout

    int i;

    for (i=0; i<nx; i++) {
        c[i] = a[i] + b[i];
    }
}

void dummy(int nx, double k, double *a, double *b) {
    // size and intent of array arguments for f2py
    // a :: nx, in
    // b :: nx, inout

    int i;

    for (i=0; i<nx; i++) {
        b[i] = a[i] + k*b[i];
    }
}
    '''

    # Expected signature ($MODNAME presumably substituted by a later
    # step — TODO confirm against source_module)
    ref_sig_c = '''
python module $MODNAME
  interface
    subroutine add(nx, a, b, c)
      intent(c) :: add
      intent(c)
      integer, required, intent(in) :: nx
      real(8), dimension(nx), intent(in) :: a
      real(8), dimension(nx), intent(in) :: b
      real(8), dimension(nx), intent(inout) :: c
    end subroutine
    subroutine dummy(nx, k, a, b)
      intent(c) :: dummy
      intent(c)
      integer, required, intent(in) :: nx
      real(8), intent(in) :: k
      real(8), dimension(nx), intent(in) :: a
      real(8), dimension(nx), intent(inout) :: b
    end subroutine
  end interface
end python module
'''

    # exact text comparison, including leading/trailing newlines
    sig_c = make_signature_c(src_c)
    equal(ref_sig_c, sig_c)
示例#11
0
def main():
    """Entry point: compare CMath().cos with the reference cos at a random
    point drawn from [-1, 1]."""
    cmath = CMath()

    sample = uniform(-1, 1)
    equal(cmath.cos(sample), cos(sample))
示例#12
0
def test_scale_diff():
    """epicode.py scale_diff: spot-check the first value of the output file."""
    mkdir(path("test_scale_diff"))
    assert sh("cp ../data/test_ab_cnt_deseq.arr test_scale_diff") == 0
    assert sh("epicode.py scale_diff test_scale_diff/test_ab_cnt_deseq.arr") == 0
    with open("test_scale_diff/test_ab_cnt_deseq_lvl.arr") as arr:
        arr.readline()  # skip the header row
        first_row = arr.readline()
        equal(float(first_row.split("\t")[1]), 3.635961336943780431e-01)
    assert sh("rm -rf test_scale_diff") == 0
示例#13
0
def test_scale_features():
    """epicode.py scale_features -scalgo sig95: spot-check one output value."""
    mkdir(path("test_scale_features"))
    assert sh("cp ../data/test_ab_cnt_deseq_lvl.arr test_scale_features") == 0
    assert sh("epicode.py scale_features -scalgo sig95 test_scale_features/test_ab_cnt_deseq_lvl.arr") == 0
    with open("test_scale_features/test_ab_cnt_deseq_lvl_sig95.arr") as arr:
        arr.readline()  # skip the header row
        first_row = arr.readline()
        equal(float(first_row.split("\t")[1]), 1.211047172546386719e-02)
    assert sh("rm -rf test_scale_features") == 0
示例#14
0
def test_code_sklearn():
    """epicode.py code_sklearn -c 6: spot-check the first coded value."""
    mkdir(path("test_code_sklearn"))
    assert sh("cp ../data/a549_start_lvl_sig95.arr test_code_sklearn") == 0
    assert sh("epicode.py code_sklearn -c 6 test_code_sklearn/a549_start_lvl_sig95.arr") == 0
    with open("test_code_sklearn/a549_start_lvl_sig95_pgnmf-c#6-i#None-p#.epi") as epi:
        epi.readline()  # skip the header row
        first_row = epi.readline()
        equal(float(first_row.split("\t")[0]), 4.0693399501)
    assert sh("rm -rf test_code_sklearn") == 0
示例#15
0
def test_scale_pairs():
    """epicode.py scale_pairs: spot-check the first value of the output."""
    mkdir(path("test_scale_pairs"))
    assert sh("cp ../data/test_ab_cnt.arr test_scale_pairs") == 0
    assert sh("epicode.py scale_pairs test_scale_pairs/test_ab_cnt.arr") == 0
    with open("test_scale_pairs/test_ab_cnt_deseq.arr") as arr:
        arr.readline()  # skip the header row
        first_row = arr.readline()
        equal(float(first_row.split("\t")[1]), 1.632993161855451847e+00)
    assert sh("rm -rf test_scale_pairs") == 0
示例#16
0
def test_scale_pairs():
    # run the scale_pairs subcommand on a fresh copy of the fixture
    mkdir(path("test_scale_pairs"))
    assert sh("cp ../data/test_ab_cnt.arr test_scale_pairs") == 0
    assert sh("epicode.py scale_pairs test_scale_pairs/test_ab_cnt.arr") == 0
    # verify a single known value in the first data row
    with open("test_scale_pairs/test_ab_cnt_deseq.arr") as result:
        _header = result.readline()
        first = result.readline()
        equal(float(first.split("\t")[1]), 1.632993161855451847e+00)
    # clean up the scratch directory
    assert sh("rm -rf test_scale_pairs") == 0
示例#17
0
def test_pypower_case():
    """Compare PyPSA's linear power flow against PYPOWER's DC power flow.

    Runs the PYPOWER reference case with runpf (DC mode), imports the same
    case into PyPSA via import_from_pypower_ppc, runs lpf(), and checks
    that generator dispatch and branch flows agree.
    """

    #ppopt is a dictionary with the details of the optimization routine to run
    ppopt = ppoption(PF_ALG=2)

    #choose DC or AC
    ppopt["PF_DC"] = True

    #ppc is a dictionary with details about the network, including baseMVA, branches and generators
    ppc = case()

    results, success = runpf(ppc, ppopt)

    #store results in a DataFrame for easy access
    results_df = {}

    #branches: column names mirror PYPOWER's branch matrix layout
    columns = 'bus0, bus1, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax, p0, q0, p1, q1'.split(
        ", ")
    results_df['branch'] = pd.DataFrame(data=results["branch"],
                                        columns=columns)

    #buses: indexed by the PYPOWER bus number (first column)
    columns = [
        "bus", "type", "Pd", "Qd", "Gs", "Bs", "area", "v_mag_pu_set",
        "v_ang_set", "v_nom", "zone", "Vmax", "Vmin"
    ]
    results_df['bus'] = pd.DataFrame(data=results["bus"],
                                     columns=columns,
                                     index=results["bus"][:, 0])

    #generators
    columns = "bus, p, q, q_max, q_min, Vg, mBase, status, p_max, p_min, Pc1, Pc2, Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf".split(
        ", ")
    results_df['gen'] = pd.DataFrame(data=results["gen"], columns=columns)

    #now compute the same case in PyPSA
    network = pypsa.Network()
    network.import_from_pypower_ppc(ppc)
    network.lpf()

    #compare generator dispatch
    p_pypsa = network.generators_t.p.loc["now"].values
    p_pypower = results_df['gen']["p"].values

    equal(p_pypsa, p_pypower)

    #compare branch flows (both directions) for lines and transformers
    for item in ["lines", "transformers"]:
        df = getattr(network, item)
        pnl = getattr(network, item + "_t")

        for si in ["p0", "p1"]:
            si_pypsa = getattr(pnl, si).loc["now"].values
            # original_index maps PyPSA components back to PYPOWER rows
            si_pypower = results_df['branch'][si][df.original_index].values
            equal(si_pypsa, si_pypower)
示例#18
0
def test_recode_sklearn():
    """epicode.py recode_sklearn: spot-check the first recoded value."""
    mkdir(path("test_recode_sklearn"))
    assert sh("cp ../data/0*sig95.arr test_recode_sklearn") == 0
    assert sh("cp ../data/tss_vs_enh_pgnmf-c#6-i#None-p#.epi test_recode_sklearn") == 0
    assert sh("epicode.py recode_sklearn -arr test_recode_sklearn/0_tss_vs_enh_lvl_sig95.arr -epi test_recode_sklearn/tss_vs_enh_pgnmf-c#6-i#None-p#.epi -base recode -odn test_recode_sklearn") == 0
    with open("test_recode_sklearn/recode.arr") as recoded:
        recoded.readline()  # skip the header row
        first_row = recoded.readline()
        equal(float(first_row.split("\t")[0]), 1.250747926106863528e-01)
    assert sh("rm -rf test_recode_sklearn") == 0
示例#19
0
def test_scale_diff():
    # run the scale_diff subcommand on a fresh copy of the fixture
    mkdir(path("test_scale_diff"))
    assert sh("cp ../data/test_ab_cnt_deseq.arr test_scale_diff") == 0
    assert sh(
        "epicode.py scale_diff test_scale_diff/test_ab_cnt_deseq.arr") == 0
    # verify a single known value in the first data row
    with open("test_scale_diff/test_ab_cnt_deseq_lvl.arr") as result:
        _header = result.readline()
        first = result.readline()
        equal(float(first.split("\t")[1]), 3.635961336943780431e-01)
    # clean up the scratch directory
    assert sh("rm -rf test_scale_diff") == 0
示例#20
0
def test_scale_features():
    # run the scale_features subcommand with the sig95 algorithm
    mkdir(path("test_scale_features"))
    assert sh("cp ../data/test_ab_cnt_deseq_lvl.arr test_scale_features") == 0
    assert sh(
        "epicode.py scale_features -scalgo sig95 test_scale_features/test_ab_cnt_deseq_lvl.arr"
    ) == 0
    # verify a single known value in the first data row
    with open("test_scale_features/test_ab_cnt_deseq_lvl_sig95.arr") as result:
        _header = result.readline()
        first = result.readline()
        equal(float(first.split("\t")[1]), 1.211047172546386719e-02)
    # clean up the scratch directory
    assert sh("rm -rf test_scale_features") == 0
示例#21
0
def test_load_yaml_dict():
    """load_yaml_dict(): the (ne=30, np=4) selection resolves every entry."""
    from load_yaml import load_yaml_dict

    conf_dict = load_yaml_dict('sample.yaml', select=[('ne',30),('np',4)])

    # expected fully-resolved configuration
    ref_dict = {
        'aa': 5, 'bb': 2, 'cc': 3,
        'dd': 90., 'ee': 5, 'ff': 18.,
        'parameter': {'a': 4.5, 'b': 3.2, 'c': 3e5,
                      'd': 4.5+3.2, 'e': 4.5/(4.5+3.2)},
    }

    for key, value in conf_dict.items():
        equal(value, ref_dict[key])
示例#22
0
def test_code_sklearn():
    # run the code_sklearn subcommand with 6 components
    mkdir(path("test_code_sklearn"))
    assert sh("cp ../data/a549_start_lvl_sig95.arr test_code_sklearn") == 0
    assert sh(
        "epicode.py code_sklearn -c 6 test_code_sklearn/a549_start_lvl_sig95.arr"
    ) == 0
    # verify a single known value in the first data row of the .epi output
    with open("test_code_sklearn/a549_start_lvl_sig95_pgnmf-c#6-i#None-p#.epi"
              ) as result:
        _header = result.readline()
        first = result.readline()
        equal(float(first.split("\t")[0]), 4.0693399501)
    # clean up the scratch directory
    assert sh("rm -rf test_code_sklearn") == 0
示例#23
0
def test_multi_code_sklearn():
    """epicode.py multi_code_sklearn: check first values of both outputs."""
    mkdir(path("test_multicode_sklearn"))
    assert sh("cp ../data/[01]*sig95.arr test_multicode_sklearn") == 0
    assert sh("epicode.py multi_code_sklearn -base test_multicode_sklearn/tve -c 6 test_multicode_sklearn/[01]*sig95.arr") == 0
    # spot-check one known value in each produced file
    checks = [
        ("test_multicode_sklearn/tve_pgnmf-c#6-i#None-p#.epi", 1, 2.75326630104),
        ("test_multicode_sklearn/tve_pgnmf-c#6-i#None-p#.arr", 0, 1.250747926106863528e-01),
    ]
    for fname, col, expected in checks:
        with open(fname) as fh:
            fh.readline()  # skip the header row
            first_row = fh.readline()
            equal(float(first_row.split("\t")[col]), expected)
    assert sh("rm -rf test_multicode_sklearn") == 0
示例#24
0
def test_sclopf():
    """Security-constrained LOPF on the SciGRID example.

    Solves sclopf with both the pyomo and non-pyomo formulations for the
    first snapshot, verifies via linear contingency analysis that no line
    exceeds its s_nom in any outage, and checks the two objectives agree
    to one decimal place.
    """
    csv_folder_name = os.path.join(
        os.path.dirname(__file__),
        "..",
        "examples",
        "scigrid-de",
        "scigrid-with-load-gen-trafos",
    )

    n = pypsa.Network(csv_folder_name)

    # test results were generated with GLPK and other solvers may differ

    # There are some infeasibilities without line extensions
    for line_name in ["316", "527", "602"]:
        n.lines.loc[line_name, "s_nom"] = 1200

    # choose the contingencies: outage of the first two lines
    branch_outages = n.lines.index[:2]

    objectives = []
    for pyomo in [True, False]:

        n.sclopf(
            n.snapshots[0],
            branch_outages=branch_outages,
            pyomo=pyomo,
            solver_name=solver_name,
        )

        # For the PF, set the P to the optimised P
        n.generators_t.p_set = n.generators_t.p.copy()
        n.generators.loc[:, "p_set_t"] = True
        n.storage_units_t.p_set = n.storage_units_t.p.copy()
        n.storage_units.loc[:, "p_set_t"] = True

        # Check no lines are overloaded with the linear contingency analysis

        p0_test = n.lpf_contingency(n.snapshots[0],
                                    branch_outages=branch_outages)

        # check loading as per unit of s_nom in each contingency

        max_loading = (abs(p0_test.divide(n.passive_branches().s_nom,
                                          axis=0)).describe().loc["max"])

        # worst-case loading should sit exactly at 1.0 p.u. (binding limit)
        arr_equal(max_loading, np.ones((len(max_loading))), decimal=4)

        objectives.append(n.objective)

    # both formulations must reach (almost) the same objective value
    equal(*objectives, decimal=1)
示例#25
0
def test_recode_sklearn():
    # run the recode_sklearn subcommand against copied fixtures
    mkdir(path("test_recode_sklearn"))
    assert sh("cp ../data/0*sig95.arr test_recode_sklearn") == 0
    assert sh(
        "cp ../data/tss_vs_enh_pgnmf-c#6-i#None-p#.epi test_recode_sklearn"
    ) == 0
    assert sh(
        "epicode.py recode_sklearn -arr test_recode_sklearn/0_tss_vs_enh_lvl_sig95.arr -epi test_recode_sklearn/tss_vs_enh_pgnmf-c#6-i#None-p#.epi -base recode -odn test_recode_sklearn"
    ) == 0
    # verify a single known value in the first data row
    with open("test_recode_sklearn/recode.arr") as result:
        _header = result.readline()
        first = result.readline()
        equal(float(first.split("\t")[0]), 1.250747926106863528e-01)
    # clean up the scratch directory
    assert sh("rm -rf test_recode_sklearn") == 0
示例#26
0
def test_pt_in_polygon():
    '''
    pt_in_polygon(): out, border, in
    '''

    from sphere import pt_in_polygon
    from convert_coord.cart_ll import latlon2xyz

    # spherical triangle used in every case, and the expected
    # classification for each test point
    corners = [(pi/2,0), (0,0), (0,pi/4)]
    cases = [
        ((pi/4, 2*pi/6), 'out'),
        ((-pi/6, pi/4), 'out'),
        ((pi/4, pi/4), 'border'),
        ((pi/4, pi/6), 'in'),
    ]

    for latlon, expected in cases:
        # rebuild the polygon each time, as the original test did
        polygon = [latlon2xyz(*ll) for ll in corners]
        point = latlon2xyz(*latlon)
        equal(pt_in_polygon(polygon, point), expected)
示例#27
0
def test_check_and_make_parameter_header_f90():
    '''
    check_and_make_parameter_header(): f90

    Removes any previously generated headers, runs the generator twice,
    and verifies the "is generated." then "is up to date." messages plus
    the existence of the produced files.
    '''
    import yaml
    import os
    import io
    import sys
    from build import check_and_make_parameter_header

    code_type = 'f90'
    suffix = '.f90.h' if code_type == 'f90' else '.h'

    dpath = join(current_dpath, 'src')
    src_dir = {'f90':'f90', 'c':'c', 'cu':'cuda', 'cl':'opencl'}[code_type]
    build_dpath = join(dpath, src_dir, 'build')
    # yaml.load() without an explicit Loader is deprecated and raises
    # TypeError since PyYAML 6; the build file is plain data, so
    # safe_load is sufficient.
    with open(join(dpath, 'build.yaml'), 'r') as f:
        build_dict = yaml.safe_load(f)

    #
    # Remove previously generated files
    #
    for target_name, src_dict in build_dict['param_header'].items():
        f90_fpath = join(build_dpath, target_name+suffix)
        if os.path.exists(f90_fpath): os.remove(f90_fpath)

    #
    # Make and compile header file
    # verify stdout and file existence
    #
    ret, out, err = capture(check_and_make_parameter_header)(code_type, dpath)
    expect_list = list()
    for target_name in sorted(build_dict['param_header'].keys()):
        f90_fpath = join(build_dpath, target_name+suffix)
        expect_list.append('{} is generated.'.format(f90_fpath))
    equal(out, '\n'.join(expect_list))

    for target_name in sorted(build_dict['param_header'].keys()):
        f90_fpath = join(build_dpath, target_name+suffix)
        ok_( os.path.exists(f90_fpath) )

    #
    # verify stdout when nothing has changed (revision check)
    #
    ret, out, err = capture(check_and_make_parameter_header)(code_type, dpath)
    expect_list = list()
    for target_name in sorted(build_dict['param_header'].keys()):
        f90_fpath = join(build_dpath, target_name+suffix)
        expect_list.append('{} is up to date.'.format(f90_fpath))
    equal(out, '\n'.join(expect_list))
示例#28
0
def test_multi_code_sklearn():
    # run the multi_code_sklearn subcommand over both sig95 fixtures
    mkdir(path("test_multicode_sklearn"))
    assert sh("cp ../data/[01]*sig95.arr test_multicode_sklearn") == 0
    assert sh(
        "epicode.py multi_code_sklearn -base test_multicode_sklearn/tve -c 6 test_multicode_sklearn/[01]*sig95.arr"
    ) == 0
    # verify a single known value in the first data row of the .epi file
    with open("test_multicode_sklearn/tve_pgnmf-c#6-i#None-p#.epi") as epi:
        _header = epi.readline()
        first = epi.readline()
        equal(float(first.split("\t")[1]), 2.75326630104)
    # and in the first data row of the .arr file
    with open("test_multicode_sklearn/tve_pgnmf-c#6-i#None-p#.arr") as arr:
        _header = arr.readline()
        first = arr.readline()
        equal(float(first.split("\t")[0]), 1.250747926106863528e-01)
    # clean up the scratch directory
    assert sh("rm -rf test_multicode_sklearn") == 0
示例#29
0
def test_remove_duplicates():
    '''
    remove_duplicates() : normal case, latlons
    '''
    from duplicate import remove_duplicates

    # one duplicated point: only the first occurrence survives
    points = [(0,0.5,0),(-0.5,0,0),(0.5,0,0),(1,1.2,0),(0.5,0,0)]
    equal(remove_duplicates(points),
          [(0,0.5,0),(-0.5,0,0),(0.5,0,0),(1,1.2,0)])

    # two duplicated points
    points = [(0,0.5,0),(-0.5,0,0),(0.5,0,0),(1,1.2,0),(0.5,0,0),(1.2,2.3,0),(-0.5,0,0)]
    equal(remove_duplicates(points),
          [(0,0.5,0),(-0.5,0,0),(0.5,0,0),(1,1.2,0),(1.2,2.3,0)])
示例#30
0
def main():
    """Entry point: verify sum_multiple_under() and print the answer for
    the multiples-of-3-or-5 problem."""

    # sanity check against the known small case
    equal(sum_multiple_under(10), 23)

    # solve the actual problem
    summed = sum_multiple_under(1000)
    print('1000보다 작은 자연수 중에서 3 또는 5의 배수의 합?', summed)
示例#31
0
def test_intersect_two_greatcircles():
    '''
    intersect_two_greatcircles(): axis circles, oblique circles, identical
    '''

    from sphere import plane_origin, intersect_two_greatcircles
    from convert_coord.cart_ll import latlon2xyz

    #---------------------------------------
    # axis circles: two coordinate planes meet along an axis
    #---------------------------------------
    ex, ey, ez = (1,0,0), (0,1,0), (0,0,1)

    # x axis
    ret = intersect_two_greatcircles(plane_origin(ex, ey),
                                     plane_origin(ex, ez))
    equal(ret, [(1,0,0), (-1,0,0)])

    # y axis
    ret = intersect_two_greatcircles(plane_origin(ex, ey),
                                     plane_origin(ey, ez))
    equal(ret, [(0,1,0), (0,-1,0)])

    # z axis
    ret = intersect_two_greatcircles(plane_origin(ex, ez),
                                     plane_origin(ey, ez))
    equal(ret, [(0,0,1), (0,0,-1)])

    #---------------------------------------
    # oblique circles: intersect at the shared point and its antipode
    #---------------------------------------
    pole = (0, 0, 1)
    shared = latlon2xyz(pi/4, pi/4)
    equator_pt = (1,0,0)

    ret = intersect_two_greatcircles(plane_origin(pole, shared),
                                     plane_origin(shared, equator_pt))
    aa_equal(ret, [shared, latlon2xyz(-pi/4, 5*pi/4)], 15)

    #---------------------------------------
    # identical planes: no unique intersection points
    #---------------------------------------
    plane = plane_origin(pole, shared)
    equal(intersect_two_greatcircles(plane, plane), [None, None])
示例#32
0
def test_replace_braket():
    """replace_braket(): resolve <name> references inside a config dict."""
    from load_yaml import replace_braket

    conf_dict = {
        'a': 4.5,
        'b': 3.2,
        'c': '3.e5',
        'd': '<a>+<b>',
        'e': '<a>/<d>',
    }

    # resolve every entry in place
    for key in list(conf_dict):
        conf_dict[key] = replace_braket(conf_dict, key)

    # expected fully-evaluated values
    ref_dict = {'a':4.5, 'b':3.2, 'c':3e5, 'd':4.5+3.2, 'e':4.5/(4.5+3.2)}

    for key, value in conf_dict.items():
        equal(value, ref_dict[key])
示例#33
0
def test_get_surround_idxs():
    '''
    LatlonGridRemap.get_surround_idxs(): nlat=180, nlon=360 (regular)
    '''
    from cube_remap import LatlonGridRemap

    nlat, nlon = 180, 360
    eps = 1e-3
    remap = LatlonGridRemap(nlat, nlon, 'regular')

    # Near south pole: a single valid index, the rest -1
    lat, lon = remap.tmp_lats[1], remap.tmp_lons[7]
    equal(remap.get_surround_idxs(lat-eps, lon+eps), (7,-1,-1,-1))

    # Near north pole
    lat, lon = remap.tmp_lats[-2], remap.tmp_lons[-2]
    equal(remap.get_surround_idxs(lat+eps, lon+eps), (nlat*nlon-1,-1,-1,-1))

    # First box
    lat, lon = remap.tmp_lats[1], remap.tmp_lons[0]
    a_equal(remap.get_surround_idxs(lat+eps, lon+eps), [0,1,nlon,nlon+1])

    # Last box
    lat, lon = remap.tmp_lats[-2], remap.tmp_lons[-2]
    a_equal(remap.get_surround_idxs(lat-eps, lon+eps),
            [(nlat-1)*nlon-1, (nlat-2)*nlon, nlat*nlon-1, (nlat-1)*nlon])

    # Near Meridian
    lat, lon = remap.tmp_lats[1], remap.tmp_lons[-2]
    a_equal(remap.get_surround_idxs(lat+eps, lon+eps),
            [nlon-1,0,nlon*2-1,nlon])

    # Regression for a previously failing coordinate pair
    a_equal(remap.get_surround_idxs(-0.785398163397, 6.28318530718),
            [16199, 15840, 16559, 16200])
示例#34
0
def test_minimum_up_time():
    """Unit-commitment test based on
    https://pypsa.readthedocs.io/en/latest/examples/unit-commitment.html
    and not very comprehensive."""

    network = pypsa.Network()
    network.set_snapshots(range(4))

    network.add("Bus", "bus")

    # cheap committable unit with a 30% minimum output
    network.add("Generator",
                "coal",
                bus="bus",
                committable=True,
                p_min_pu=0.3,
                marginal_cost=20,
                p_nom=10000)

    # expensive unit that must stay on for 3 snapshots once started
    network.add("Generator",
                "gas",
                bus="bus",
                committable=True,
                marginal_cost=70,
                p_min_pu=0.1,
                up_time_before=0,
                min_up_time=3,
                p_nom=1000)

    network.add("Load", "load", bus="bus", p_set=[4000, 800, 5000, 3000])

    network.lopf(network.snapshots, solver_name=solver_name)

    # columns: coal, gas; rows: snapshots
    expected_status = np.array([[1, 0, 1, 1], [1, 1, 1, 0]], dtype=float).T
    equal(network.generators_t.status.values, expected_status)

    expected_dispatch = np.array([[3900, 0, 4900, 3000], [100, 800, 100, 0]],
                                 dtype=float).T
    equal(network.generators_t.p.values, expected_dispatch)
示例#35
0
def test_polygon_line():
    '''
    intersection between polygon and line
    '''
    #-------------------------------------------------
    # line fully contained in the polygon
    #-------------------------------------------------
    square = Polygon([(0,0),(0,1),(1,1),(1,0)])
    segment = LineString([(0,0), (0.5,0.5)])

    clipped = square.intersection(segment)
    equal(np.sqrt(2)*0.5, clipped.length)
    a_equal(segment, np.array(clipped.coords))

    #-------------------------------------------------
    # line partially inside: clipped at the boundary
    #-------------------------------------------------
    square = Polygon([(0,0),(0,1),(1,1),(1,0)])
    segment = LineString([(0.5,0.5),(1.5,1.5)])

    clipped = square.intersection(segment)
    equal(np.sqrt(2)*0.5, clipped.length)
    a_equal(LineString([(0.5,0.5),(1,1)]), np.array(clipped.coords))

    #-------------------------------------------------
    # line only touching a corner: zero-length result
    #-------------------------------------------------
    square = Polygon([(0,0),(0,1),(1,1),(1,0)])
    segment = LineString([(1,1),(2,2)])

    clipped = square.intersection(segment)
    equal(0, clipped.length)
示例#36
0
def test_angle():
    '''
    angle(): yz plane circle, oblique circle
    '''

    from sphere import angle
    from convert_coord.cart_ll import latlon2xyz

    # exact results
    equal(angle(latlon2xyz(pi/4,0), latlon2xyz(0,0)), pi/4)
    equal(angle(latlon2xyz(pi/2,0), latlon2xyz(0,0)), pi/2)
    equal(angle(latlon2xyz(0,0), latlon2xyz(0,0)), 0)

    # results checked to 15 decimal places
    aa_equal(angle(latlon2xyz(pi/4,0), latlon2xyz(-pi/4,0)), pi/2, 15)
    aa_equal(angle(latlon2xyz(pi/2,0), latlon2xyz(-pi/4,0)), 3*pi/4, 15)
    aa_equal(angle(latlon2xyz(pi/2,0), latlon2xyz(-pi/2,0)), pi, 15)
示例#37
0
def test_duplicate_idxs():
    '''
    duplicate_idxs() : normal case, latlons
    '''
    from duplicate import duplicate_idxs

    # single duplicated point: index of the later occurrence is reported
    points = [(0,0.5,0),(-0.5,0,0),(0.5,0,0),(1,1.2,0),(0.5,0,0)]
    equal(duplicate_idxs(points), [4])

    # two duplicated points
    points = [(0,0.5,0),(-0.5,0,0),(0.5,0,0),(1,1.2,0),(0.5,0,0),(1.2,2.3,0),(-0.5,0,0)]
    equal(duplicate_idxs(points), [6,4])

    # error case: nearly-identical coordinates that must still be
    # detected as duplicates
    points = [(-0.69766285707571141, 1.5271630954950388, 0),
              (-0.69766285707571152, 1.5271630954950384, 0),
              (-0.78492204764598783, 1.5271630954950381, 0),
              (-0.78492204764598794, 1.5271630954950384, 0),
              (-0.78492204764598794, 1.6144295580947545, 0),
              (-0.69766285707571218, 1.6144295580947559, 0),
              (-0.69766285707571163, 1.6144295580947547, 0)]
    equal(duplicate_idxs(points), [1,3,6])
示例#38
0
def test_get_surround_idxs_2():
    '''
    LatlonGridRemap.get_surround_idxs(): nlat=192, nlon=384 (gaussian)
    '''
    from cube_remap import LatlonGridRemap

    nlat, nlon = 192, 384
    eps = 1e-5
    remap = LatlonGridRemap(nlat, nlon, 'gaussian')

    # Near south pole: a single valid index, the rest -1
    lat, lon = remap.tmp_lats[1], remap.tmp_lons[7]
    equal(remap.get_surround_idxs(lat-eps, lon+eps), (7,-1,-1,-1))

    # Near north pole
    lat, lon = remap.tmp_lats[-2], remap.tmp_lons[-2]
    equal(remap.get_surround_idxs(lat+eps, lon+eps), (nlat*nlon-1,-1,-1,-1))

    # First box
    lat, lon = remap.tmp_lats[1], remap.tmp_lons[0]
    a_equal(remap.get_surround_idxs(lat+eps, lon+eps), [0,1,nlon,nlon+1])

    # Last box
    lat, lon = remap.tmp_lats[-2], remap.tmp_lons[-2]
    a_equal(remap.get_surround_idxs(lat-eps, lon+eps),
            [(nlat-1)*nlon-1, (nlat-2)*nlon, nlat*nlon-1, (nlat-1)*nlon])

    # Near Meridian
    lat, lon = remap.tmp_lats[1], remap.tmp_lons[-2]
    a_equal(remap.get_surround_idxs(lat+eps, lon+eps),
            [nlon-1,0,nlon*2-1,nlon])
def check_area_ratio(ncf, SCRIP=False):
    """Check that the remap weights targeting each destination cell sum to ~1.

    Accumulates the weights per destination address, classifies each
    destination by how many digits its weight sum agrees with 1 (via feq),
    prints a histogram, and asserts every destination was classified.

    ncf   -- open netCDF file holding dst_address/src_address/remap_matrix
    SCRIP -- True for SCRIP-format files (1-based addresses, 2-D weights)
    """
    from util.misc.compare_float import feq

    num_links = len(ncf.dimensions["num_links"])
    dsts = ncf.variables["dst_address"][:]
    srcs = ncf.variables["src_address"][:]
    wgts = ncf.variables["remap_matrix"][:]

    # py3 fix: the original used Python-2-only xrange and print statements
    if SCRIP:
        # SCRIP addresses are 1-based; weights are a 2-D matrix
        up_size = len(ncf.dimensions["dst_grid_size"])
        f = np.zeros(up_size, "f8")
        for i in range(num_links):
            dst, wgt = dsts[i] - 1, wgts[i, 0]
            f[dst] += wgt
    else:
        up_size = ncf.up_size
        f = np.zeros(up_size, "f8")
        for i in range(num_links):
            dst, wgt = dsts[i], wgts[i]
            f[dst] += wgt

    # classify each destination by the highest digit count at which its
    # weight sum still equals 1 (0 means not even a single digit)
    f_digits = np.ones(up_size, "i4") * (-1)
    num_digits = np.zeros(16, "i4")
    for i in range(up_size):
        for digit in range(15, 0, -1):
            if feq(f[i], 1, digit):
                f_digits[i] = digit
                num_digits[digit] += 1
                break

        if f_digits[i] == -1:
            f_digits[i] = 0
            num_digits[0] += 1

    for digit in range(16):
        # 100.0 forces true division; the original integer division
        # floored the percentage to 0
        print("digit %d -> %d (%1.2f %%)" % (
            digit, num_digits[digit], 100.0 * num_digits[digit] / up_size))

    equal(sum(num_digits), up_size)
示例#40
0
def test_line_line():
    '''
    intersection between line and line
    '''
    #-------------------------------------------------
    # crossing segments -> a single point
    #-------------------------------------------------
    seg_a = LineString([(0,0), (1,1)])
    seg_b = LineString([(1,0), (0,1)])

    ist = seg_a.intersection(seg_b)
    equal(ist.geom_type, 'Point')
    a_equal([0.5,0.5], np.array(ist.coords)[0])

    #-------------------------------------------------
    # collinear overlap -> a line segment
    #-------------------------------------------------
    seg_a = LineString([(0,0), (1,1)])
    seg_b = LineString([(-1,-1), (0.5,0.5)])

    ist = seg_a.intersection(seg_b)
    equal(ist.geom_type, 'LineString')
    a_equal([(0,0),(0.5,0.5)], np.array(ist.coords))

    #-------------------------------------------------
    # parallel, disjoint -> empty geometry
    #-------------------------------------------------
    seg_a = LineString([(0,0), (1,1)])
    seg_b = LineString([(0,-1), (1,0)])

    ist = seg_a.intersection(seg_b)
    equal(True, ist.is_empty)

    #-------------------------------------------------
    # non-parallel, but segments do not reach -> empty geometry
    #-------------------------------------------------
    seg_a = LineString([(0,0), (1,1)])
    seg_b = LineString([(3,0), (0,3)])

    ist = seg_a.intersection(seg_b)
    equal(True, ist.is_empty)
示例#41
0
def test_sclopf(n):
    """Security-constrained LOPF smoke test.

    n: a pypsa.Network (presumably injected as a pytest fixture — confirm).
    Solves sclopf with both the pyomo and non-pyomo formulations, checks
    via linear contingency analysis that no line exceeds s_nom in any
    outage, and compares the two objectives to one decimal place.
    """

    # There are some infeasibilities without line extensions
    for line_name in ["316", "527", "602"]:
        n.lines.loc[line_name, "s_nom"] = 1200

    # choose the contingencies: outage of the first two lines
    branch_outages = n.lines.index[:2]

    objectives = []
    for pyomo in [True, False]:

        n.sclopf(
            n.snapshots[0],
            branch_outages=branch_outages,
            pyomo=pyomo,
            solver_name=solver_name,
        )

        # For the PF, set the P to the optimised P
        n.generators_t.p_set = n.generators_t.p.copy()
        n.storage_units_t.p_set = n.storage_units_t.p.copy()

        # Check no lines are overloaded with the linear contingency analysis

        p0_test = n.lpf_contingency(n.snapshots[0],
                                    branch_outages=branch_outages)

        # check loading as per unit of s_nom in each contingency

        max_loading = (abs(p0_test.divide(n.passive_branches().s_nom,
                                          axis=0)).describe().loc["max"])

        # worst-case loading should sit exactly at 1.0 p.u. (binding limit)
        arr_equal(max_loading, np.ones((len(max_loading))), decimal=4)

        objectives.append(n.objective)

    # both formulations must reach (almost) the same objective value
    equal(*objectives, decimal=1)
示例#42
0
def compare_with_kim_rhs(myrank, nproc):
    '''
    Compare with KIM: CubeGridMPI, CubeTensor

    Checks locally built cube-grid tensors (Dvv transpose, inverse
    Jacobian AI) against reference variables dumped by KIM into
    per-rank netCDF files.

    myrank : MPI rank of this process (selects the reference file)
    nproc  : total number of MPI processes (must be 16, matching dumps)
    '''
    from cube_mpi import CubeGridMPI
    from cube_tensor import CubeTensor

    # reference dumps were produced for a 16-process decomposition only
    assert nproc == 16


    #---------------------------------------------------------------------
    # Read reference variables from KIM
    #---------------------------------------------------------------------
    ref_ncf = nc.Dataset('compute_and_apply_rhs/rank_%d.nc'%(myrank), 'r')
    ref_nelemd = len( ref_ncf.dimensions['nelemd'] )
    ref_Dvv = ref_ncf.variables['Dvv'][:]
    ref_Dinv = ref_ncf.variables['Dinv'][:]
    # ps_v and grad_ps are read but not compared here; kept for parity
    # with the reference-file layout
    ref_ps_v = ref_ncf.variables['ps_v'][:]
    ref_grad_ps = ref_ncf.variables['grad_ps'][:]


    #---------------------------------------------------------------------
    # Compare
    #---------------------------------------------------------------------
    cubegrid = CubeGridMPI(ne, ngq, nproc, myrank, homme_style=True)
    cubetensor = CubeTensor(cubegrid)

    # Note: Fortran array is column-major
    equal(ref_nelemd, cubegrid.local_nelem)
    # scale both sides identically to avoid spurious last-digit noise
    aa_equal(ref_Dvv.ravel()*1e-1, cubetensor.dvvT*1e-1, 15)

    # AI is stored flat; view it as (2,2,ngq,ngq,nelemd) for comparison
    AI = cubetensor.AI.reshape(2,2,ngq,ngq,ref_nelemd)
    # range() instead of Python-2-only xrange(): identical iteration,
    # works on both Python 2 and 3
    for ie in range(ref_nelemd):
        for gi in range(ngq):
            for gj in range(ngq):
                # index order swapped between Fortran and local layout
                aa_equal(ref_Dinv[ie,gj,gi,:,:], AI[:,:,gj,gi,ie], 15)
示例#43
0
def test_normal_vector():
    '''
    normal_vector(): yz plane circle, oblique circle
    '''

    from sphere import normal_vector
    from convert_coord.cart_ll import latlon2xyz


    #---------------------------------------
    # yz plane circle, +x direction
    #---------------------------------------
    v1, v2 = (0, 1, 0), (0, 1/sqrt(2), 1/sqrt(2))
    # raw normal has magnitude sin of the angle between the vectors
    equal(normal_vector(v1, v2), (sin(pi/4), 0, 0))
    # normalized form is the unit +x axis
    equal(normal_vector(v1, v2, normalize=True), (1, 0, 0))


    #---------------------------------------
    # yz plane circle, -x direction
    #---------------------------------------
    v1, v2 = (0, -1, 0), (0, 1/sqrt(2), 1/sqrt(2))
    equal(normal_vector(v1, v2), (-sin(pi/4), 0, 0))
    equal(normal_vector(v1, v2, normalize=True), (-1, 0, 0))


    #---------------------------------------
    # oblique circle
    #---------------------------------------
    v1 = (0, 0, 1)
    v2 = latlon2xyz(pi/4, pi/4)
    # expected normal expressed via latlon2xyz with reduced radius
    aa_equal(normal_vector(v1, v2), latlon2xyz(0, 3*pi/4, R=sin(pi/4)), 15)
    aa_equal(normal_vector(v1, v2, normalize=True),
             (-1/sqrt(2), 1/sqrt(2), 0), 15)
示例#44
0
def test_lpf(n, n_r):
    """Linear power flow on `n` must reproduce the reference network
    `n_r` for generator dispatch and line/link flows (first two snapshots)."""

    n.lpf(snapshots=n.snapshots)

    # (computed, reference, columns) triples to compare
    comparisons = (
        (n.generators_t.p, n_r.generators_t.p, n.generators.index),
        (n.lines_t.p0, n_r.lines_t.p0, n.lines.index),
        (n.links_t.p0, n_r.links_t.p0, n.links.index),
    )
    for got, ref, cols in comparisons:
        equal(got[cols].iloc[:2], ref[cols].iloc[:2])
示例#45
0
def test_lpf_chunks(n, n_r):
    """Solving snapshots one at a time and then all at once must still
    match the reference results."""

    # first solve the leading snapshots individually
    for snap in n.snapshots[:2]:
        n.lpf(snap)

    # then solve the full horizon in one call
    n.lpf(snapshots=n.snapshots)

    comparisons = (
        (n.generators_t.p, n_r.generators_t.p, n.generators.index),
        (n.lines_t.p0, n_r.lines_t.p0, n.lines.index),
        (n.links_t.p0, n_r.links_t.p0, n.links.index),
    )
    for got, ref, cols in comparisons:
        equal(got[cols], ref[cols])
示例#46
0
def test_lopf_lowmem(n, n_r):
    """The low-memory (non-pyomo) LOPF backend must solve successfully
    and reproduce the reference solution to 2 decimals."""

    status, _ = n.lopf(snapshots=n.snapshots,
                       solver_name=solver_name,
                       pyomo=False)

    assert status == 'ok'

    comparisons = (
        (n.generators_t.p, n_r.generators_t.p, n.generators.index),
        (n.lines_t.p0, n_r.lines_t.p0, n.lines.index),
        (n.links_t.p0, n_r.links_t.p0, n.links.index),
    )
    for got, ref, cols in comparisons:
        equal(got.loc[:, cols], ref.loc[:, cols], decimal=2)
示例#47
0
def test_lopf(n, n_r, formulation, free_memory):
    """
    Test results were generated with GLPK; solution should be unique,
    so other solvers should not differ (e.g. cbc or gurobi)
    """

    n.lopf(snapshots=n.snapshots,
           solver_name=solver_name,
           formulation=formulation,
           free_memory=free_memory)

    # compare dispatch and flows against the stored reference network
    comparisons = (
        (n.generators_t.p, n_r.generators_t.p, n.generators.index),
        (n.lines_t.p0, n_r.lines_t.p0, n.lines.index),
        (n.links_t.p0, n_r.links_t.p0, n.links.index),
    )
    for got, ref, cols in comparisons:
        equal(got.loc[:, cols], ref.loc[:, cols], decimal=4)
示例#48
0
 def test_loadarr(self):
     """load_epi parses the codes file into (keys, values) arrays."""
     keys, values = load_epi(
         os.path.join(pkg_dir, "data", "absolute_codes.epi"))
     # first key and a spot-checked value from the fixture file
     assert keys[0] == "h2az"
     equal(values[0][3], 1.30686752)
示例#49
0
 def test_sparsevec(self):
     """sparsevec scores a peaked vector high and a flat one near zero."""
     peaked = np.array([0, 1, 1, 1, 1, 2])
     flat = np.array([1, 1, 1, 1, 1, 1])
     equal(sparsevec(peaked), 0.22640339557688832)
     # flat vector: essentially zero sparseness, 3-decimal tolerance
     equal(sparsevec(flat), 4.08e-10, 3)
示例#50
0
 def test_sparsemat(self):
     """sparsemat averages row sparseness over a 2-row matrix."""
     mat = np.array([[0, 1, 1, 1, 1, 2],
                     [1, 1, 1, 1, 1, 1]])
     # mean of the two row scores, compared to 6 decimals
     equal(sparsemat(mat), 0.113201697993, 6)
示例#51
0
def test_pf_distributed_slack():
    """Distributed-slack power flow on the SciGRID example: the slack
    mismatch must be shared among generators according to the chosen
    weighting ('p_set', 'p_nom', or custom per-bus/per-generator weights).
    """
    csv_folder_name = os.path.join(os.path.dirname(__file__), "..", "examples",
                                   "scigrid-de",
                                   "scigrid-with-load-gen-trafos")

    network = pypsa.Network(csv_folder_name)
    # keep the test fast: only two snapshots
    network.set_snapshots(network.snapshots[:2])

    #There are some infeasibilities without line extensions
    network.lines.s_max_pu = 0.7
    network.lines.loc[["316", "527", "602"], "s_nom"] = 1715
    network.storage_units.state_of_charge_initial = 0.

    network.lopf(network.snapshots,
                 solver_name='glpk',
                 formulation='kirchhoff')

    #For the PF, set the P to the optimised P
    network.generators_t.p_set = network.generators_t.p
    network.storage_units_t.p_set = network.storage_units_t.p

    #set all buses to PV, since we don't know what Q set points are
    network.generators.control = "PV"

    #Need some PQ buses so that Jacobian doesn't break
    f = network.generators[network.generators.bus == "492"]
    network.generators.loc[f.index, "control"] = "PQ"

    # by dispatch
    network.pf(distribute_slack=True, slack_weights='p_set')

    # each generator's share of the slack (p - p_set) must be
    # proportional to its set point in every snapshot
    equal(network.generators_t.p_set.apply(normed, axis=1),
          (network.generators_t.p - network.generators_t.p_set).apply(normed,
                                                                      axis=1))

    # by capacity
    network.pf(distribute_slack=True, slack_weights='p_nom')

    slack_shares_by_capacity = (network.generators_t.p -
                                network.generators_t.p_set).apply(normed,
                                                                  axis=1)

    # each snapshot's slack shares must match the normalized capacities
    for index, row in slack_shares_by_capacity.iterrows():
        equal(network.generators.p_nom.pipe(normed).fillna(0), row)

    # by custom weights (mirror 'capacity' via custom slack weights by bus)
    custom_weights = {}
    for sub_network in network.sub_networks.obj:
        buses_o = sub_network.buses_o
        # per-bus capacity shares, normalized over the sub-network
        custom_weights[sub_network.name] = sub_network.generators().groupby(
            'bus').sum().p_nom.reindex(buses_o).pipe(normed).fillna(0)

    network.pf(distribute_slack=True, slack_weights=custom_weights)

    # custom per-bus weights should reproduce the by-capacity result
    equal(slack_shares_by_capacity,
          (network.generators_t.p - network.generators_t.p_set).apply(normed,
                                                                      axis=1))

    # by custom weights (mirror 'capacity' via custom slack weights by generators)
    custom_weights = {}
    for sub_network in network.sub_networks.obj:
        custom_weights[sub_network.name] = sub_network.generators(
        ).p_nom  # weights do not sum up to 1

    network.pf(distribute_slack=True, slack_weights=custom_weights)

    # unnormalized per-generator weights should still give the same shares
    equal(slack_shares_by_capacity,
          (network.generators_t.p - network.generators_t.p_set).apply(normed,
                                                                      axis=1))
示例#52
0
def test_pypower_case():
    """Solve the same case with PYPOWER and with PyPSA and compare branch
    flows, generator dispatch, voltage magnitudes and angles."""

    #ppopt is a dictionary with the details of the optimization routine to run
    ppopt = ppoption(PF_ALG=2)

    #choose DC or AC
    ppopt["PF_DC"] = False

    #ppc is a dictionary with details about the network, including baseMVA, branches and generators
    ppc = case()

    results, success = runpf(ppc, ppopt)

    #store results in a DataFrame for easy access
    results_df = {}

    #branches
    # column order follows PYPOWER's branch result matrix
    columns = 'bus0, bus1, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax, p0, q0, p1, q1'.split(
        ", ")
    results_df['branch'] = pd.DataFrame(data=results["branch"],
                                        columns=columns)

    #buses
    columns = [
        "bus", "type", "Pd", "Qd", "Gs", "Bs", "area", "v_mag_pu", "v_ang",
        "v_nom", "zone", "Vmax", "Vmin"
    ]
    # index by the bus number (first column) for direct lookup
    results_df['bus'] = pd.DataFrame(data=results["bus"],
                                     columns=columns,
                                     index=results["bus"][:, 0])

    #generators
    columns = "bus, p, q, q_max, q_min, Vg, mBase, status, p_max, p_min, Pc1, Pc2, Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf".split(
        ", ")
    results_df['gen'] = pd.DataFrame(data=results["gen"], columns=columns)

    #now compute in PyPSA

    network = pypsa.Network()
    network.import_from_pypower_ppc(ppc)

    #PYPOWER uses PI model for transformers, whereas PyPSA defaults to
    #T since version 0.8.0
    network.transformers.model = "pi"

    network.pf()

    #compare branch flows
    for c in network.iterate_components(network.passive_branch_components):
        for si in ["p0", "p1", "q0", "q1"]:
            si_pypsa = getattr(c.pnl, si).loc["now"].values
            # original_index maps PyPSA rows back to the PYPOWER ordering
            si_pypower = results_df['branch'][si][c.df.original_index].values
            equal(si_pypsa, si_pypower)

    #compare generator dispatch
    for s in ["p", "q"]:
        s_pypsa = getattr(network.generators_t, s).loc["now"].values
        s_pypower = results_df["gen"][s].values
        equal(s_pypsa, s_pypower)

    #compare voltages
    v_mag_pypsa = network.buses_t.v_mag_pu.loc["now"]
    v_mag_pypower = results_df["bus"]["v_mag_pu"]

    equal(v_mag_pypsa, v_mag_pypower)

    v_ang_pypsa = network.buses_t.v_ang.loc["now"]
    # re-reference PYPOWER angles to its slack bus (type == 3) and
    # convert degrees to radians before comparing
    pypower_slack_angle = results_df["bus"]["v_ang"][results_df["bus"]["type"]
                                                     == 3].values[0]
    v_ang_pypower = (results_df["bus"]["v_ang"] -
                     pypower_slack_angle) * np.pi / 180.

    equal(v_ang_pypsa, v_ang_pypower)
示例#53
0
def test_pandapower_case():
    """Build a minimal 3-bus pandapower network, import it into PyPSA,
    run both power flows and compare voltages, bus injections and
    branch/transformer flows."""

    # more complicated examples like
    # net = pandapower.networks.example_simple()
    # can be used once the import of e.g. switches is perfected

    # create empty net
    net = pp.create_empty_network()

    # create buses
    b1 = pp.create_bus(net, vn_kv=20.0, name="Bus 1")
    b2 = pp.create_bus(net, vn_kv=0.4, name="Bus 2")
    b3 = pp.create_bus(net, vn_kv=0.4, name="Bus 3")

    # create bus elements
    pp.create_ext_grid(net, bus=b1, vm_pu=1.02, name="Grid Connection")
    pp.create_load(net, bus=b3, p_mw=0.1, q_mvar=0.05, name="Load")

    # create branch elements
    pp.create_transformer(net,
                          hv_bus=b1,
                          lv_bus=b2,
                          std_type="0.4 MVA 20/0.4 kV",
                          name="Trafo")
    pp.create_line(net,
                   from_bus=b2,
                   to_bus=b3,
                   length_km=0.1,
                   name="Line",
                   std_type="NAYY 4x50 SE")

    # because of phase angles, need to init with DC
    pp.runpp(net, calculate_voltage_angles=True, init="dc")

    n = pypsa.Network()

    n.import_from_pandapower_net(net)

    # seed PF with LPF solution because of phase angle jumps
    n.lpf()
    n.pf(use_seed=True)

    # use same index for everything
    net.res_bus.index = net.bus.name.values
    net.res_line.index = net.line.name.values

    # compare bus angles (PyPSA stores radians, pandapower degrees)
    equal(n.buses_t.v_ang.loc["now"] * 180 / np.pi, net.res_bus.va_degree)

    # compare bus voltage magnitudes
    equal(n.buses_t.v_mag_pu.loc["now"], net.res_bus.vm_pu)

    # compare bus active power (NB: pandapower uses load signs)
    equal(n.buses_t.p.loc["now"], -net.res_bus.p_mw)

    # compare bus reactive power (NB: pandapower uses load signs)
    equal(n.buses_t.q.loc["now"], -net.res_bus.q_mvar)

    # compare branch flows
    equal(n.lines_t.p0.loc["now"], net.res_line.p_from_mw)
    equal(n.lines_t.p1.loc["now"], net.res_line.p_to_mw)
    equal(n.lines_t.q0.loc["now"], net.res_line.q_from_mvar)
    equal(n.lines_t.q1.loc["now"], net.res_line.q_to_mvar)

    # transformer flows: PyPSA side 0/1 maps to pandapower hv/lv
    equal(n.transformers_t.p0.loc["now"], net.res_trafo.p_hv_mw)
    equal(n.transformers_t.p1.loc["now"], net.res_trafo.p_lv_mw)
    equal(n.transformers_t.q0.loc["now"], net.res_trafo.q_hv_mvar)
    equal(n.transformers_t.q1.loc["now"], net.res_trafo.q_lv_mvar)
示例#54
0
def test_lopf():
    """Run LOPF on the AC-DC meshed example with every formulation and
    free-memory option, comparing against stored GLPK reference results
    (the solution is unique, so other solvers such as cbc or gurobi
    should agree)."""

    example_dir = os.path.join(os.path.dirname(__file__), "..", "examples",
                               "ac-dc-meshed", "ac-dc-data")

    n = pypsa.Network(example_dir)
    n_r = pypsa.Network(os.path.join(example_dir, "results-lopf"))

    snapshots = n.snapshots

    def compare(decimal):
        # dispatch and flows must match the reference network
        equal(n.generators_t.p.loc[:, n.generators.index],
              n_r.generators_t.p.loc[:, n.generators.index],
              decimal=decimal)
        equal(n.lines_t.p0.loc[:, n.lines.index],
              n_r.lines_t.p0.loc[:, n.lines.index],
              decimal=decimal)
        equal(n.links_t.p0.loc[:, n.links.index],
              n_r.links_t.p0.loc[:, n.links.index],
              decimal=decimal)

    for formulation, free_memory in product(
            ["angles", "cycles", "kirchhoff", "ptdf"], [{}, {"pypsa"}]):
        n.lopf(snapshots=snapshots,
               solver_name=solver_name,
               formulation=formulation,
               free_memory=free_memory)
        compare(4)

    # the non-pyomo backend is only available on Python 3
    if sys.version_info.major >= 3:
        status, cond = n.lopf(snapshots=snapshots,
                              solver_name=solver_name,
                              pyomo=False)
        assert status == 'ok'
        compare(2)
示例#55
0
def test_single_to_multi_level_snapshots():
    """Assigning investment periods promotes flat snapshots to a MultiIndex
    whose first level holds the period years."""
    years = [2030, 2040]
    net = pypsa.Network(snapshots=range(2))
    net.investment_periods = years
    assert isinstance(net.snapshots, pd.MultiIndex)
    equal(net.snapshots.levels[0], years)