Example #1
def test_compute_stats_single():
    for G, distances, betas in network_generator():
        data_dict = mock.mock_data_dict(G)
        numeric_data = mock.mock_numerical_data(len(data_dict), num_arrs=1)
        # easy version
        N_easy = networks.Network_Layer_From_nX(G, distances)
        D_easy = layers.Data_Layer_From_Dict(data_dict)
        D_easy.assign_to_network(N_easy, max_dist=500)
        D_easy.compute_stats_single('boo', numeric_data[0])
        # custom version
        N_full = networks.Network_Layer_From_nX(G, distances)
        D_full = layers.Data_Layer_From_Dict(data_dict)
        D_full.assign_to_network(N_full, max_dist=500)
        D_full.compute_aggregated(stats_keys=['boo'],
                                  stats_data_arrs=numeric_data)
        # compare
        for n_label in ['boo']:
            for s_label in [
                    'max', 'min', 'mean', 'mean_weighted', 'variance',
                    'variance_weighted'
            ]:
                for dist in distances:
                    assert np.allclose(
                        N_easy.metrics['stats'][n_label][s_label][dist],
                        N_full.metrics['stats'][n_label][s_label][dist],
                        equal_nan=True,
                        atol=0.001,
                        rtol=0)
        # check that multi-dimensional arrays are caught
        with pytest.raises(ValueError):
            D_easy.compute_stats_single('boo', numeric_data)
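A short aside on the 'mean_weighted' statistics compared above: the betas used throughout these tests imply a negative-exponential distance decay, so the distance-weighted statistics presumably weight each value by w = exp(beta * d). A minimal standalone sketch under that assumption (illustrative only, not the library's internal code; the function name is hypothetical):

import numpy as np

def weighted_mean_sketch(values, dists, beta=-0.01):
    # assumed decay weighting: w = exp(beta * distance), with beta negative
    weights = np.exp(beta * np.asarray(dists, dtype=float))
    return float(np.sum(weights * np.asarray(values, dtype=float)) / np.sum(weights))

# the value at 100m carries roughly 20x the weight of the value at 400m
print(weighted_mean_sketch([10.0, 20.0], [100.0, 400.0]))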
Example #2
def test_check_data_map():
    G = mock.mock_graph()
    G = graphs.nX_simple_geoms(G)
    N = networks.Network_Layer_From_nX(G, distances=[500])
    data_dict = mock.mock_data_dict(G)
    data_uids, data_map = layers.data_map_from_dict(data_dict)

    # should raise an error if not yet assigned to a network
    with pytest.raises(ValueError):
        checks.check_data_map(data_map)

    # should pass when the check_assigned flag is set to False
    checks.check_data_map(data_map, check_assigned=False)

    # assign then check that it runs as intended
    data_map = data.assign_to_network(data_map,
                                      N._node_data,
                                      N._edge_data,
                                      N._node_edge_map,
                                      max_dist=400)
    checks.check_data_map(data_map)

    # catch zero-length data arrays
    empty_2d_arr = np.full((0, 4), np.nan)
    with pytest.raises(ValueError):
        checks.check_data_map(empty_2d_arr)

    # catch invalid dimensionality
    with pytest.raises(ValueError):
        checks.check_data_map(data_map[:, :-1])
Example #3
def test_metrics_to_dict():
    G = mock.mock_graph()
    G = graphs.nX_simple_geoms(G)
    # create a network layer and run some metrics
    N = networks.Network_Layer_From_nX(G, distances=[500, 1000])

    # check with no metrics
    metrics_dict = N.metrics_to_dict()
    dict_check(metrics_dict, N)

    # check with centrality metrics
    N.compute_centrality(measures=['node_harmonic'])
    metrics_dict = N.metrics_to_dict()
    dict_check(metrics_dict, N)

    # check with data metrics
    data_dict = mock.mock_data_dict(G)
    landuse_labels = mock.mock_categorical_data(len(data_dict))
    numerical_data = mock.mock_numerical_data(len(data_dict))
    # TODO:
    '''
    D = layers.Data_Layer_From_Dict(data_dict)
    D.assign_to_network(N, max_dist=400)
    D.compute_aggregated(landuse_labels,
                         mixed_use_keys=['hill', 'shannon'],
                         accessibility_keys=['a', 'c'],
                         qs=[0, 1],
                         stats_keys=['boo'],
                         stats_data_arrs=numerical_data)
    '''
    metrics_dict = N.metrics_to_dict()
    dict_check(metrics_dict, N)
Example #4
def test_compute_stats_multiple():
    for G, distances, betas in network_generator():
        data_dict = mock.mock_data_dict(G)
        numeric_data = mock.mock_numerical_data(len(data_dict), num_arrs=2)
        # easy version
        N_easy = networks.Network_Layer_From_nX(G, distances)
        D_easy = layers.Data_Layer_From_Dict(data_dict)
        D_easy.assign_to_network(N_easy, max_dist=500)
        D_easy.compute_stats_multiple(['boo', 'baa'], numeric_data)
        # custom version
        N_full = networks.Network_Layer_From_nX(G, distances)
        D_full = layers.Data_Layer_From_Dict(data_dict)
        D_full.assign_to_network(N_full, max_dist=500)
        D_full.compute_aggregated(stats_keys=['boo', 'baa'],
                                  stats_data_arrs=numeric_data)
        # compare
        for n_label in ['boo', 'baa']:
            for s_label in [
                    'max', 'min', 'mean', 'mean_weighted', 'variance',
                    'variance_weighted'
            ]:
                for dist in distances:
                    assert np.allclose(
                        N_easy.metrics['stats'][n_label][s_label][dist],
                        N_full.metrics['stats'][n_label][s_label][dist],
                        equal_nan=True,
                        atol=0.001,
                        rtol=0)
Example #5
def test_dict_wgs_to_utm():
    # check that node coordinates are correctly converted
    G_utm = mock.mock_graph()
    data_dict_utm = mock.mock_data_dict(G_utm)

    # create a test dictionary
    test_dict = copy.deepcopy(data_dict_utm)
    # cast to lat, lon
    for k, v in test_dict.items():
        easting = v['x']
        northing = v['y']
        # note the argument and return order: utm.to_latlon returns (lat, lng)
        lat, lng = utm.to_latlon(easting, northing, 30, 'U')
        test_dict[k]['x'] = lng
        test_dict[k]['y'] = lat

    # convert back
    dict_converted = layers.dict_wgs_to_utm(test_dict)

    # check that the round-trip conversion matches within a reasonable tolerance, given rounding errors
    for k in data_dict_utm.keys():
        # rounding can be tricky
        assert np.allclose(data_dict_utm[k]['x'],
                           dict_converted[k]['x'],
                           atol=0.1,
                           rtol=0)  # relax precision
        assert np.allclose(data_dict_utm[k]['y'],
                           dict_converted[k]['y'],
                           atol=0.1,
                           rtol=0)  # relax precision

    # check that missing node attributes throw an error
    for attr in ['x', 'y']:
        G_wgs = mock.mock_graph(wgs84_coords=True)
        data_dict_wgs = mock.mock_data_dict(G_wgs)
        for k in data_dict_wgs.keys():
            del data_dict_wgs[k][attr]
            break
        # check that missing attribute throws an error
        with pytest.raises(AttributeError):
            layers.dict_wgs_to_utm(data_dict_wgs)

    # check that non-WGS coordinates throw an error
    with pytest.raises(AttributeError):
        layers.dict_wgs_to_utm(data_dict_utm)
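Example #5 leans on the utm package for the conversions, so a minimal round-trip sketch may help: utm.from_latlon returns (easting, northing, zone number, zone letter), and utm.to_latlon inverts it given the same zone. The coordinates below are illustrative.

import utm

lat, lng = 51.5074, -0.1278  # illustrative WGS84 coordinates
easting, northing, zone_number, zone_letter = utm.from_latlon(lat, lng)
lat_rt, lng_rt = utm.to_latlon(easting, northing, zone_number, zone_letter)
# the round trip agrees to well within the 0.1 tolerance used in the test above
assert abs(lat - lat_rt) < 1e-4 and abs(lng - lng_rt) < 1e-4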
Example #6
def test_Data_Layer():
    G = mock.mock_graph()
    data_dict = mock.mock_data_dict(G)
    data_uids, data_map = layers.data_map_from_dict(data_dict)
    x_arr = data_map[:, 0]
    y_arr = data_map[:, 1]

    # test against Data_Layer internal process
    D = layers.Data_Layer(data_uids, data_map)
    assert D.uids == data_uids
    assert np.allclose(D._data, data_map, equal_nan=True, atol=0.001, rtol=0)
    assert np.allclose(D.x_arr, x_arr, atol=0.001, rtol=0)
    assert np.allclose(D.y_arr, y_arr, atol=0.001, rtol=0)
Example #7
def test_data_map_from_dict():
    # generate mock data
    G = mock.mock_graph()
    data_dict = mock.mock_data_dict(G)
    data_uids, data_map = layers.data_map_from_dict(data_dict)

    assert len(data_uids) == len(data_map) == len(data_dict)

    for d_label, d in zip(data_uids, data_map):
        # columns 0 and 1 hold the x and y coordinates
        assert d[0] == data_dict[d_label]['x']
        assert d[1] == data_dict[d_label]['y']
        # columns 2 and 3 hold assignment state and remain NaN until the data
        # layer is assigned to a network
        assert np.isnan(d[2])
        assert np.isnan(d[3])

    # check that missing attributes throw errors
    for attr in ['x', 'y']:
        for k in data_dict.keys():
            del data_dict[k][attr]
        with pytest.raises(AttributeError):
            layers.data_map_from_dict(data_dict)
Example #8
def test_compute_accessibilities():
    for G, distances, betas in network_generator():
        data_dict = mock.mock_data_dict(G)
        landuse_labels = mock.mock_categorical_data(len(data_dict))
        # easy version
        N_easy = networks.Network_Layer_From_nX(G, distances)
        D_easy = layers.Data_Layer_From_Dict(data_dict)
        D_easy.assign_to_network(N_easy, max_dist=500)
        D_easy.compute_accessibilities(landuse_labels, ['c'])
        # custom version
        N_full = networks.Network_Layer_From_nX(G, distances)
        D_full = layers.Data_Layer_From_Dict(data_dict)
        D_full.assign_to_network(N_full, max_dist=500)
        D_full.compute_aggregated(landuse_labels, accessibility_keys=['c'])
        # compare
        for d in distances:
            for wt in ['weighted', 'non_weighted']:
                assert np.allclose(N_easy.metrics['accessibility'][wt]['c'][d],
                                   N_full.metrics['accessibility'][wt]['c'][d],
                                   atol=0.001,
                                   rtol=0)
Example #9
def test_hill_diversity():
    for G, distances, betas in network_generator():
        data_dict = mock.mock_data_dict(G)
        landuse_labels = mock.mock_categorical_data(len(data_dict))
        # easy version
        N_easy = networks.Network_Layer_From_nX(G, distances)
        D_easy = layers.Data_Layer_From_Dict(data_dict)
        D_easy.assign_to_network(N_easy, max_dist=500)
        D_easy.hill_diversity(landuse_labels, qs=[0, 1, 2])
        # custom version
        N_full = networks.Network_Layer_From_nX(G, distances)
        D_full = layers.Data_Layer_From_Dict(data_dict)
        D_full.assign_to_network(N_full, max_dist=500)
        D_full.compute_aggregated(landuse_labels,
                                  mixed_use_keys=['hill'],
                                  qs=[0, 1, 2])
        # compare
        for d in distances:
            for q in [0, 1, 2]:
                assert np.allclose(N_easy.metrics['mixed_uses']['hill'][q][d],
                                   N_full.metrics['mixed_uses']['hill'][q][d],
                                   atol=0.001,
                                   rtol=0)
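For context on what Example #9 exercises: Hill diversity of order q for class proportions p_i is D_q = (sum_i p_i^q)^(1/(1-q)), with q = 1 taken as the limit exp(-sum_i p_i ln p_i). A minimal standalone sketch (illustrative, not the library's internal implementation):

import numpy as np

def hill_diversity_sketch(counts, q):
    probs = np.asarray(counts, dtype=float)
    probs = probs[probs > 0] / probs.sum()
    if q == 1:
        # limit case: the exponential of Shannon entropy
        return float(np.exp(-np.sum(probs * np.log(probs))))
    return float(np.sum(probs ** q) ** (1.0 / (1.0 - q)))

# q=0 gives richness (a count of distinct classes); larger q down-weights rare classes
print(hill_diversity_sketch([4, 3, 2, 1], q=0))  # 4.0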
Example #10
base_path = path.dirname(__file__)

#
#
# INTRO PLOT
G = mock.mock_graph()
plot.plot_nX(G, path='graph.png', labels=True, dpi=150)

# INTRO EXAMPLE PLOTS
G = graphs.nX_simple_geoms(G)
G = graphs.nX_decompose(G, 20)

N = networks.Network_Layer_From_nX(G, distances=[400, 800])
N.compute_centrality(measures=['segment_harmonic'])

data_dict = mock.mock_data_dict(G, random_seed=25)
D = layers.Data_Layer_From_Dict(data_dict)
D.assign_to_network(N, max_dist=400)
landuse_labels = mock.mock_categorical_data(len(data_dict), random_seed=25)
D.hill_branch_wt_diversity(landuse_labels, qs=[0])
G_metrics = N.to_networkX()

segment_harmonic_vals = []
mixed_uses_vals = []
for node, data in G_metrics.nodes(data=True):
    segment_harmonic_vals.append(
        data['metrics']['centrality']['segment_harmonic'][800])
    mixed_uses_vals.append(
        data['metrics']['mixed_uses']['hill_branch_wt'][0][400])

# custom colourmap
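The snippet above breaks off at the custom colourmap step. A minimal hedged continuation: the colours and normalisation below are assumptions for illustration, not the original author's choices.

from matplotlib.colors import LinearSegmentedColormap
import numpy as np

# build a two-colour map and normalise the metric values into its 0-1 domain
cmap = LinearSegmentedColormap.from_list('metrics', ['#64c1ff', '#d32f2f'])
vals = np.array(mixed_uses_vals)
node_colours = cmap((vals - vals.min()) / (vals.max() - vals.min()))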
Example #11
def test_nX_from_graph_maps():
    # also see test_networks.test_to_networkX for tests on implementation via Network layer

    # check that a round trip to and from graph maps results in the same graph
    G = mock.mock_graph()
    G = graphs.nX_simple_geoms(G)
    # explicitly set live params for equality checks
    # graph_maps_from_nX generates these implicitly if missing
    for n in G.nodes():
        G.nodes[n]['live'] = bool(np.random.randint(0, 2))

    # test directly from and to graph maps
    node_uids, node_data, edge_data, node_edge_map = graphs.graph_maps_from_nX(G)
    G_round_trip = graphs.nX_from_graph_maps(node_uids, node_data, edge_data, node_edge_map)
    assert list(G_round_trip.nodes) == list(G.nodes)
    assert list(G_round_trip.edges) == list(G.edges)

    # check with metrics dictionary
    N = networks.Network_Layer_From_nX(G, distances=[500, 1000])

    N.compute_centrality(measures=['node_harmonic'])
    data_dict = mock.mock_data_dict(G)
    landuse_labels = mock.mock_categorical_data(len(data_dict))
    D = layers.Data_Layer_From_Dict(data_dict)
    D.assign_to_network(N, max_dist=400)
    D.compute_aggregated(landuse_labels,
                         mixed_use_keys=['hill', 'shannon'],
                         accessibility_keys=['a', 'c'],
                         qs=[0, 1])
    metrics_dict = N.metrics_to_dict()
    # without backbone
    G_round_trip_data = graphs.nX_from_graph_maps(node_uids,
                                                  node_data,
                                                  edge_data,
                                                  node_edge_map,
                                                  metrics_dict=metrics_dict)
    for uid, metrics in metrics_dict.items():
        assert G_round_trip_data.nodes[uid]['metrics'] == metrics
    # with backbone
    G_round_trip_data = graphs.nX_from_graph_maps(node_uids,
                                                  node_data,
                                                  edge_data,
                                                  node_edge_map,
                                                  networkX_graph=G,
                                                  metrics_dict=metrics_dict)
    for uid, metrics in metrics_dict.items():
        assert G_round_trip_data.nodes[uid]['metrics'] == metrics

    # test with decomposed
    G_decomposed = graphs.nX_decompose(G, decompose_max=20)
    # set live explicitly
    for n in G_decomposed.nodes():
        G_decomposed.nodes[n]['live'] = bool(np.random.randint(0, 2))
    node_uids_d, node_data_d, edge_data_d, node_edge_map_d = graphs.graph_maps_from_nX(G_decomposed)

    G_round_trip_d = graphs.nX_from_graph_maps(node_uids_d, node_data_d, edge_data_d, node_edge_map_d)
    assert list(G_round_trip_d.nodes) == list(G_decomposed.nodes)
    for n, node_data in G_round_trip_d.nodes(data=True):
        assert n in G_decomposed
        assert node_data['live'] == G_decomposed.nodes[n]['live']
        assert node_data['x'] == G_decomposed.nodes[n]['x']
        assert node_data['y'] == G_decomposed.nodes[n]['y']
    assert G_round_trip_d.edges == G_decomposed.edges

    # error checks for when using backbone graph:
    # mismatching numbers of nodes
    corrupt_G = G.copy()
    corrupt_G.remove_node(0)
    with pytest.raises(ValueError):
        graphs.nX_from_graph_maps(node_uids, node_data, edge_data, node_edge_map, networkX_graph=corrupt_G)
    # mismatching node uid
    with pytest.raises(ValueError):
        corrupt_node_uids = list(node_uids)
        corrupt_node_uids[0] = 'boo'
        graphs.nX_from_graph_maps(corrupt_node_uids, node_data, edge_data, node_edge_map, networkX_graph=G)
Example #12
def test_compute_aggregated_B():
    '''
    Test stats component
    '''
    G = mock.mock_graph()
    G = graphs.nX_simple_geoms(G)
    betas = np.array([-0.01, -0.005])
    distances = networks.distance_from_beta(betas)
    # network layer
    N = networks.Network_Layer_From_nX(G, distances)
    node_map = N._node_data
    edge_map = N._edge_data
    node_edge_map = N._node_edge_map
    # data layer
    data_dict = mock.mock_data_dict(G)
    qs = np.array([0, 1, 2])
    D = layers.Data_Layer_From_Dict(data_dict)
    # check single metrics independently against underlying for some use-cases, e.g. hill, non-hill, accessibility...
    D.assign_to_network(N, max_dist=500)

    # generate some mock numerical data
    mock_numeric = mock.mock_numerical_data(len(data_dict), num_arrs=2)

    # generate stats
    D.compute_aggregated(stats_keys=['boo', 'baa'],
                         stats_data_arrs=mock_numeric)

    # test against underlying method
    data_map = D._data
    mu_data_hill, mu_data_other, ac_data, ac_data_wt, \
    stats_sum, stats_sum_wt, stats_mean, stats_mean_wt, stats_variance, stats_variance_wt, stats_max, stats_min = \
        data.local_aggregator(node_map,
                              edge_map,
                              node_edge_map,
                              data_map,
                              distances,
                              betas,
                              numerical_arrays=mock_numeric)

    stats_keys = [
        'max', 'min', 'sum', 'sum_weighted', 'mean', 'mean_weighted',
        'variance', 'variance_weighted'
    ]
    stats_data = [
        stats_max, stats_min, stats_sum, stats_sum_wt, stats_mean,
        stats_mean_wt, stats_variance, stats_variance_wt
    ]

    for num_idx, num_label in enumerate(['boo', 'baa']):
        for s_key, stats in zip(stats_keys, stats_data):
            for d_idx, d_key in enumerate(distances):
                assert np.allclose(N.metrics['stats'][num_label][s_key][d_key],
                                   stats[num_idx][d_idx],
                                   atol=0.001,
                                   rtol=0)

    # check that mismatching label and array lengths are caught
    for labels, arrs in (
        (['a'], mock_numeric),  # mismatching lengths
        (['a', 'b'], None),  # missing arrays
        (None, mock_numeric)):  # missing labels
        with pytest.raises(ValueError):
            D.compute_aggregated(stats_keys=labels, stats_data_arrs=arrs)
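Examples #12 and #13 both derive distances from betas via networks.distance_from_beta. A minimal sketch of the presumed relationship, assuming the conventional negative-exponential decay w = exp(beta * d) and a minimum-weight cutoff of exp(-4) ≈ 0.01831 (both are assumptions for illustration):

import numpy as np

def distance_from_beta_sketch(betas, min_threshold_wt=0.01831563888873418):
    # solve exp(beta * d) = min_threshold_wt for d
    return np.log(min_threshold_wt) / np.asarray(betas, dtype=float)

# the betas used above map to round distances
print(distance_from_beta_sketch([-0.01, -0.005]))  # [400. 800.]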
Example #13
def test_compute_aggregated_A():
    G = mock.mock_graph()
    G = graphs.nX_simple_geoms(G)
    betas = np.array([-0.01, -0.005])
    distances = networks.distance_from_beta(betas)
    # network layer
    N = networks.Network_Layer_From_nX(G, distances)
    node_map = N._node_data
    edge_map = N._edge_data
    node_edge_map = N._node_edge_map
    # data layer
    data_dict = mock.mock_data_dict(G)
    qs = np.array([0, 1, 2])
    D = layers.Data_Layer_From_Dict(data_dict)
    # check single metrics independently against underlying for some use-cases, e.g. hill, non-hill, accessibility...
    D.assign_to_network(N, max_dist=500)
    # generate some mock landuse data
    landuse_labels = mock.mock_categorical_data(len(data_dict))
    landuse_classes, landuse_encodings = layers.encode_categorical(
        landuse_labels)
    # compute hill mixed uses
    D.compute_aggregated(landuse_labels,
                         mixed_use_keys=['hill_branch_wt'],
                         qs=qs)
    # test against underlying method
    data_map = D._data
    mu_data_hill, mu_data_other, ac_data, ac_data_wt, \
    stats_sum, stats_sum_wt, stats_mean, stats_mean_wt, stats_variance, stats_variance_wt, stats_max, stats_min = \
        data.local_aggregator(node_map,
                              edge_map,
                              node_edge_map,
                              data_map,
                              distances,
                              betas,
                              landuse_encodings,
                              qs=qs,
                              mixed_use_hill_keys=np.array([1]))
    for q_idx, q_key in enumerate(qs):
        for d_idx, d_key in enumerate(distances):
            assert np.allclose(
                N.metrics['mixed_uses']['hill_branch_wt'][q_key][d_key],
                mu_data_hill[0][q_idx][d_idx],
                atol=0.001,
                rtol=0)
    # gini simpson
    D.compute_aggregated(landuse_labels, mixed_use_keys=['gini_simpson'])
    # test against underlying method
    data_map = D._data
    mu_data_hill, mu_data_other, ac_data, ac_data_wt, \
    stats_sum, stats_sum_wt, stats_mean, stats_mean_wt, stats_variance, stats_variance_wt, stats_max, stats_min = \
        data.local_aggregator(node_map,
                              edge_map,
                              node_edge_map,
                              data_map,
                              distances,
                              betas,
                              landuse_encodings,
                              mixed_use_other_keys=np.array([1]))
    for d_idx, d_key in enumerate(distances):
        assert np.allclose(N.metrics['mixed_uses']['gini_simpson'][d_key],
                           mu_data_other[0][d_idx],
                           atol=0.001,
                           rtol=0)
    # accessibilities
    D.compute_aggregated(landuse_labels, accessibility_keys=['c'])
    # test against underlying method
    data_map = D._data
    mu_data_hill, mu_data_other, ac_data, ac_data_wt, \
    stats_sum, stats_sum_wt, stats_mean, stats_mean_wt, stats_variance, stats_variance_wt, stats_max, stats_min = \
        data.local_aggregator(node_map,
                              edge_map,
                              node_edge_map,
                              data_map,
                              distances,
                              betas,
                              landuse_encodings,
                              accessibility_keys=np.array([landuse_classes.index('c')]))
    for d_idx, d_key in enumerate(distances):
        assert np.allclose(
            N.metrics['accessibility']['non_weighted']['c'][d_key],
            ac_data[0][d_idx],
            atol=0.001,
            rtol=0)
        assert np.allclose(N.metrics['accessibility']['weighted']['c'][d_key],
                           ac_data_wt[0][d_idx],
                           atol=0.001,
                           rtol=0)
    # also check the number of returned types for a few assortments of metrics
    mixed_uses_hill_types = np.array([
        'hill', 'hill_branch_wt', 'hill_pairwise_wt', 'hill_pairwise_disparity'
    ])
    mixed_use_other_types = np.array(
        ['shannon', 'gini_simpson', 'raos_pairwise_disparity'])
    ac_codes = np.array(landuse_classes)

    mu_hill_random = np.arange(len(mixed_uses_hill_types))
    np.random.shuffle(mu_hill_random)

    mu_other_random = np.arange(len(mixed_use_other_types))
    np.random.shuffle(mu_other_random)

    ac_random = np.arange(len(landuse_classes))
    np.random.shuffle(ac_random)

    # mock disparity matrix
    mock_disparity_wt_matrix = np.full(
        (len(landuse_classes), len(landuse_classes)), 1)

    # not necessary to do all labels, first few should do
    for mu_h_min in range(3):
        mu_h_keys = np.array(mu_hill_random[mu_h_min:])

        for mu_o_min in range(3):
            mu_o_keys = np.array(mu_other_random[mu_o_min:])

            for ac_min in range(3):
                ac_keys = np.array(ac_random[ac_min:])

                # in the final case, set accessibility to a single code otherwise an error would be raised
                if len(mu_h_keys) == 0 and len(mu_o_keys) == 0 and len(
                        ac_keys) == 0:
                    ac_keys = np.array([0])

                # randomise order of keys and metrics
                mu_h_metrics = mixed_uses_hill_types[mu_h_keys]
                mu_o_metrics = mixed_use_other_types[mu_o_keys]
                ac_metrics = ac_codes[ac_keys]

                N_temp = networks.Network_Layer_From_nX(G, distances)
                D_temp = layers.Data_Layer_From_Dict(data_dict)
                D_temp.assign_to_network(N_temp, max_dist=500)
                D_temp.compute_aggregated(
                    landuse_labels,
                    mixed_use_keys=list(mu_h_metrics) + list(mu_o_metrics),
                    accessibility_keys=ac_metrics,
                    cl_disparity_wt_matrix=mock_disparity_wt_matrix,
                    qs=qs)

                # test against underlying method
                mu_data_hill, mu_data_other, ac_data, ac_data_wt, stats_sum, stats_sum_wt, \
                stats_mean, stats_mean_wt, stats_variance, stats_variance_wt, stats_max, stats_min = \
                    data.local_aggregator(node_map,
                                          edge_map,
                                          node_edge_map,
                                          data_map,
                                          distances,
                                          betas,
                                          landuse_encodings,
                                          qs=qs,
                                          mixed_use_hill_keys=mu_h_keys,
                                          mixed_use_other_keys=mu_o_keys,
                                          accessibility_keys=ac_keys,
                                          cl_disparity_wt_matrix=mock_disparity_wt_matrix)

                for mu_h_idx, mu_h_met in enumerate(mu_h_metrics):
                    for q_idx, q_key in enumerate(qs):
                        for d_idx, d_key in enumerate(distances):
                            assert np.allclose(
                                N_temp.metrics['mixed_uses'][mu_h_met][q_key]
                                [d_key],
                                mu_data_hill[mu_h_idx][q_idx][d_idx],
                                atol=0.001,
                                rtol=0)

                for mu_o_idx, mu_o_met in enumerate(mu_o_metrics):
                    for d_idx, d_key in enumerate(distances):
                        assert np.allclose(
                            N_temp.metrics['mixed_uses'][mu_o_met][d_key],
                            mu_data_other[mu_o_idx][d_idx],
                            atol=0.001,
                            rtol=0)

                for ac_idx, ac_met in enumerate(ac_metrics):
                    for d_idx, d_key in enumerate(distances):
                        assert np.allclose(N_temp.metrics['accessibility']
                                           ['non_weighted'][ac_met][d_key],
                                           ac_data[ac_idx][d_idx],
                                           atol=0.001,
                                           rtol=0)
                        assert np.allclose(N_temp.metrics['accessibility']
                                           ['weighted'][ac_met][d_key],
                                           ac_data_wt[ac_idx][d_idx],
                                           atol=0.001,
                                           rtol=0)

    # most integrity checks happen in the underlying method, but check here for mismatched label lengths and typos
    with pytest.raises(ValueError):
        D.compute_aggregated(landuse_labels[-1], mixed_use_keys=['shannon'])
    with pytest.raises(ValueError):
        D.compute_aggregated(landuse_labels, mixed_use_keys=['spelling_typo'])
    # don't check accessibility_keys for typos - only a warning is triggered (not all labels will be present in all data)
    # check that an unassigned data layer raises an error
    with pytest.raises(ValueError):
        D_new = layers.Data_Layer_From_Dict(data_dict)
        D_new.compute_aggregated(landuse_labels, mixed_use_keys=['shannon'])
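For reference, the two 'other' mixed-use measures exercised above have simple closed forms: Gini-Simpson is 1 - sum_i p_i^2 and Shannon is -sum_i p_i ln p_i. A minimal standalone sketch (illustrative, not the library's internal implementation):

import numpy as np

def gini_simpson_sketch(counts):
    probs = np.asarray(counts, dtype=float)
    probs = probs / probs.sum()
    return float(1.0 - np.sum(probs ** 2))

def shannon_sketch(counts):
    probs = np.asarray(counts, dtype=float)
    probs = probs[probs > 0] / probs.sum()
    return float(-np.sum(probs * np.log(probs)))

print(gini_simpson_sketch([2, 2, 2, 2]))  # 0.75 for four evenly mixed classes
print(shannon_sketch([2, 2, 2, 2]))       # ln(4) ≈ 1.386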