def test_hill_branch_wt_diversity(primal_graph):
    for distances, betas in network_generator():
        G = primal_graph.copy()
        data_dict = mock.mock_data_dict(G)
        landuse_labels = mock.mock_categorical_data(len(data_dict))
        # easy version
        N_easy = networks.NetworkLayerFromNX(G, distances=distances)
        D_easy = layers.DataLayerFromDict(data_dict)
        D_easy.assign_to_network(N_easy, max_dist=500)
        D_easy.hill_branch_wt_diversity(landuse_labels, qs=[0, 1, 2])
        # custom version
        N_full = networks.NetworkLayerFromNX(G, distances=distances)
        D_full = layers.DataLayerFromDict(data_dict)
        D_full.assign_to_network(N_full, max_dist=500)
        D_full.compute_landuses(landuse_labels,
                                mixed_use_keys=['hill_branch_wt'],
                                qs=[0, 1, 2])
        # compare
        for d in distances:
            for q in [0, 1, 2]:
                assert np.allclose(
                    N_easy.metrics['mixed_uses']['hill_branch_wt'][q][d],
                    N_full.metrics['mixed_uses']['hill_branch_wt'][q][d],
                    atol=0.001,
                    rtol=0)
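
These snippets are excerpted from a test suite without their module headers. A minimal sketch of the imports they presumably rely on (an assumption based on the names used; the async database examples further below additionally depend on project-local helpers such as postGIS_to_networkX, which are not reconstructed here):

# presumed imports for the test snippets below (an assumption drawn from the
# names used; the originals are excerpted without their module headers)
import numpy as np
import pytest

from cityseer.algos import centrality, checks, data
from cityseer.metrics import layers, networks
from cityseer.tools import graphs, mock
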
Example #2
def test_check_data_map(primal_graph):
    N = networks.NetworkLayerFromNX(primal_graph, distances=[500])
    data_dict = mock.mock_data_dict(primal_graph)
    data_uids, data_map = layers.data_map_from_dict(data_dict)

    # should throw error if not assigned
    with pytest.raises(ValueError):
        checks.check_data_map(data_map)

    # should work if flag set to False
    checks.check_data_map(data_map, check_assigned=False)

    # assign then check that it runs as intended
    data_map = data.assign_to_network(data_map,
                                      N._node_data,
                                      N._edge_data,
                                      N._node_edge_map,
                                      max_dist=400)
    checks.check_data_map(data_map)

    # catch zero length data arrays
    empty_2d_arr = np.full((0, 4), np.nan)
    with pytest.raises(ValueError):
        checks.check_data_map(empty_2d_arr)

    # catch invalid dimensionality
    with pytest.raises(ValueError):
        checks.check_data_map(data_map[:, :-1])
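
A hypothetical sketch (not cityseer's implementation) of the guards the assertions above imply for checks.check_data_map; which column flags assignment is an assumption:

import numpy as np

def _check_data_map_sketch(data_map, check_assigned=True):
    # expects a 2D array of four columns per data point
    if data_map.ndim != 2 or data_map.shape[1] != 4:
        raise ValueError('Expected a 2D data map with four columns.')
    if len(data_map) == 0:
        raise ValueError('Empty data map.')
    # unless disabled, at least one point must be assigned to the network
    if check_assigned and np.all(np.isnan(data_map[:, 2])):
        raise ValueError('Data map is not assigned to a network.')
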
Example #3
def test_check_network_maps(primal_graph):
    # network maps
    N = networks.NetworkLayerFromNX(primal_graph, distances=[500])
    # from cityseer.tools import plot
    # plot.plot_networkX_primal_or_dual(primal=G)
    # plot.plot_graph_maps(N.uids, N._node_data, N._edge_data)
    # catch zero length node and edge arrays
    empty_node_arr = np.full((0, 5), np.nan)
    with pytest.raises(ValueError):
        checks.check_network_maps(empty_node_arr, N._edge_data,
                                  N._node_edge_map)
    empty_edge_arr = np.full((0, 4), np.nan)
    with pytest.raises(ValueError):
        checks.check_network_maps(N._node_data, empty_edge_arr,
                                  N._node_edge_map)
    # check that malformed node and data maps throw errors
    with pytest.raises(ValueError):
        checks.check_network_maps(N._node_data[:, :-1], N._edge_data,
                                  N._node_edge_map)
    with pytest.raises(ValueError):
        checks.check_network_maps(N._node_data, N._edge_data[:, :-1],
                                  N._node_edge_map)
    # catch problematic edge map values
    for x in [np.nan, -1]:
        # missing start node
        corrupted_edges = N._edge_data.copy()
        corrupted_edges[0, 0] = x
        with pytest.raises(AssertionError):
            checks.check_network_maps(N._node_data, corrupted_edges,
                                      N._node_edge_map)
        # missing end node
        corrupted_edges = N._edge_data.copy()
        corrupted_edges[0, 1] = x
        with pytest.raises(KeyError):
            checks.check_network_maps(N._node_data, corrupted_edges,
                                      N._node_edge_map)
        # invalid length
        corrupted_edges = N._edge_data.copy()
        corrupted_edges[0, 2] = x
        with pytest.raises(ValueError):
            checks.check_network_maps(N._node_data, corrupted_edges,
                                      N._node_edge_map)
        # invalid angle_sum
        corrupted_edges = N._edge_data.copy()
        corrupted_edges[0, 3] = x
        with pytest.raises(ValueError):
            checks.check_network_maps(N._node_data, corrupted_edges,
                                      N._node_edge_map)
        # invalid imp_factor
        corrupted_edges = N._edge_data.copy()
        corrupted_edges[0, 4] = x
        with pytest.raises(ValueError):
            checks.check_network_maps(N._node_data, corrupted_edges,
                                      N._node_edge_map)
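
The nan/-1 corruption loop above can also be phrased more idiomatically with pytest.mark.parametrize; a sketch whose (column, exception) pairs mirror the assertions above exactly:

import numpy as np
import pytest

from cityseer.algos import checks
from cityseer.metrics import networks

# an equivalent phrasing of the corruption loop in test_check_network_maps
@pytest.mark.parametrize('bad_val', [np.nan, -1])
@pytest.mark.parametrize('col, exc', [
    (0, AssertionError),  # missing start node
    (1, KeyError),        # missing end node
    (2, ValueError),      # invalid length
    (3, ValueError),      # invalid angle_sum
    (4, ValueError),      # invalid imp_factor
])
def test_corrupt_edge_values(primal_graph, bad_val, col, exc):
    N = networks.NetworkLayerFromNX(primal_graph, distances=[500])
    corrupted_edges = N._edge_data.copy()
    corrupted_edges[0, col] = bad_val
    with pytest.raises(exc):
        checks.check_network_maps(N._node_data, corrupted_edges, N._node_edge_map)
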
Example #4
def test_compute_accessibilities(primal_graph):
    for distances, betas in network_generator():
        G = primal_graph.copy()
        data_dict = mock.mock_data_dict(G)
        landuse_labels = mock.mock_categorical_data(len(data_dict))
        # easy version
        N_easy = networks.NetworkLayerFromNX(G, distances=distances)
        D_easy = layers.DataLayerFromDict(data_dict)
        D_easy.assign_to_network(N_easy, max_dist=500)
        D_easy.compute_accessibilities(landuse_labels, ['c'])
        # custom version
        N_full = networks.NetworkLayerFromNX(G, distances=distances)
        D_full = layers.DataLayerFromDict(data_dict)
        D_full.assign_to_network(N_full, max_dist=500)
        D_full.compute_landuses(landuse_labels, accessibility_keys=['c'])
        # compare
        for d in distances:
            for wt in ['weighted', 'non_weighted']:
                assert np.allclose(N_easy.metrics['accessibility'][wt]['c'][d],
                                   N_full.metrics['accessibility'][wt]['c'][d],
                                   atol=0.001,
                                   rtol=0)
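
For orientation, the two variants compared above differ in how distance enters the aggregation; an illustrative sketch under the assumption that 'weighted' applies a negative-exponential distance discount while 'non_weighted' is a simple count within the threshold (function and variable names here are assumptions):

import numpy as np

def accessibility_sketch(netw_dists, beta, d_max):
    # count matching landuses reachable within the distance threshold
    within = netw_dists <= d_max
    non_weighted = float(np.sum(within))
    # discount each reachable instance by exp(-beta * network distance)
    weighted = float(np.sum(np.exp(-beta * netw_dists[within])))
    return non_weighted, weighted

print(accessibility_sketch(np.array([50.0, 150.0, 450.0]), beta=0.01, d_max=400))
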
Example #5
def test_metrics_to_dict(primal_graph):
    # create a network layer and run some metrics
    N = networks.NetworkLayerFromNX(primal_graph, distances=[500, 1000])

    # check with no metrics
    metrics_dict = N.metrics_to_dict()
    dict_check(metrics_dict, N)

    # check with centrality metrics
    N.node_centrality(measures=['node_harmonic'])
    metrics_dict = N.metrics_to_dict()
    dict_check(metrics_dict, N)

    # check with data metrics
    data_dict = mock.mock_data_dict(primal_graph)
    landuse_labels = mock.mock_categorical_data(len(data_dict))
    numerical_data = mock.mock_numerical_data(len(data_dict))
    # the data metrics presumably need to be computed before dumping the dict
    D = layers.DataLayerFromDict(data_dict)
    D.assign_to_network(N, max_dist=400)
    D.compute_landuses(landuse_labels, mixed_use_keys=['hill', 'shannon'], qs=[0, 1])
    D.compute_stats(stats_keys='boo', stats_data_arrs=numerical_data[0])
    metrics_dict = N.metrics_to_dict()
    dict_check(metrics_dict, N)
Example #6
def test_Network_Layer_From_nX(primal_graph):
    node_uids, node_data, edge_data, node_edge_map = graphs.graph_maps_from_nX(primal_graph)
    x_arr = node_data[:, 0]
    y_arr = node_data[:, 1]
    betas = np.array([0.04, 0.02])
    distances = networks.distance_from_beta(betas)

    # test Network_Layer_From_NetworkX's class
    for d, b in zip([distances, None], [None, betas]):
        for angular in [True, False]:
            N = networks.NetworkLayerFromNX(primal_graph, distances=d, betas=b)
            assert np.allclose(N.uids, node_uids, atol=0.001, rtol=0)
            assert np.allclose(N._node_data, node_data, atol=0.001, rtol=0)
            assert np.allclose(N._edge_data, edge_data, atol=0.001, rtol=0)
            assert np.allclose(N.distances, distances, atol=0.001,
                               rtol=0)  # inferred automatically when only betas provided
            assert np.allclose(N.betas, betas, atol=0.001,
                               rtol=0)  # inferred automatically when only distances provided
            assert N._min_threshold_wt == checks.def_min_thresh_wt
            assert np.allclose(N.node_x_arr, x_arr, atol=0.001, rtol=0)
            assert np.allclose(N.node_y_arr, y_arr, atol=0.001, rtol=0)
            assert np.allclose(N.node_live_arr, node_data[:, 2], atol=0.001, rtol=0)
            assert np.allclose(N.edge_lengths_arr, edge_data[:, 2], atol=0.001, rtol=0)
            assert np.allclose(N.edge_angles_arr, edge_data[:, 3], atol=0.001, rtol=0)
            assert np.allclose(N.edge_impedance_factors_arr, edge_data[:, 4], atol=0.001, rtol=0)
            assert np.allclose(N.edge_in_bearings_arr, edge_data[:, 5], atol=0.001, rtol=0)
            assert np.allclose(N.edge_out_bearings_arr, edge_data[:, 6], atol=0.001, rtol=0)

    # check alternate min_threshold_wt gets passed through successfully
    alt_min = 0.02
    alt_distances = networks.distance_from_beta(betas, min_threshold_wt=alt_min)
    N = networks.NetworkLayerFromNX(primal_graph, betas=betas, min_threshold_wt=alt_min)
    assert np.allclose(N.distances, alt_distances, atol=0.001, rtol=0)

    # check for malformed signatures
    with pytest.raises(TypeError):
        networks.NetworkLayerFromNX('boo', distances=distances)
    with pytest.raises(ValueError):
        networks.NetworkLayerFromNX(primal_graph)  # no betas or distances
    with pytest.raises(ValueError):
        networks.NetworkLayerFromNX(primal_graph, distances=None, betas=None)
    with pytest.raises(ValueError):
        networks.NetworkLayerFromNX(primal_graph, distances=[])
    with pytest.raises(ValueError):
        networks.NetworkLayerFromNX(primal_graph, betas=[])
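
As an aside on the betas/distances round trip tested above, the conversion presumably follows the negative-exponential cutoff d_max = -ln(w_min) / beta, with checks.def_min_thresh_wt = e^-4 (approx. 0.01832) as the default cutoff weight; a worked sketch:

import numpy as np

# d_max = -ln(w_min) / beta; with w_min = e^-4 this reduces to 4 / beta
betas = np.array([0.04, 0.02])
min_threshold_wt = np.exp(-4)  # default weight cutoff
distances = -np.log(min_threshold_wt) / betas
print(distances)  # -> [100. 200.]
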
Example #7
def test_to_networkX(primal_graph):
    # also see test_graphs.test_networkX_from_graph_maps for underlying graph maps version

    # check round trip to and from graph maps results in same graph
    # explicitly set live and weight params for equality checks
    # graph_maps_from_networkX generates these implicitly if missing
    G = graphs.nX_decompose(primal_graph, decompose_max=20)
    for n in G.nodes():
        G.nodes[n]['live'] = bool(np.random.randint(0, 2))  # upper bound exclusive; (0, 1) would always yield False
    for s, e, k in G.edges(keys=True):
        G[s][e][k]['imp_factor'] = np.random.randint(0, 2)

    # add random data to check persistence at other end
    baa_node = None
    for n in G.nodes():
        baa_node = n
        G.nodes[n]['boo'] = 'baa'
        break
    boo_edge = None
    for s, e, k in G.edges(keys=True):
        boo_edge = (s, e)
        G[s][e][k]['baa'] = 'boo'
        break

    # test with metrics
    N = networks.NetworkLayerFromNX(G, distances=[500])
    N.node_centrality(measures=['node_harmonic'])
    metrics_dict = N.metrics_to_dict()
    G_round_trip = N.to_networkX()
    for n, d in G.nodes(data=True):
        assert G_round_trip.nodes[n]['x'] == d['x']
        assert G_round_trip.nodes[n]['y'] == d['y']
        assert G_round_trip.nodes[n]['live'] == d['live']
    for s, e, k, d in G.edges(keys=True, data=True):
        assert G_round_trip[s][e][k]['geom'] == d['geom']
        assert G_round_trip[s][e][k]['imp_factor'] == d['imp_factor']
    # check that metrics came through
    for uid, metrics in metrics_dict.items():
        assert G_round_trip.nodes[uid]['metrics'] == metrics
    # check data persistence
    assert G_round_trip.nodes[baa_node]['boo'] == 'baa'
    assert G_round_trip[boo_edge[0]][boo_edge[1]][0]['baa'] == 'boo'
Example #8
def test_find_nearest(primal_graph):
    N = networks.NetworkLayerFromNX(primal_graph, distances=[100])
    # generate some data
    data_dict = mock.mock_data_dict(primal_graph)
    D = layers.DataLayerFromDict(data_dict)
    # test the filter - iterating each point in data map
    for d in D._data:
        d_x = d[0]
        d_y = d[1]
        # find the closest point on the network
        min_idx, min_dist = data.find_nearest(d_x, d_y, N.node_x_arr, N.node_y_arr, max_dist=500)
        # check that no other indices are nearer
        for i, n in enumerate(N._node_data):
            n_x = n[0]
            n_y = n[1]
            dist = np.sqrt((d_x - n_x) ** 2 + (d_y - n_y) ** 2)
            if i == min_idx:
                assert round(dist, 8) == round(min_dist, 8)
            else:
                assert dist > min_dist
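
The brute-force verification above can be condensed into a vectorised equivalent; a sketch (not cityseer's find_nearest) of the same nearest-within-max_dist logic:

import numpy as np

def nearest_idx(d_x, d_y, x_arr, y_arr, max_dist):
    # Euclidean distance from the data point to every network node
    dists = np.hypot(x_arr - d_x, y_arr - d_y)
    min_idx = int(np.argmin(dists))
    # respect the max_dist cutoff
    if dists[min_idx] > max_dist:
        return None, np.inf
    return min_idx, float(dists[min_idx])
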
Example #9
def test_compute_centrality(primal_graph):
    """
    Underlying methods also tested via test_networks.test_network_centralities
    """
    betas = np.array([0.01, 0.005])
    distances = networks.distance_from_beta(betas)
    # generate data structures
    N = networks.NetworkLayerFromNX(primal_graph, distances=distances)
    node_data = N._node_data
    edge_data = N._edge_data
    node_edge_map = N._node_edge_map

    # CHECK NODE BASED
    node_measures = ['node_density',
                     'node_farness',
                     'node_cycles',
                     'node_harmonic',
                     'node_beta',
                     'node_betweenness',
                     'node_betweenness_beta']
    node_measures_ang = ['node_harmonic_angular',
                         'node_betweenness_angular']

    # check measures against underlying method
    N = networks.NetworkLayerFromNX(primal_graph, distances=distances)
    N.node_centrality(measures=['node_density'])
    # test against underlying method
    measures_data = centrality.local_node_centrality(node_data,
                                                     edge_data,
                                                     node_edge_map,
                                                     distances,
                                                     betas,
                                                     measure_keys=('node_density',))
    for d_idx, d_key in enumerate(distances):
        assert np.allclose(N.metrics['centrality']['node_density'][d_key], measures_data[0][d_idx])
    # also check the number of returned types for a few assortments of metrics
    np.random.shuffle(node_measures)  # in place
    # not necessary to do all labels, first few should do
    for min_idx in range(3):
        measure_keys = np.array(node_measures[min_idx:])
        N = networks.NetworkLayerFromNX(primal_graph, distances=distances)
        N.node_centrality(measures=node_measures)
        # test against underlying method
        measures_data = centrality.local_node_centrality(node_data,
                                                         edge_data,
                                                         node_edge_map,
                                                         distances,
                                                         betas,
                                                         measure_keys=tuple(measure_keys))
        for m_idx, measure_name in enumerate(measure_keys):
            for d_idx, d_key in enumerate(distances):
                assert np.allclose(N.metrics['centrality'][measure_name][d_key],
                                   measures_data[m_idx][d_idx], atol=0.001, rtol=0)
    # check that angular gets passed through
    N_ang = networks.NetworkLayerFromNX(primal_graph, distances=[2000])
    N_ang.node_centrality(measures=['node_harmonic_angular'],
                          angular=True)
    N = networks.NetworkLayerFromNX(primal_graph, distances=[2000])
    N.node_centrality(measures=['node_harmonic'],
                      angular=False)
    assert not np.allclose(N_ang.metrics['centrality']['node_harmonic_angular'][2000],
                           N.metrics['centrality']['node_harmonic'][2000], atol=0.001, rtol=0)
    # check that typos, duplicates, and mixed angular / non-angular are caught
    with pytest.raises(ValueError):
        N.node_centrality(measures=['spelling_typo'])
    with pytest.raises(ValueError):
        N.node_centrality(measures=['node_density', 'node_density'])
    with pytest.raises(ValueError):
        N.node_centrality(measures=['node_density', 'node_harmonic_angular'])

    # CHECK SEGMENTISED
    segment_measures = ['segment_density',
                        'segment_harmonic',
                        'segment_beta',
                        'segment_betweenness']
    segment_measures_ang = ['segment_harmonic_hybrid',
                            'segment_betweeness_hybrid']  # sic: 'betweeness' matches the key spelling used elsewhere in these snippets

    # check measures against underlying method
    N = networks.NetworkLayerFromNX(primal_graph, distances=distances)
    N.segment_centrality(measures=['segment_density'])
    # test against underlying method
    measures_data = centrality.local_segment_centrality(node_data,
                                                        edge_data,
                                                        node_edge_map,
                                                        distances,
                                                        betas,
                                                        measure_keys=('segment_density',))
    for d_idx, d_key in enumerate(distances):
        assert np.allclose(N.metrics['centrality']['segment_density'][d_key], measures_data[0][d_idx])
    # also check the number of returned types for a few assortments of metrics
    np.random.shuffle(segment_measures)  # in place
    # not necessary to do all labels, first few should do
    for min_idx in range(3):
        measure_keys = np.array(segment_measures[min_idx:])
        N = networks.NetworkLayerFromNX(primal_graph,
                                        distances=distances)
        N.segment_centrality(measures=segment_measures)
        # test against underlying method
        measures_data = centrality.local_segment_centrality(node_data,
                                                            edge_data,
                                                            node_edge_map,
                                                            distances,
                                                            betas,
                                                            measure_keys=tuple(measure_keys))
        for m_idx, measure_name in enumerate(measure_keys):
            for d_idx, d_key in enumerate(distances):
                assert np.allclose(N.metrics['centrality'][measure_name][d_key],
                                   measures_data[m_idx][d_idx], atol=0.001, rtol=0)
    # check that angular gets passed through
    N_ang = networks.NetworkLayerFromNX(primal_graph, distances=[2000])
    N_ang.segment_centrality(measures=['segment_harmonic_hybrid'],
                             angular=True)
    N = networks.NetworkLayerFromNX(primal_graph, distances=[2000])
    N.segment_centrality(measures=['segment_harmonic'],
                         angular=False)
    assert not np.allclose(N_ang.metrics['centrality']['segment_harmonic_hybrid'][2000],
                           N.metrics['centrality']['segment_harmonic'][2000], atol=0.001, rtol=0)
    # check that typos, duplicates, and mixed angular / non-angular are caught
    with pytest.raises(ValueError):
        N.segment_centrality(measures=['spelling_typo'])
    with pytest.raises(ValueError):
        N.segment_centrality(measures=['segment_density', 'segment_density'])
    with pytest.raises(ValueError):
        N.segment_centrality(measures=['segment_density', 'segment_harmonic_hybrid'])

    # check that the deprecated method raises:
    with pytest.raises(DeprecationWarning):
        N.compute_centrality()
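
For reference, a short access sketch for the nested metrics structure these assertions traverse, reusing the N from the final segment checks above; that values are per-node arrays aligned with N.uids is an assumption drawn from the round-trip tests:

# theme -> measure -> distance, one value per node (aligned with N.uids)
segment_harmonic_2000 = N.metrics['centrality']['segment_harmonic'][2000]
assert len(segment_harmonic_2000) == len(N.uids)
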
Example #10
async def accessibility_calc(db_config,
                             nodes_table,
                             links_table,
                             city_pop_id,
                             distances,
                             boundary_table='analysis.city_boundaries_150',
                             data_table='os.poi',
                             data_where=None,
                             rdm_flag=False,
                             dual_flag=False):
    if dual_flag or rdm_flag:
        if city_pop_id > 1:
            logger.warning(
                'Only do dual or randomised metrics for city_pop_id = 1')
            return

    if dual_flag:
        nodes_table += '_dual'
        links_table += '_dual'

    if rdm_flag:
        data_table += '_randomised'

    logger.info(
        f'Starting LU calcs for city id: {city_pop_id} on network table '
        f'{nodes_table} and data table {data_table}')
    logger.info('Loading network data')
    G = await postGIS_to_networkX(db_config, nodes_table, links_table,
                                  city_pop_id)
    N = networks.NetworkLayerFromNX(G, distances)
    logger.info(f'Loading POI data from data table: {data_table}')
    data_dict = await postGIS_to_landuses_dict(db_config,
                                               data_table,
                                               'urn',
                                               'class_code',
                                               boundary_table,
                                               city_pop_id,
                                               max_dist=max(distances),
                                               data_where=data_where)
    data_uids, data_map = layers.data_map_from_dict(data_dict)
    # derive the landuse labels, classes, encodings
    landuse_labels = [v['class'] for v in data_dict.values()]
    landuse_classes, landuse_encodings = layers.encode_categorical(
        landuse_labels)
    logger.info('Generating disparity weights matrix')
    cl_disparity_wt_matrix = disparity_wt_matrix(landuse_classes)
    logger.info('Creating data layer')
    D = layers.DataLayer(data_uids, data_map)

    start = time.localtime()
    logger.info('Assigning data points to the network')
    D.assign_to_network(N, max_dist=400)

    # generate the accessibility codes Class
    # this deduces codes and squashes results into categories
    logger.info('Generating POI accessibility codes')
    Acc_codes = Accessibility_Codes(landuse_classes,
                                    len(N.uids),
                                    distances,
                                    compact=(dual_flag or rdm_flag))

    mixed_use_metrics = [
        'hill', 'hill_branch_wt', 'hill_pairwise_wt',
        'hill_pairwise_disparity', 'shannon', 'gini_simpson',
        'raos_pairwise_disparity'
    ]
    # if dual or rdm only do first two
    if dual_flag or rdm_flag:
        mixed_use_metrics = mixed_use_metrics[:2]
        cl_disparity_wt_matrix = None
    # compute
    logger.info('Computing landuses')
    D.compute_aggregated(landuse_labels=landuse_labels,
                         mixed_use_keys=mixed_use_metrics,
                         accessibility_keys=Acc_codes.all_codes,
                         cl_disparity_wt_matrix=cl_disparity_wt_matrix,
                         qs=[0, 1, 2])
    time_duration = datetime.timedelta(seconds=time.mktime(time.localtime()) -
                                       time.mktime(start))
    logger.info(f'Algo duration: {time_duration}')

    # squash the accessibility data
    logger.info('Squashing accessibility data')
    Acc_codes.set_metrics(N.metrics['accessibility'])

    mu_q_keys = [
        'hill', 'hill_branch_wt', 'hill_pairwise_wt', 'hill_pairwise_disparity'
    ]
    if dual_flag or rdm_flag:
        mu_q_keys = mu_q_keys[:2]

    mu_keys = ['shannon', 'gini_simpson', 'raos_pairwise_disparity']
    if dual_flag or rdm_flag:
        mu_keys = []

    if not dual_flag and not rdm_flag:
        ac_keys = [
            'accommodation', 'eating', 'drinking', 'commercial', 'tourism',
            'entertainment', 'government', 'manufacturing', 'retail_food',
            'retail_other', 'transport', 'health', 'education', 'parks',
            'cultural', 'sports', 'total'
        ]
    else:
        ac_keys = [
            'eating', 'drinking', 'commercial', 'retail_food', 'retail_other',
            'transport', 'total'
        ]

    # aggregate the data
    logger.info('Aggregating results')
    bulk_data = []
    for idx, uid in enumerate(N.uids):
        # first check that this is a live node (i.e. within the original city boundary)
        if not N.live[idx]:
            continue
        node_data = [uid]
        # mixed-use keys requiring q values
        for mu_key in mu_q_keys:
            for q_key, q_val in N.metrics['mixed_uses'][mu_key].items():
                inner_data = []
                for d_key, d_val in q_val.items():
                    inner_data.append(d_val[idx])
                node_data.append(inner_data)
        # mixed-use keys not requiring q values
        for mu_key in mu_keys:
            inner_data = []
            for d_key, d_val in N.metrics['mixed_uses'][mu_key].items():
                inner_data.append(d_val[idx])
            node_data.append(inner_data)
        # accessibility keys
        for ac_key in ac_keys:
            inner_data = []
            for d_key, d_val in Acc_codes.metrics['weighted'][ac_key].items():
                inner_data.append(d_val[idx])
            node_data.append(inner_data)
            # also write non-weighted variants of the following
            if ac_key in [
                    'eating', 'commercial', 'retail_food', 'retail_other',
                    'total'
            ]:
                inner_data = []
                for d_key, d_val in Acc_codes.metrics['non_weighted'][
                        ac_key].items():
                    inner_data.append(d_val[idx])
                node_data.append(inner_data)
        bulk_data.append(tuple(node_data))

    logger.info('Writing results to database')
    db_con = await asyncpg.connect(**db_config)
    if not dual_flag and not rdm_flag:
        measure_cols = [
            'mu_hill_0', 'mu_hill_1', 'mu_hill_2', 'mu_hill_branch_wt_0',
            'mu_hill_branch_wt_1', 'mu_hill_branch_wt_2',
            'mu_hill_pairwise_wt_0', 'mu_hill_pairwise_wt_1',
            'mu_hill_pairwise_wt_2', 'mu_hill_dispar_wt_0',
            'mu_hill_dispar_wt_1', 'mu_hill_dispar_wt_2', 'mu_shannon',
            'mu_gini', 'mu_raos', 'ac_accommodation', 'ac_eating',
            'ac_eating_nw', 'ac_drinking', 'ac_commercial', 'ac_commercial_nw',
            'ac_tourism', 'ac_entertainment', 'ac_government',
            'ac_manufacturing', 'ac_retail_food', 'ac_retail_food_nw',
            'ac_retail_other', 'ac_retail_other_nw', 'ac_transport',
            'ac_health', 'ac_education', 'ac_parks', 'ac_cultural',
            'ac_sports', 'ac_total', 'ac_total_nw'
        ]
    else:
        measure_cols = [
            'mu_hill_0', 'mu_hill_1', 'mu_hill_2', 'mu_hill_branch_wt_0',
            'mu_hill_branch_wt_1', 'mu_hill_branch_wt_2', 'ac_eating',
            'ac_eating_nw', 'ac_drinking', 'ac_commercial', 'ac_commercial_nw',
            'ac_retail_food', 'ac_retail_food_nw', 'ac_retail_other',
            'ac_retail_other_nw', 'ac_transport', 'ac_total', 'ac_total_nw'
        ]
    # add the _rdm extension if necessary
    if rdm_flag:
        measure_cols = [m + '_rdm' for m in measure_cols]
    # create the columns
    col_strings = []
    counter = 2
    for measure_col in measure_cols:
        await db_con.execute(f'''
        ALTER TABLE {nodes_table}
            ADD COLUMN IF NOT EXISTS {measure_col} real[];
        ''')
        col_strings.append(f'{measure_col} = ${counter}')
        counter += 1
    await db_con.executemany(
        f'UPDATE {nodes_table} SET ' + ', '.join(col_strings) +
        ' WHERE id = $1;', bulk_data)
    await db_con.close()
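
For context on the 'hill*' mixed-use keys computed above, an illustrative sketch of the Hill diversity family (not cityseer's implementation): D_q = (sum_i p_i^q)^(1/(1-q)), with q = 1 taken as the limit exp(Shannon entropy):

import numpy as np

def hill_diversity(counts, q):
    probs = counts / counts.sum()
    probs = probs[probs > 0]
    if q == 1:
        # q = 1 is the limiting case: exponential of Shannon entropy
        return float(np.exp(-np.sum(probs * np.log(probs))))
    return float(np.sum(probs ** q) ** (1 / (1 - q)))

counts = np.array([10.0, 5.0, 1.0])
print([round(hill_diversity(counts, q), 3) for q in (0, 1, 2)])  # q=0 is richness
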
Example #11
async def centrality_shortest(db_config, nodes_table, links_table, city_pop_id, distances):
    logger.info(f'Loading graph for city: {city_pop_id} derived from table: {nodes_table}')
    G = await postGIS_to_networkX(db_config, nodes_table, links_table, city_pop_id)
    if len(G) == 0:
        return
    logger.info('Generating node map and edge map')
    N = networks.NetworkLayerFromNX(G, distances=distances)

    logger.info('Calculating shortest-path node centralities')
    start = time.localtime()
    node_measures = [
        'node_density',
        'node_farness',
        'node_cycles',
        'node_harmonic',
        'node_beta',
        'node_betweenness',
        'node_betweenness_beta'
    ]
    N.node_centrality(measures=node_measures)
    time_duration = datetime.timedelta(
        seconds=time.mktime(time.localtime()) - time.mktime(start))
    logger.info(f'Algo duration: {time_duration}')

    logger.info('Calculating shortest-path segment centralities')
    start = time.localtime()
    segment_measures = [
        'segment_density',
        'segment_harmonic',
        'segment_beta',
        'segment_betweenness'
    ]
    N.segment_centrality(measures=segment_measures)
    time_duration = datetime.timedelta(
        seconds=time.mktime(time.localtime()) - time.mktime(start))
    logger.info(f'Algo duration: {time_duration}')

    logger.info('Calculating simplest-path node centralities')
    start = time.localtime()
    angular_node_measures = [
        'node_harmonic_angular',
        'node_betweenness_angular'
    ]
    N.node_centrality(measures=angular_node_measures, angular=True)
    time_duration = datetime.timedelta(
        seconds=time.mktime(time.localtime()) - time.mktime(start))
    logger.info(f'Algo duration: {time_duration}')

    logger.info('Calculating simplest-path segment centralities')
    start = time.localtime()
    angular_segment_measures = [
        'segment_harmonic_hybrid',
        'segment_betweeness_hybrid'
    ]
    N.segment_centrality(measures=angular_segment_measures, angular=True)
    time_duration = datetime.timedelta(
        seconds=time.mktime(time.localtime()) - time.mktime(start))
    logger.info(f'Algo duration: {time_duration}')

    # Quite slow writing to database so do all distances at once
    logger.info('Prepping data for database')
    metrics = N.metrics_to_dict()
    bulk_data = []
    #
    comb_measures = node_measures + segment_measures
    com_ang_measures = angular_node_measures + angular_segment_measures
    for k, v in metrics.items():
        # first check that this is a live node (i.e. within the original city boundary)
        if not v['live']:
            continue
        # start node data list - initialise with node label
        node_data = [k]

        # pack shortest path data
        for measure in comb_measures:
            inner_data = []
            for d in distances:
                inner_data.append(v['centrality'][measure][d])
            node_data.append(inner_data)

        # pack simplest path data
        for ang_measure in com_ang_measures:
            inner_ang_data = []
            for d in distances:
                inner_ang_data.append(v['centrality'][ang_measure][d])
            node_data.append(inner_ang_data)
        bulk_data.append(node_data)

    logger.info('Writing data back to database')
    db_con = await asyncpg.connect(**db_config)
    # check that the columns exist
    # do this separately to control the order in which the columns are added (by theme instead of distance)
    for measure in comb_measures:
        # prepend with "c_"
        c_measure = f'c_{measure}'
        await db_con.execute(f'''
        ALTER TABLE {nodes_table}
            ADD COLUMN IF NOT EXISTS {c_measure} real[];
        ''')
    for ang_measure in com_ang_measures:
        c_ang_measure = f'c_{ang_measure}'
        await db_con.execute(f'''
        ALTER TABLE {nodes_table}
            ADD COLUMN IF NOT EXISTS {c_ang_measure} real[];
        ''')
    await db_con.executemany(f'''
    UPDATE {nodes_table}
        SET
            c_node_density = $2,
            c_node_farness = $3,
            c_node_cycles = $4,
            c_node_harmonic = $5,
            c_node_beta = $6,
            c_node_betweenness = $7,
            c_node_betweenness_beta = $8,
            c_segment_density = $9,
            c_segment_harmonic = $10,
            c_segment_beta = $11,
            c_segment_betweenness = $12,
            c_node_harmonic_angular = $13,
            c_node_betweenness_angular = $14,
            c_segment_harmonic_hybrid = $15,
            c_segment_betweeness_hybrid = $16
        WHERE id = $1
    ''', bulk_data)
    await db_con.close()
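
The hand-written SET clause above must line up positionally with each bulk_data row: node id at $1, then one real[] per measure, shortest-path measures first. A sketch of generating the clause from the measure lists instead, mirroring the col_strings pattern used in the accessibility example; the measure names simply restate those computed above:

comb_measures = ['node_density', 'node_farness', 'node_cycles', 'node_harmonic',
                 'node_beta', 'node_betweenness', 'node_betweenness_beta',
                 'segment_density', 'segment_harmonic', 'segment_beta',
                 'segment_betweenness']
com_ang_measures = ['node_harmonic_angular', 'node_betweenness_angular',
                    'segment_harmonic_hybrid', 'segment_betweeness_hybrid']
# enumerate from $2 so that $1 stays reserved for the node id
set_clause = ', '.join(f'c_{m} = ${i}'
                       for i, m in enumerate(comb_measures + com_ang_measures, start=2))
print(f'UPDATE nodes_table SET {set_clause} WHERE id = $1;')
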
Example #12
import os

import matplotlib.pyplot as plt

from cityseer.metrics import networks, layers
from cityseer.tools import mock, graphs, plot

base_path = os.getcwd()
plt.style.use('matplotlibrc')

###
# INTRO PLOT
G = mock.mock_graph()
plot.plot_nX(G, labels=True, node_size=80, path='images/graph.png', dpi=150)

# INTRO EXAMPLE PLOTS
G = graphs.nX_simple_geoms(G)
G = graphs.nX_decompose(G, 20)

N = networks.NetworkLayerFromNX(G, distances=[400, 800])
N.segment_centrality(measures=['segment_harmonic'])

data_dict = mock.mock_data_dict(G, random_seed=25)
D = layers.DataLayerFromDict(data_dict)
D.assign_to_network(N, max_dist=400)
landuse_labels = mock.mock_categorical_data(len(data_dict), random_seed=25)
D.hill_branch_wt_diversity(landuse_labels, qs=[0])
G_metrics = N.to_networkX()

segment_harmonic_vals = []
mixed_uses_vals = []
for node, data in G_metrics.nodes(data=True):
    segment_harmonic_vals.append(
        data['metrics']['centrality']['segment_harmonic'][800])
    # truncated in the source; presumably the q=0 hill diversity at one of
    # the computed distances, e.g.:
    mixed_uses_vals.append(
        data['metrics']['mixed_uses']['hill_branch_wt'][0][400])
Example #13
def test_nX_from_graph_maps(primal_graph):
    # also see test_networks.test_to_networkX for tests on implementation via Network layer

    # check round trip to and from graph maps results in same graph
    # explicitly set live params for equality checks
    # graph_maps_from_networkX generates these implicitly if missing
    for n in primal_graph.nodes():
        primal_graph.nodes[n]['live'] = bool(np.random.randint(0, 2))  # upper bound exclusive; (0, 1) would always yield False

    # test directly from and to graph maps
    node_uids, node_data, edge_data, node_edge_map = graphs.graph_maps_from_nX(primal_graph)
    G_round_trip = graphs.nX_from_graph_maps(node_uids, node_data, edge_data, node_edge_map)
    assert list(G_round_trip.nodes) == list(primal_graph.nodes)
    assert list(G_round_trip.edges) == list(primal_graph.edges)

    # check with metrics dictionary
    N = networks.NetworkLayerFromNX(primal_graph, distances=[500, 1000])

    N.node_centrality(measures=['node_harmonic'])
    data_dict = mock.mock_data_dict(primal_graph)
    landuse_labels = mock.mock_categorical_data(len(data_dict))
    D = layers.DataLayerFromDict(data_dict)
    D.assign_to_network(N, max_dist=400)
    D.compute_landuses(landuse_labels,
                       mixed_use_keys=['hill', 'shannon'],
                       accessibility_keys=['a', 'c'],
                       qs=[0, 1])
    metrics_dict = N.metrics_to_dict()
    # without backbone
    G_round_trip_data = graphs.nX_from_graph_maps(node_uids,
                                                  node_data,
                                                  edge_data,
                                                  node_edge_map,
                                                  metrics_dict=metrics_dict)
    for uid, metrics in metrics_dict.items():
        assert G_round_trip_data.nodes[uid]['metrics'] == metrics
    # with backbone
    G_round_trip_data = graphs.nX_from_graph_maps(node_uids,
                                                  node_data,
                                                  edge_data,
                                                  node_edge_map,
                                                  networkX_multigraph=primal_graph,
                                                  metrics_dict=metrics_dict)
    for uid, metrics in metrics_dict.items():
        assert G_round_trip_data.nodes[uid]['metrics'] == metrics

    # test with decomposed
    G_decomposed = graphs.nX_decompose(primal_graph, decompose_max=20)
    # set live explicitly
    for n in G_decomposed.nodes():
        G_decomposed.nodes[n]['live'] = bool(np.random.randint(0, 2))  # upper bound exclusive; (0, 1) would always yield False
    node_uids_d, node_data_d, edge_data_d, node_edge_map_d = graphs.graph_maps_from_nX(G_decomposed)

    G_round_trip_d = graphs.nX_from_graph_maps(node_uids_d, node_data_d, edge_data_d, node_edge_map_d)
    assert list(G_round_trip_d.nodes) == list(G_decomposed.nodes)
    for n, iter_node_data in G_round_trip_d.nodes(data=True):  # compare the decomposed round trip
        assert n in G_decomposed
        assert iter_node_data['live'] == G_decomposed.nodes[n]['live']
        assert iter_node_data['x'] == G_decomposed.nodes[n]['x']
        assert iter_node_data['y'] == G_decomposed.nodes[n]['y']
    assert G_round_trip_d.edges == G_decomposed.edges

    # error checks for when using backbone graph:
    # mismatching numbers of nodes
    corrupt_G = primal_graph.copy()
    corrupt_G.remove_node(0)
    with pytest.raises(ValueError):
        graphs.nX_from_graph_maps(node_uids,
                                  node_data,
                                  edge_data,
                                  node_edge_map,
                                  networkX_multigraph=corrupt_G)
    # mismatching node uid
    with pytest.raises(KeyError):
        corrupt_node_uids = list(node_uids)
        corrupt_node_uids[0] = 'boo'
        graphs.nX_from_graph_maps(corrupt_node_uids,
                                  node_data,
                                  edge_data,
                                  node_edge_map,
                                  networkX_multigraph=primal_graph)
    # missing edge
    with pytest.raises(KeyError):
        corrupt_primal_graph = primal_graph.copy()
        corrupt_primal_graph.remove_edge(0, 1)
        graphs.nX_from_graph_maps(node_uids,
                                  node_data,
                                  edge_data,
                                  node_edge_map,
                                  networkX_multigraph=corrupt_primal_graph)
Example #14
async def centrality_dual(db_config, nodes_table, links_table, city_pop_id,
                          distances):
    logger.info(f'Loading graph for city: {city_pop_id} '
                f'derived from table: {nodes_table}')
    G = await postGIS_to_networkX(db_config, nodes_table, links_table,
                                  city_pop_id)
    if len(G) == 0:
        return
    logger.info('Casting to dual')
    G = graphs.nX_to_dual(G)  # convert to dual
    logger.info('Generating node map and edge map')
    N = networks.NetworkLayerFromNX(G, distances=distances)
    # the round trip graph is needed for the generated lengths, angles, etc.
    logger.info('Making round-trip graph')
    G_round_trip = N.to_networkX()

    db_con = await asyncpg.connect(**db_config)
    # ADD DUAL GRAPH'S VERTICES TABLE
    logger.info('Preparing dual nodes table')
    nodes_table_name_only = nodes_table
    if '.' in nodes_table_name_only:
        nodes_table_name_only = nodes_table_name_only.split('.')[-1]
    await db_con.execute(f'''
    -- add dual nodes table
    CREATE TABLE IF NOT EXISTS {nodes_table}_dual (
        id text PRIMARY KEY,
        city_pop_id int,
        within bool,
        geom geometry(Point, 27700)
    );
    CREATE INDEX IF NOT EXISTS geom_idx_{nodes_table_name_only}_dual
        ON {nodes_table}_dual USING GIST (geom);
    CREATE INDEX IF NOT EXISTS city_pop_idx_{nodes_table_name_only}_dual
        ON {nodes_table}_dual (city_pop_id);
    ''')

    logger.info('Preparing dual nodes data')
    dual_nodes_data = []
    for n, d in G_round_trip.nodes(data=True):
        dual_nodes_data.append([
            n,
            city_pop_id,
            d['live'],  # within
            d['x'],
            d['y']
        ])

    logger.info('Writing dual nodes to DB')
    await db_con.executemany(
        f'''
        INSERT INTO {nodes_table}_dual (id, city_pop_id, within, geom)
        VALUES ($1, $2, $3, ST_SetSRID(ST_MakePoint($4, $5), 27700))
        ON CONFLICT DO NOTHING;
    ''', dual_nodes_data)

    logger.info('Preparing dual edges table')
    links_table_name_only = links_table
    if '.' in links_table_name_only:
        links_table_name_only = links_table_name_only.split('.')[-1]
    await db_con.execute(f'''
    -- add dual links table
    CREATE TABLE IF NOT EXISTS {links_table}_dual (
      id text PRIMARY KEY,
      parent_id text,
      city_pop_id int,
      node_a text,
      node_b text,
      distance real,
      angle real,
      impedance_factor real,
      geom geometry(Linestring, 27700)
    );
    CREATE INDEX IF NOT EXISTS city_pop_idx_{links_table_name_only}_dual 
        ON {links_table}_dual (city_pop_id);
    CREATE INDEX IF NOT EXISTS geom_idx_{links_table_name_only}_dual 
        ON {links_table}_dual USING GIST (geom);
    ''')

    # prepare the dual edges and nodes tables
    logger.info('Preparing data for dual edges table')
    dual_edge_data = []
    parent_primal_counter = {}
    for s, e, d in G_round_trip.edges(data=True):
        # number each of the new dual edges sequentially
        # based on the original parent primal node
        primal_parent = d['parent_primal_node']
        if primal_parent not in parent_primal_counter:
            parent_primal_counter[primal_parent] = 1
        else:
            parent_primal_counter[primal_parent] += 1
        label = f'{primal_parent}_{parent_primal_counter[primal_parent]}'
        # add the data tuple
        dual_edge_data.append(
            (label, primal_parent, city_pop_id, s, e, d['length'],
             d['angle_sum'], d['imp_factor'], d['geom'].wkb_hex))

    logger.info('Writing dual edges to DB')
    await db_con.executemany(
        f'''
    INSERT INTO {links_table}_dual (
        id,
        parent_id,
        city_pop_id,
        node_a,
        node_b,
        distance,
        angle,
        impedance_factor,
        geom)
    VALUES ($1, $2, $3, $4, $5, $6, $7, $8, ST_SetSRID($9::geometry, 27700))
    ON CONFLICT DO NOTHING;
    ''', dual_edge_data)
    await db_con.close()

    logger.info('Calculating centrality paths and centralities '
                'for centrality-path heuristics')
    start = time.localtime()
    measures = [
        'node_density', 'node_harmonic', 'node_beta', 'node_betweenness',
        'node_betweenness_beta'
    ]
    N.node_centrality(measures=measures)
    time_duration = datetime.timedelta(seconds=time.mktime(time.localtime()) -
                                       time.mktime(start))
    logger.info(f'Algo duration: {time_duration}')
    logger.info('Calculating centrality paths and centralities '
                'for simplest-path heuristics')
    start = time.localtime()
    angular_measures = ['node_harmonic_angular', 'node_betweenness_angular']
    N.node_centrality(measures=angular_measures, angular=True)
    time_duration = datetime.timedelta(seconds=time.mktime(time.localtime()) -
                                       time.mktime(start))
    logger.info(f'Algo duration: {time_duration}')
    db_con = await asyncpg.connect(**db_config)
    # check that the columns exist
    # do this separately to control the order in which the columns are added (by theme instead of distance)
    for measure in measures:
        # prepend with "c_"
        c_measure = f'c_{measure}'
        await db_con.execute(f'''
        ALTER TABLE {nodes_table}_dual 
            ADD COLUMN IF NOT EXISTS {c_measure} real[];
        ''')
    for ang_measure in angular_measures:
        c_ang_measure = f'c_{ang_measure}'
        await db_con.execute(f'''
        ALTER TABLE {nodes_table}_dual
            ADD COLUMN IF NOT EXISTS {c_ang_measure} real[];
        ''')
    # Quite slow writing to database so do all distances at once
    logger.info('Prepping data for database')
    metrics = N.metrics_to_dict()
    bulk_data = []
    for k, v in metrics.items():
        # first check that this is a live node
        # (i.e. within the original city boundary)
        if not v['live']:
            continue
        # start node data list - initialise with node label
        node_data = [k]
        # pack centrality path data
        for measure in measures:
            inner_data = []
            for d in distances:
                inner_data.append(v['centrality'][measure][d])
            node_data.append(inner_data)
        # pack simplest path data
        for ang_measure in angular_measures:
            inner_ang_data = []
            for d in distances:
                inner_ang_data.append(v['centrality'][ang_measure][d])
            node_data.append(inner_ang_data)
        bulk_data.append(node_data)
    logger.info('Writing data back to database')
    await db_con.executemany(
        f'''
     UPDATE {nodes_table}_dual
         SET
             c_node_density = $2,
             c_node_harmonic = $3,
             c_node_beta = $4,
             c_node_betweenness = $5,
             c_node_betweenness_beta = $6,
             c_node_harmonic_angular = $7,
             c_node_betweenness_angular = $8
         WHERE id = $1
     ''', bulk_data)
    await db_con.close()
Example #15
def test_compute_stats(primal_graph):
    """
    Test stats component
    """
    betas = np.array([0.01, 0.005])
    distances = networks.distance_from_beta(betas)
    # network layer
    N_single = networks.NetworkLayerFromNX(primal_graph, distances=distances)
    N_multi = networks.NetworkLayerFromNX(primal_graph, distances=distances)
    node_map = N_multi._node_data
    edge_map = N_multi._edge_data
    node_edge_map = N_multi._node_edge_map
    # data layer
    data_dict = mock.mock_data_dict(primal_graph)
    D_single = layers.DataLayerFromDict(data_dict)
    D_multi = layers.DataLayerFromDict(data_dict)
    # check single metrics independently against underlying for some use-cases, e.g. hill, non-hill, accessibility...
    D_single.assign_to_network(N_single, max_dist=500)
    D_multi.assign_to_network(N_multi, max_dist=500)
    # generate some mock landuse data
    mock_numeric = mock.mock_numerical_data(len(data_dict), num_arrs=2)
    # generate stats
    D_single.compute_stats(stats_keys='boo', stats_data_arrs=mock_numeric[0])
    D_single.compute_stats(stats_keys='baa', stats_data_arrs=mock_numeric[1])
    D_multi.compute_stats(stats_keys=['boo', 'baa'],
                          stats_data_arrs=mock_numeric)
    # test against underlying method
    data_map = D_single._data
    stats_sum, stats_sum_wt, stats_mean, stats_mean_wt, stats_variance, stats_variance_wt, stats_max, stats_min = \
        data.aggregate_stats(node_map,
                             edge_map,
                             node_edge_map,
                             data_map,
                             distances,
                             betas,
                             numerical_arrays=mock_numeric)
    stats_keys = [
        'max', 'min', 'sum', 'sum_weighted', 'mean', 'mean_weighted',
        'variance', 'variance_weighted'
    ]
    stats_data = [
        stats_max, stats_min, stats_sum, stats_sum_wt, stats_mean,
        stats_mean_wt, stats_variance, stats_variance_wt
    ]
    for num_idx, num_label in enumerate(['boo', 'baa']):
        for s_key, stats in zip(stats_keys, stats_data):
            for d_idx, d_key in enumerate(distances):
                # check one-at-a-time computed vs multiply computed
                assert np.allclose(
                    N_single.metrics['stats'][num_label][s_key][d_key],
                    N_multi.metrics['stats'][num_label][s_key][d_key],
                    atol=0.001,
                    rtol=0,
                    equal_nan=True)
                # check one-at-a-time against manual
                assert np.allclose(
                    N_single.metrics['stats'][num_label][s_key][d_key],
                    stats[num_idx][d_idx],
                    atol=0.001,
                    rtol=0,
                    equal_nan=True)
                # check multiply computed against manual
                assert np.allclose(
                    N_multi.metrics['stats'][num_label][s_key][d_key],
                    stats[num_idx][d_idx],
                    atol=0.001,
                    rtol=0,
                    equal_nan=True)
    # check that problematic keys and data arrays are caught
    for labels, arrs, err in (
        (['a'], mock_numeric, ValueError),  # mismatching lengths
        (['a', 'b'], None, TypeError),  # missing arrays
        (['a', 'b'], [], ValueError),  # missing arrays
        (None, mock_numeric, TypeError),  # missing labels
        ([], mock_numeric, ValueError)):  # missing labels
        with pytest.raises(err):
            D_multi.compute_stats(stats_keys=labels, stats_data_arrs=arrs)
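
Illustrative only: a plausible form of the '*_weighted' aggregations checked above, assuming each data point's contribution is discounted by exp(-beta * network_distance); names and the exact weighting are assumptions, not cityseer's code:

import numpy as np

def weighted_mean(values, netw_dists, beta):
    # negative-exponential distance discount per data point
    wts = np.exp(-beta * netw_dists)
    return float(np.sum(values * wts) / np.sum(wts))

print(weighted_mean(np.array([1.0, 3.0]), np.array([100.0, 400.0]), beta=0.005))
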
Example #16
def test_compute_landuses(primal_graph):
    betas = np.array([0.01, 0.005])
    distances = networks.distance_from_beta(betas)
    # network layer
    N = networks.NetworkLayerFromNX(primal_graph, distances=distances)
    node_map = N._node_data
    edge_map = N._edge_data
    node_edge_map = N._node_edge_map
    # data layer
    data_dict = mock.mock_data_dict(primal_graph)
    qs = np.array([0, 1, 2])
    D = layers.DataLayerFromDict(data_dict)
    # check single metrics independently against underlying for some use-cases, e.g. hill, non-hill, accessibility...
    D.assign_to_network(N, max_dist=500)
    # generate some mock landuse data
    landuse_labels = mock.mock_categorical_data(len(data_dict))
    landuse_classes, landuse_encodings = layers.encode_categorical(
        landuse_labels)
    # compute hill mixed uses
    D.compute_landuses(landuse_labels,
                       mixed_use_keys=['hill_branch_wt'],
                       qs=qs)
    # test against underlying method
    data_map = D._data
    mu_data_hill, mu_data_other, ac_data, ac_data_wt = data.aggregate_landuses(
        node_map,
        edge_map,
        node_edge_map,
        data_map,
        distances,
        betas,
        landuse_encodings,
        qs=qs,
        mixed_use_hill_keys=np.array([1]))
    for q_idx, q_key in enumerate(qs):
        for d_idx, d_key in enumerate(distances):
            assert np.allclose(
                N.metrics['mixed_uses']['hill_branch_wt'][q_key][d_key],
                mu_data_hill[0][q_idx][d_idx],
                atol=0.001,
                rtol=0)
    # gini simpson
    D.compute_landuses(landuse_labels, mixed_use_keys=['gini_simpson'])
    # test against underlying method
    data_map = D._data
    mu_data_hill, mu_data_other, ac_data, ac_data_wt = data.aggregate_landuses(
        node_map,
        edge_map,
        node_edge_map,
        data_map,
        distances,
        betas,
        landuse_encodings,
        mixed_use_other_keys=np.array([1]))
    for d_idx, d_key in enumerate(distances):
        assert np.allclose(N.metrics['mixed_uses']['gini_simpson'][d_key],
                           mu_data_other[0][d_idx],
                           atol=0.001,
                           rtol=0)
    # accessibilities
    D.compute_landuses(landuse_labels, accessibility_keys=['c'])
    # test against underlying method
    data_map = D._data
    mu_data_hill, mu_data_other, ac_data, ac_data_wt = data.aggregate_landuses(
        node_map,
        edge_map,
        node_edge_map,
        data_map,
        distances,
        betas,
        landuse_encodings,
        accessibility_keys=np.array([landuse_classes.index('c')]))
    for d_idx, d_key in enumerate(distances):
        assert np.allclose(
            N.metrics['accessibility']['non_weighted']['c'][d_key],
            ac_data[0][d_idx],
            atol=0.001,
            rtol=0)
        assert np.allclose(N.metrics['accessibility']['weighted']['c'][d_key],
                           ac_data_wt[0][d_idx],
                           atol=0.001,
                           rtol=0)
    # also check the number of returned types for a few assortments of metrics
    mixed_uses_hill_types = np.array([
        'hill', 'hill_branch_wt', 'hill_pairwise_wt', 'hill_pairwise_disparity'
    ])
    mixed_use_other_types = np.array(
        ['shannon', 'gini_simpson', 'raos_pairwise_disparity'])
    ac_codes = np.array(landuse_classes)
    # mixed uses hill
    mu_hill_random = np.arange(len(mixed_uses_hill_types))
    np.random.shuffle(mu_hill_random)
    # mixed uses other
    mu_other_random = np.arange(len(mixed_use_other_types))
    np.random.shuffle(mu_other_random)
    # accessibility
    ac_random = np.arange(len(landuse_classes))
    np.random.shuffle(ac_random)
    # mock disparity matrix
    mock_disparity_wt_matrix = np.full(
        (len(landuse_classes), len(landuse_classes)), 1)
    # not necessary to do all labels, first few should do
    for mu_h_min in range(3):
        mu_h_keys = np.array(mu_hill_random[mu_h_min:])
        for mu_o_min in range(3):
            mu_o_keys = np.array(mu_other_random[mu_o_min:])
            for ac_min in range(3):
                ac_keys = np.array(ac_random[ac_min:])
                # in the final case, set accessibility to a single code otherwise an error would be raised
                if len(mu_h_keys) == 0 and len(mu_o_keys) == 0 and len(
                        ac_keys) == 0:
                    ac_keys = np.array([0])
                # randomise order of keys and metrics
                mu_h_metrics = mixed_uses_hill_types[mu_h_keys]
                mu_o_metrics = mixed_use_other_types[mu_o_keys]
                ac_metrics = ac_codes[ac_keys]
                # prepare network and compute
                N_temp = networks.NetworkLayerFromNX(primal_graph,
                                                     distances=distances)
                D_temp = layers.DataLayerFromDict(data_dict)
                D_temp.assign_to_network(N_temp, max_dist=500)
                D_temp.compute_landuses(
                    landuse_labels,
                    mixed_use_keys=list(mu_h_metrics) + list(mu_o_metrics),
                    accessibility_keys=ac_metrics,
                    cl_disparity_wt_matrix=mock_disparity_wt_matrix,
                    qs=qs)
                # test against underlying method
                mu_data_hill, mu_data_other, ac_data, ac_data_wt = \
                    data.aggregate_landuses(node_map,
                                            edge_map,
                                            node_edge_map,
                                            data_map,
                                            distances,
                                            betas,
                                            landuse_encodings,
                                            qs=qs,
                                            mixed_use_hill_keys=mu_h_keys,
                                            mixed_use_other_keys=mu_o_keys,
                                            accessibility_keys=ac_keys,
                                            cl_disparity_wt_matrix=mock_disparity_wt_matrix)
                for mu_h_idx, mu_h_met in enumerate(mu_h_metrics):
                    for q_idx, q_key in enumerate(qs):
                        for d_idx, d_key in enumerate(distances):
                            assert np.allclose(
                                N_temp.metrics['mixed_uses'][mu_h_met][q_key]
                                [d_key],
                                mu_data_hill[mu_h_idx][q_idx][d_idx],
                                atol=0.001,
                                rtol=0)
                for mu_o_idx, mu_o_met in enumerate(mu_o_metrics):
                    for d_idx, d_key in enumerate(distances):
                        assert np.allclose(
                            N_temp.metrics['mixed_uses'][mu_o_met][d_key],
                            mu_data_other[mu_o_idx][d_idx],
                            atol=0.001,
                            rtol=0)
                for ac_idx, ac_met in enumerate(ac_metrics):
                    for d_idx, d_key in enumerate(distances):
                        assert np.allclose(N_temp.metrics['accessibility']
                                           ['non_weighted'][ac_met][d_key],
                                           ac_data[ac_idx][d_idx],
                                           atol=0.001,
                                           rtol=0)
                        assert np.allclose(N_temp.metrics['accessibility']
                                           ['weighted'][ac_met][d_key],
                                           ac_data_wt[ac_idx][d_idx],
                                           atol=0.001,
                                           rtol=0)
    # most integrity checks happen in underlying method, though check here for mismatching labels length and typos
    with pytest.raises(ValueError):
        D.compute_landuses(landuse_labels[-1], mixed_use_keys=['shannon'])
    with pytest.raises(ValueError):
        D.compute_landuses(landuse_labels, mixed_use_keys=['spelling_typo'])
    # don't check accessibility_labels for typos - because only warning is triggered (not all labels will be in all data)
    # check that unassigned data layer flags
    with pytest.raises(ValueError):
        D_new = layers.DataLayerFromDict(data_dict)
        D_new.compute_landuses(landuse_labels, mixed_use_keys=['shannon'])