import numpy as np
import pytest
from matplotlib import colors

# module layout assumed from the cityseer package as used by these tests
from cityseer.algos import checks, data
from cityseer.metrics import layers, networks
from cityseer.util import graphs, mock, plot

# dict_check and network_generator are helpers defined elsewhere in this test module


def test_metrics_to_dict():
    G = mock.mock_graph()
    G = graphs.nX_simple_geoms(G)
    # create a network layer and run some metrics
    N = networks.Network_Layer_From_nX(G, distances=[500, 1000])
    # check with no metrics
    metrics_dict = N.metrics_to_dict()
    dict_check(metrics_dict, N)
    # check with centrality metrics
    N.compute_centrality(measures=['node_harmonic'])
    metrics_dict = N.metrics_to_dict()
    dict_check(metrics_dict, N)
    # check with data metrics
    data_dict = mock.mock_data_dict(G)
    landuse_labels = mock.mock_categorical_data(len(data_dict))
    numerical_data = mock.mock_numerical_data(len(data_dict))
    # TODO:
    '''
    D = layers.Data_Layer_From_Dict(data_dict)
    D.assign_to_network(N, max_dist=400)
    D.compute_aggregated(landuse_labels,
                         mixed_use_keys=['hill', 'shannon'],
                         accessibility_keys=['a', 'c'],
                         qs=[0, 1],
                         stats_keys=['boo'],
                         stats_data_arrs=numerical_data)
    '''
    metrics_dict = N.metrics_to_dict()
    dict_check(metrics_dict, N)
def test_encode_categorical():
    # generate mock data
    mock_categorical = mock.mock_categorical_data(50)
    classes, class_encodings = layers.encode_categorical(mock_categorical)
    for cl in classes:
        assert cl in mock_categorical
    for idx, label in enumerate(mock_categorical):
        assert label in classes
        assert classes.index(label) == class_encodings[idx]
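# For reference, a minimal sketch of the behaviour asserted above: labels are mapped to
# integer codes such that classes.index(label) == encoding. The class ordering shown
# here is an assumption for illustration, not necessarily what
# layers.encode_categorical does internally.
def _sketch_encode_categorical(labels):
    classes = sorted(set(labels))  # e.g. ['a', 'b', 'c']
    encodings = [classes.index(label) for label in labels]  # e.g. [0, 1, 0, 2]
    return classes, encodings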
def test_check_categorical_data():
    mock_categorical = mock.mock_categorical_data(50)
    data_classes, data_encoding = layers.encode_categorical(mock_categorical)
    # check for malformed data
    # negatives
    with pytest.raises(ValueError):
        data_encoding[0] = -1
        checks.check_categorical_data(data_encoding)
    # NaN
    with pytest.raises(ValueError):
        data_encoding[0] = np.nan
        checks.check_categorical_data(data_encoding)
    # floats
    with pytest.raises(ValueError):
        data_encoding_float = np.full(len(data_encoding), np.nan)
        data_encoding_float[:] = data_encoding[:].astype(float)
        data_encoding_float[0] = 1.2345
        checks.check_categorical_data(data_encoding_float)
def test_compute_accessibilities():
    for G, distances, betas in network_generator():
        data_dict = mock.mock_data_dict(G)
        landuse_labels = mock.mock_categorical_data(len(data_dict))
        # easy version
        N_easy = networks.Network_Layer_From_nX(G, distances)
        D_easy = layers.Data_Layer_From_Dict(data_dict)
        D_easy.assign_to_network(N_easy, max_dist=500)
        D_easy.compute_accessibilities(landuse_labels, ['c'])
        # custom version
        N_full = networks.Network_Layer_From_nX(G, distances)
        D_full = layers.Data_Layer_From_Dict(data_dict)
        D_full.assign_to_network(N_full, max_dist=500)
        D_full.compute_aggregated(landuse_labels, accessibility_keys=['c'])
        # compare
        for d in distances:
            for wt in ['weighted', 'non_weighted']:
                assert np.allclose(N_easy.metrics['accessibility'][wt]['c'][d],
                                   N_full.metrics['accessibility'][wt]['c'][d],
                                   atol=0.001,
                                   rtol=0)
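# For context, a toy sketch of the two quantities compared above: non-weighted
# accessibility counts reachable landuses of a class within the distance threshold,
# while the weighted variant discounts each by a negative-exponential function of
# distance, mirroring the negative beta convention used in these tests. The actual
# computation lives in data.local_aggregator; this is only an illustration.
def _sketch_accessibility(dists, beta, max_dist):
    non_weighted = np.sum(dists <= max_dist)  # simple count within the threshold
    weighted = np.sum(np.exp(beta * dists))   # distance-discounted count (beta < 0)
    return non_weighted, weighted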
def test_hill_diversity():
    for G, distances, betas in network_generator():
        data_dict = mock.mock_data_dict(G)
        landuse_labels = mock.mock_categorical_data(len(data_dict))
        # easy version
        N_easy = networks.Network_Layer_From_nX(G, distances)
        D_easy = layers.Data_Layer_From_Dict(data_dict)
        D_easy.assign_to_network(N_easy, max_dist=500)
        D_easy.hill_diversity(landuse_labels, qs=[0, 1, 2])
        # custom version
        N_full = networks.Network_Layer_From_nX(G, distances)
        D_full = layers.Data_Layer_From_Dict(data_dict)
        D_full.assign_to_network(N_full, max_dist=500)
        D_full.compute_aggregated(landuse_labels, mixed_use_keys=['hill'], qs=[0, 1, 2])
        # compare
        for d in distances:
            for q in [0, 1, 2]:
                assert np.allclose(N_easy.metrics['mixed_uses']['hill'][q][d],
                                   N_full.metrics['mixed_uses']['hill'][q][d],
                                   atol=0.001,
                                   rtol=0)
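# For context, a minimal NumPy sketch of the Hill diversity that both code paths above
# should agree on (the standard definition, not the library's internal implementation):
def _sketch_hill_diversity(counts, q):
    probs = counts[counts > 0] / counts.sum()
    if q == 1:
        # the q = 1 limit of (sum p_i^q)^(1 / (1 - q)) is exp(Shannon entropy)
        return np.exp(-np.sum(probs * np.log(probs)))
    return np.sum(probs ** q) ** (1 / (1 - q))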
# INTRO PLOT
G = mock.mock_graph()
plot.plot_nX(G, path='graph.png', labels=True, dpi=150)

# INTRO EXAMPLE PLOTS
G = graphs.nX_simple_geoms(G)
G = graphs.nX_decompose(G, 20)
N = networks.Network_Layer_From_nX(G, distances=[400, 800])
N.compute_centrality(measures=['segment_harmonic'])
data_dict = mock.mock_data_dict(G, random_seed=25)
D = layers.Data_Layer_From_Dict(data_dict)
D.assign_to_network(N, max_dist=400)
landuse_labels = mock.mock_categorical_data(len(data_dict), random_seed=25)
D.hill_branch_wt_diversity(landuse_labels, qs=[0])
G_metrics = N.to_networkX()
segment_harmonic_vals = []
mixed_uses_vals = []
for node, node_data in G_metrics.nodes(data=True):
    segment_harmonic_vals.append(node_data['metrics']['centrality']['segment_harmonic'][800])
    mixed_uses_vals.append(node_data['metrics']['mixed_uses']['hill_branch_wt'][0][400])
# custom colourmap
cmap = colors.LinearSegmentedColormap.from_list('cityseer', ['#64c1ff', '#d32f2f'])
segment_harmonic_vals = colors.Normalize()(segment_harmonic_vals)
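# The snippet above breaks off before the values are drawn; a minimal sketch of one way
# to finish it with networkx and matplotlib directly (the output path and styling are
# illustrative assumptions, not the original plotting code):
import matplotlib.pyplot as plt
import networkx as nx

mixed_uses_vals = colors.Normalize()(mixed_uses_vals)  # mirror the normalisation above
pos = {n: (d['x'], d['y']) for n, d in G_metrics.nodes(data=True)}
nx.draw_networkx(G_metrics,
                 pos=pos,
                 with_labels=False,
                 node_size=30,
                 node_color=cmap(segment_harmonic_vals))
plt.savefig('intro_segment_harmonic.png', dpi=150)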
def test_nX_from_graph_maps():
    # also see test_networks.test_to_networkX for tests on implementation via the Network layer
    # check that a round trip to and from graph maps results in the same graph
    G = mock.mock_graph()
    G = graphs.nX_simple_geoms(G)
    # explicitly set live params for equality checks
    # graph_maps_from_networkX generates these implicitly if missing
    for n in G.nodes():
        G.nodes[n]['live'] = bool(np.random.randint(0, 2))
    # test directly from and to graph maps
    node_uids, node_data, edge_data, node_edge_map = graphs.graph_maps_from_nX(G)
    G_round_trip = graphs.nX_from_graph_maps(node_uids, node_data, edge_data, node_edge_map)
    assert list(G_round_trip.nodes) == list(G.nodes)
    assert list(G_round_trip.edges) == list(G.edges)
    # check with metrics dictionary
    N = networks.Network_Layer_From_nX(G, distances=[500, 1000])
    N.compute_centrality(measures=['node_harmonic'])
    data_dict = mock.mock_data_dict(G)
    landuse_labels = mock.mock_categorical_data(len(data_dict))
    D = layers.Data_Layer_From_Dict(data_dict)
    D.assign_to_network(N, max_dist=400)
    D.compute_aggregated(landuse_labels,
                         mixed_use_keys=['hill', 'shannon'],
                         accessibility_keys=['a', 'c'],
                         qs=[0, 1])
    metrics_dict = N.metrics_to_dict()
    # without backbone
    G_round_trip_data = graphs.nX_from_graph_maps(node_uids,
                                                  node_data,
                                                  edge_data,
                                                  node_edge_map,
                                                  metrics_dict=metrics_dict)
    for uid, metrics in metrics_dict.items():
        assert G_round_trip_data.nodes[uid]['metrics'] == metrics
    # with backbone
    G_round_trip_data = graphs.nX_from_graph_maps(node_uids,
                                                  node_data,
                                                  edge_data,
                                                  node_edge_map,
                                                  networkX_graph=G,
                                                  metrics_dict=metrics_dict)
    for uid, metrics in metrics_dict.items():
        assert G_round_trip_data.nodes[uid]['metrics'] == metrics
    # test with decomposed
    G_decomposed = graphs.nX_decompose(G, decompose_max=20)
    # set live explicitly
    for n in G_decomposed.nodes():
        G_decomposed.nodes[n]['live'] = bool(np.random.randint(0, 2))
    node_uids_d, node_data_d, edge_data_d, node_edge_map_d = graphs.graph_maps_from_nX(G_decomposed)
    G_round_trip_d = graphs.nX_from_graph_maps(node_uids_d, node_data_d, edge_data_d, node_edge_map_d)
    assert list(G_round_trip_d.nodes) == list(G_decomposed.nodes)
    for n, n_data in G_round_trip_d.nodes(data=True):
        assert n in G_decomposed
        assert n_data['live'] == G_decomposed.nodes[n]['live']
        assert n_data['x'] == G_decomposed.nodes[n]['x']
        assert n_data['y'] == G_decomposed.nodes[n]['y']
    assert G_round_trip_d.edges == G_decomposed.edges
    # error checks for when using a backbone graph:
    # mismatching numbers of nodes
    corrupt_G = G.copy()
    corrupt_G.remove_node(0)
    with pytest.raises(ValueError):
        graphs.nX_from_graph_maps(node_uids, node_data, edge_data, node_edge_map, networkX_graph=corrupt_G)
    # mismatching node uid
    with pytest.raises(ValueError):
        corrupt_node_uids = list(node_uids)
        corrupt_node_uids[0] = 'boo'
        graphs.nX_from_graph_maps(corrupt_node_uids, node_data, edge_data, node_edge_map, networkX_graph=G)
def test_compute_aggregated_A():
    G = mock.mock_graph()
    G = graphs.nX_simple_geoms(G)
    betas = np.array([-0.01, -0.005])
    distances = networks.distance_from_beta(betas)
    # network layer
    N = networks.Network_Layer_From_nX(G, distances)
    node_map = N._node_data
    edge_map = N._edge_data
    node_edge_map = N._node_edge_map
    # data layer
    data_dict = mock.mock_data_dict(G)
    qs = np.array([0, 1, 2])
    D = layers.Data_Layer_From_Dict(data_dict)
    # check single metrics independently against underlying for some use-cases, e.g. hill, non-hill, accessibility...
    D.assign_to_network(N, max_dist=500)
    # generate some mock landuse data
    landuse_labels = mock.mock_categorical_data(len(data_dict))
    landuse_classes, landuse_encodings = layers.encode_categorical(landuse_labels)
    # compute hill mixed uses
    D.compute_aggregated(landuse_labels, mixed_use_keys=['hill_branch_wt'], qs=qs)
    # test against underlying method
    data_map = D._data
    mu_data_hill, mu_data_other, ac_data, ac_data_wt, \
        stats_sum, stats_sum_wt, stats_mean, stats_mean_wt, \
        stats_variance, stats_variance_wt, stats_max, stats_min = \
        data.local_aggregator(node_map,
                              edge_map,
                              node_edge_map,
                              data_map,
                              distances,
                              betas,
                              landuse_encodings,
                              qs=qs,
                              mixed_use_hill_keys=np.array([1]))
    for q_idx, q_key in enumerate(qs):
        for d_idx, d_key in enumerate(distances):
            assert np.allclose(N.metrics['mixed_uses']['hill_branch_wt'][q_key][d_key],
                               mu_data_hill[0][q_idx][d_idx],
                               atol=0.001,
                               rtol=0)
    # gini simpson
    D.compute_aggregated(landuse_labels, mixed_use_keys=['gini_simpson'])
    # test against underlying method
    data_map = D._data
    mu_data_hill, mu_data_other, ac_data, ac_data_wt, \
        stats_sum, stats_sum_wt, stats_mean, stats_mean_wt, \
        stats_variance, stats_variance_wt, stats_max, stats_min = \
        data.local_aggregator(node_map,
                              edge_map,
                              node_edge_map,
                              data_map,
                              distances,
                              betas,
                              landuse_encodings,
                              mixed_use_other_keys=np.array([1]))
    for d_idx, d_key in enumerate(distances):
        assert np.allclose(N.metrics['mixed_uses']['gini_simpson'][d_key],
                           mu_data_other[0][d_idx],
                           atol=0.001,
                           rtol=0)
    # accessibilities
    D.compute_aggregated(landuse_labels, accessibility_keys=['c'])
    # test against underlying method
    data_map = D._data
    mu_data_hill, mu_data_other, ac_data, ac_data_wt, \
        stats_sum, stats_sum_wt, stats_mean, stats_mean_wt, \
        stats_variance, stats_variance_wt, stats_max, stats_min = \
        data.local_aggregator(node_map,
                              edge_map,
                              node_edge_map,
                              data_map,
                              distances,
                              betas,
                              landuse_encodings,
                              accessibility_keys=np.array([landuse_classes.index('c')]))
    for d_idx, d_key in enumerate(distances):
        assert np.allclose(N.metrics['accessibility']['non_weighted']['c'][d_key],
                           ac_data[0][d_idx],
                           atol=0.001,
                           rtol=0)
        assert np.allclose(N.metrics['accessibility']['weighted']['c'][d_key],
                           ac_data_wt[0][d_idx],
                           atol=0.001,
                           rtol=0)
    # also check the number of returned types for a few assortments of metrics
    mixed_uses_hill_types = np.array(['hill',
                                      'hill_branch_wt',
                                      'hill_pairwise_wt',
                                      'hill_pairwise_disparity'])
    mixed_use_other_types = np.array(['shannon',
                                      'gini_simpson',
                                      'raos_pairwise_disparity'])
    ac_codes = np.array(landuse_classes)
    mu_hill_random = np.arange(len(mixed_uses_hill_types))
    np.random.shuffle(mu_hill_random)
    mu_other_random = np.arange(len(mixed_use_other_types))
    np.random.shuffle(mu_other_random)
    ac_random = np.arange(len(landuse_classes))
    np.random.shuffle(ac_random)
    # mock disparity matrix
    mock_disparity_wt_matrix = np.full((len(landuse_classes), len(landuse_classes)), 1)
    # not necessary to do all labels, first few should do
    for mu_h_min in range(3):
        mu_h_keys = np.array(mu_hill_random[mu_h_min:])
        for mu_o_min in range(3):
            mu_o_keys = np.array(mu_other_random[mu_o_min:])
            for ac_min in range(3):
                ac_keys = np.array(ac_random[ac_min:])
                # in the final case, set accessibility to a single code, otherwise an error would be raised
                if len(mu_h_keys) == 0 and len(mu_o_keys) == 0 and len(ac_keys) == 0:
                    ac_keys = np.array([0])
                # randomise the order of the keys and metrics
                mu_h_metrics = mixed_uses_hill_types[mu_h_keys]
                mu_o_metrics = mixed_use_other_types[mu_o_keys]
                ac_metrics = ac_codes[ac_keys]
                N_temp = networks.Network_Layer_From_nX(G, distances)
                D_temp = layers.Data_Layer_From_Dict(data_dict)
                D_temp.assign_to_network(N_temp, max_dist=500)
                D_temp.compute_aggregated(landuse_labels,
                                          mixed_use_keys=list(mu_h_metrics) + list(mu_o_metrics),
                                          accessibility_keys=ac_metrics,
                                          cl_disparity_wt_matrix=mock_disparity_wt_matrix,
                                          qs=qs)
                # test against underlying method
                mu_data_hill, mu_data_other, ac_data, ac_data_wt, \
                    stats_sum, stats_sum_wt, stats_mean, stats_mean_wt, \
                    stats_variance, stats_variance_wt, stats_max, stats_min = \
                    data.local_aggregator(node_map,
                                          edge_map,
                                          node_edge_map,
                                          data_map,
                                          distances,
                                          betas,
                                          landuse_encodings,
                                          qs=qs,
                                          mixed_use_hill_keys=mu_h_keys,
                                          mixed_use_other_keys=mu_o_keys,
                                          accessibility_keys=ac_keys,
                                          cl_disparity_wt_matrix=mock_disparity_wt_matrix)
                for mu_h_idx, mu_h_met in enumerate(mu_h_metrics):
                    for q_idx, q_key in enumerate(qs):
                        for d_idx, d_key in enumerate(distances):
                            assert np.allclose(N_temp.metrics['mixed_uses'][mu_h_met][q_key][d_key],
                                               mu_data_hill[mu_h_idx][q_idx][d_idx],
                                               atol=0.001,
                                               rtol=0)
                for mu_o_idx, mu_o_met in enumerate(mu_o_metrics):
                    for d_idx, d_key in enumerate(distances):
                        assert np.allclose(N_temp.metrics['mixed_uses'][mu_o_met][d_key],
                                           mu_data_other[mu_o_idx][d_idx],
                                           atol=0.001,
                                           rtol=0)
                for ac_idx, ac_met in enumerate(ac_metrics):
                    for d_idx, d_key in enumerate(distances):
                        assert np.allclose(N_temp.metrics['accessibility']['non_weighted'][ac_met][d_key],
                                           ac_data[ac_idx][d_idx],
                                           atol=0.001,
                                           rtol=0)
                        assert np.allclose(N_temp.metrics['accessibility']['weighted'][ac_met][d_key],
                                           ac_data_wt[ac_idx][d_idx],
                                           atol=0.001,
                                           rtol=0)
    # most integrity checks happen in the underlying method, though check here for mismatching labels length and typos
    with pytest.raises(ValueError):
        D.compute_aggregated(landuse_labels[-1], mixed_use_keys=['shannon'])
    with pytest.raises(ValueError):
        D.compute_aggregated(landuse_labels, mixed_use_keys=['spelling_typo'])
    # don't check accessibility_labels for typos - only a warning is triggered (not all labels will be in all data)
    # check that an unassigned data layer raises
    with pytest.raises(ValueError):
        D_new = layers.Data_Layer_From_Dict(data_dict)
        D_new.compute_aggregated(landuse_labels, mixed_use_keys=['shannon'])
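# For reference, minimal NumPy sketches of the non-Hill mixed-use measures exercised
# above (standard definitions, not the library internals):
def _sketch_gini_simpson(counts):
    probs = counts[counts > 0] / counts.sum()
    return 1 - np.sum(probs ** 2)  # chance that two random draws differ in class

def _sketch_shannon(counts):
    probs = counts[counts > 0] / counts.sum()
    return -np.sum(probs * np.log(probs))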
# generate the population layer
Pop_Layer = generate_data_layer(spans, 20, Netw_Layer, _randomised=randomised)
# population state and map - note that the state doesn't change (changes occur via assignments)
pop_state = np.full(len(Pop_Layer.uids), 1.0)  # use floats!
pop_map = np.full((iters, spans), 0.0)
# generate the landuse layer
Landuse_Layer = generate_data_layer(spans, 1, Netw_Layer, _randomised=randomised)
lu_flow_a = np.full((iters, spans), 0.0)
lu_flow_b = np.full((iters, spans), 0.0)
lu_flow_c = np.full((iters, spans), 0.0)
# get the landuse encodings - note that the labels don't change (changes occur via assignments)
landuse_labels = mock.mock_categorical_data(length=len(Landuse_Layer.uids), num_classes=3)
if not randomised:
    l = len(Netw_Layer.uids)
    l_1 = int(l / 3)
    l_2 = l_1 * 2
    for d_idx, assigned_idx in enumerate(Landuse_Layer._data[:, 2]):
        if assigned_idx < l_1:
            landuse_labels[d_idx] = 'a'
        elif assigned_idx < l_2:
            landuse_labels[d_idx] = 'b'
        else:
            landuse_labels[d_idx] = 'c'
landuse_classes, landuse_encodings = layers.encode_categorical(landuse_labels)
# iterate