def test_metrics_to_dict():
    """metrics_to_dict should round-trip the Network Layer's metrics state at each stage."""
    G = mock.mock_graph()
    G = graphs.nX_simple_geoms(G)
    # build a network layer for computing metrics against
    N = networks.Network_Layer_From_nX(G, distances=[500, 1000])
    # baseline: dict conversion with no metrics computed yet
    dict_check(N.metrics_to_dict(), N)
    # dict conversion after centrality metrics have been computed
    N.compute_centrality(measures=['node_harmonic'])
    dict_check(N.metrics_to_dict(), N)
    # dict conversion with data metrics
    data_dict = mock.mock_data_dict(G)
    landuse_labels = mock.mock_categorical_data(len(data_dict))
    numerical_data = mock.mock_numerical_data(len(data_dict))
    # TODO:
    '''
    D = layers.Data_Layer_From_Dict(data_dict)
    D.assign_to_network(N, max_dist=400)
    D.compute_aggregated(landuse_labels,
                         mixed_use_keys=['hill', 'shannon'],
                         accessibility_keys=['a', 'c'],
                         qs=[0, 1],
                         stats_keys=['boo'],
                         stats_data_arrs=numerical_data)
    '''
    dict_check(N.metrics_to_dict(), N)
def test_compute_stats_single():
    """compute_stats_single (convenience wrapper) should match compute_aggregated output."""
    for G, distances, betas in network_generator():
        data_dict = mock.mock_data_dict(G)
        numeric_data = mock.mock_numerical_data(len(data_dict), num_arrs=1)
        # convenience wrapper path
        N_easy = networks.Network_Layer_From_nX(G, distances)
        D_easy = layers.Data_Layer_From_Dict(data_dict)
        D_easy.assign_to_network(N_easy, max_dist=500)
        D_easy.compute_stats_single('boo', numeric_data[0])
        # underlying aggregated path
        N_full = networks.Network_Layer_From_nX(G, distances)
        D_full = layers.Data_Layer_From_Dict(data_dict)
        D_full.assign_to_network(N_full, max_dist=500)
        D_full.compute_aggregated(stats_keys=['boo'], stats_data_arrs=numeric_data)
        # both paths should agree for every stat and distance threshold
        stat_labels = ['max', 'min', 'mean', 'mean_weighted', 'variance', 'variance_weighted']
        for n_label in ['boo']:
            for s_label in stat_labels:
                for dist in distances:
                    easy_vals = N_easy.metrics['stats'][n_label][s_label][dist]
                    full_vals = N_full.metrics['stats'][n_label][s_label][dist]
                    assert np.allclose(easy_vals, full_vals, equal_nan=True, atol=0.001, rtol=0)
        # multi-dimensional arrays are not valid input for the single-stat wrapper
        with pytest.raises(ValueError):
            D_easy.compute_stats_single('boo', numeric_data)
def test_compute_stats_multiple():
    """compute_stats_multiple (convenience wrapper) should match compute_aggregated output."""
    for G, distances, betas in network_generator():
        data_dict = mock.mock_data_dict(G)
        numeric_data = mock.mock_numerical_data(len(data_dict), num_arrs=2)
        # convenience wrapper path
        N_easy = networks.Network_Layer_From_nX(G, distances)
        D_easy = layers.Data_Layer_From_Dict(data_dict)
        D_easy.assign_to_network(N_easy, max_dist=500)
        D_easy.compute_stats_multiple(['boo', 'baa'], numeric_data)
        # underlying aggregated path
        N_full = networks.Network_Layer_From_nX(G, distances)
        D_full = layers.Data_Layer_From_Dict(data_dict)
        D_full.assign_to_network(N_full, max_dist=500)
        D_full.compute_aggregated(stats_keys=['boo', 'baa'], stats_data_arrs=numeric_data)
        # both paths should agree for every label, stat, and distance threshold
        stat_labels = ['max', 'min', 'mean', 'mean_weighted', 'variance', 'variance_weighted']
        for n_label in ['boo', 'baa']:
            for s_label in stat_labels:
                for dist in distances:
                    easy_vals = N_easy.metrics['stats'][n_label][s_label][dist]
                    full_vals = N_full.metrics['stats'][n_label][s_label][dist]
                    assert np.allclose(easy_vals, full_vals, equal_nan=True, atol=0.001, rtol=0)
def test_check_numerical_data():
    """check_numerical_data should reject malformed numeric arrays.

    Fix: setup statements and the sanity `assert` were previously inside the
    `pytest.raises` context managers — an exception raised by the setup (or an
    AssertionError) would be conflated with the exception under test, and a
    ValueError from setup would falsely pass the test. Each `raises` block is
    now narrowed to exactly the call under test.
    """
    mock_numerical = mock.mock_numerical_data(50)
    # check for malformed data
    # difficult to catch int arrays without running into numba type checking errors
    # single dimension arrays should be rejected
    corrupt_numerical = mock_numerical[0]
    assert corrupt_numerical.ndim == 1
    with pytest.raises(ValueError):
        checks.check_numerical_data(corrupt_numerical)
    # catch infinites
    mock_numerical[0][0] = np.inf
    with pytest.raises(ValueError):
        checks.check_numerical_data(mock_numerical)
def test_compute_aggregated_B():
    """
    Test stats component
    """
    G = mock.mock_graph()
    G = graphs.nX_simple_geoms(G)
    betas = np.array([-0.01, -0.005])
    distances = networks.distance_from_beta(betas)
    # network layer and its underlying map structures
    N = networks.Network_Layer_From_nX(G, distances)
    node_map = N._node_data
    edge_map = N._edge_data
    node_edge_map = N._node_edge_map
    # data layer
    data_dict = mock.mock_data_dict(G)
    qs = np.array([0, 1, 2])
    D = layers.Data_Layer_From_Dict(data_dict)
    # check single metrics independently against underlying for some use-cases, e.g. hill, non-hill, accessibility...
    D.assign_to_network(N, max_dist=500)
    # generate some mock landuse data and compute stats via the layer API
    mock_numeric = mock.mock_numerical_data(len(data_dict), num_arrs=2)
    D.compute_aggregated(stats_keys=['boo', 'baa'], stats_data_arrs=mock_numeric)
    # compute the same stats via the underlying method for comparison
    data_map = D._data
    (mu_data_hill, mu_data_other, ac_data, ac_data_wt,
     stats_sum, stats_sum_wt, stats_mean, stats_mean_wt,
     stats_variance, stats_variance_wt, stats_max, stats_min) = data.local_aggregator(
        node_map,
        edge_map,
        node_edge_map,
        data_map,
        distances,
        betas,
        numerical_arrays=mock_numeric)
    # map each stats key to the corresponding array from the underlying method
    expected = {
        'max': stats_max,
        'min': stats_min,
        'sum': stats_sum,
        'sum_weighted': stats_sum_wt,
        'mean': stats_mean,
        'mean_weighted': stats_mean_wt,
        'variance': stats_variance,
        'variance_weighted': stats_variance_wt,
    }
    for num_idx, num_label in enumerate(['boo', 'baa']):
        for s_key, stats in expected.items():
            for d_idx, d_key in enumerate(distances):
                assert np.allclose(N.metrics['stats'][num_label][s_key][d_key],
                                   stats[num_idx][d_idx],
                                   atol=0.001,
                                   rtol=0)
    # mismatching label / array combinations should be caught
    bad_combos = (
        (['a'], mock_numeric),  # mismatching lengths
        (['a', 'b'], None),  # missing arrays
        (None, mock_numeric),  # missing labels
    )
    for labels, arrs in bad_combos:
        with pytest.raises(ValueError):
            D.compute_aggregated(stats_keys=labels, stats_data_arrs=arrs)