def test_fit_normal_dict():
    """Normal fit of NORMAL serialises to the known mu/sigma plus the given bounds."""
    fit_result = st.fit(NORMAL, 'norm')
    as_dict = st.fit_results_to_dict(fit_result, min_bound=-123, max_bound=123)
    nt.assert_almost_equal(as_dict['mu'], NORMAL_MU, 1)
    nt.assert_almost_equal(as_dict['sigma'], NORMAL_SIGMA, 1)
    nt.assert_almost_equal(as_dict['min'], -123, 1)
    nt.assert_almost_equal(as_dict['max'], 123, 1)
def transform_package(mtype, files, components):
    '''Put together header and list of data into one json output.

    feature_list contains all the information about the data to be extracted:
    features, feature_names, feature_components, feature_min, feature_max
    '''
    data_dict = transform_header(mtype)
    neurons = load_neurons(files)
    for component in components:
        fit_params = PARAM_MAP[component.name]
        for feat in component.features:
            raw_fit = extract_data(neurons, feat.name, fit_params)
            result = stats.fit_results_to_dict(raw_fit,
                                               feat.limits.min,
                                               feat.limits.max)
            # A normal distribution with sigma == 0 degenerates to a constant,
            # so serialise it as one.
            if result['type'] == 'normal' and result['sigma'] == 0.0:
                result = OrderedDict((('type', 'constant'),
                                      ('val', result['mu'])))
            data_dict["components"][component.name].update({feat.name: result})
    return data_dict
def test_fit_results_dict_exponential_min_max():
    """Exponential fit dict carries lambda = 1/scale and the supplied bounds."""
    fit = st.FitResults(params=[2, 2], errs=[3, 4], type="expon")
    out = st.fit_results_to_dict(fit, min_bound=-100, max_bound=100)
    nt.assert_equal(out["type"], "exponential")
    nt.assert_equal(out["lambda"], 0.5)
    nt.assert_equal(out["min"], -100)
    nt.assert_equal(out["max"], 100)
def test_fit_results_dict_exponential_min_max():
    """Exponential fit dict carries lambda = 1/scale and the supplied bounds."""
    fit = st.FitResults(params=[2, 2], errs=[3, 4], type='expon')
    out = st.fit_results_to_dict(fit, min_bound=-100, max_bound=100)
    assert out['type'] == 'exponential'
    assert out['lambda'] == 0.5
    assert out['min'] == -100
    assert out['max'] == 100
def test_fit_normal_dict():
    """Normal fit of NORMAL serialises to the known mu/sigma plus the given bounds."""
    fit_result = st.fit(NORMAL, "norm")
    as_dict = st.fit_results_to_dict(fit_result, min_bound=-123, max_bound=123)
    nt.assert_almost_equal(as_dict["mu"], NORMAL_MU, 1)
    nt.assert_almost_equal(as_dict["sigma"], NORMAL_SIGMA, 1)
    nt.assert_almost_equal(as_dict["min"], -123, 1)
    nt.assert_almost_equal(as_dict["max"], 123, 1)
def transform_package(mtype, files, components):
    '''Put together header and list of data into one json output.

    feature_list contains all the information about the data to be extracted:
    features, feature_names, feature_components, feature_min, feature_max
    '''
    data_dict = transform_header(mtype)
    neurons = load_neurons(files)
    for comp in components:
        comp_params = PARAM_MAP[comp.name]
        for feature in comp.features:
            fitted = stats.fit_results_to_dict(
                extract_data(neurons, feature.name, comp_params),
                feature.limits.min,
                feature.limits.max)
            # Sigma of zero means the "normal" distribution is really a
            # constant; store it as such.
            if fitted['type'] == 'normal' and fitted['sigma'] == 0.0:
                fitted = OrderedDict((('type', 'constant'),
                                      ('val', fitted['mu'])))
            data_dict["components"][comp.name].update({feature.name: fitted})
    return data_dict
def test_fit_results_dict_exponential_min_max():
    """Exponential fit dict carries lambda = 1/scale and the supplied bounds."""
    fit = st.FitResults(params=[2, 2], errs=[3, 4], type='expon')
    out = st.fit_results_to_dict(fit, min_bound=-100, max_bound=100)
    nt.assert_equal(out['type'], 'exponential')
    nt.assert_equal(out['lambda'], 0.5)
    nt.assert_equal(out['min'], -100)
    nt.assert_equal(out['max'], 100)
def test_fit_results_dict_exponential_min_max():
    """Exponential fit dict carries lambda = 1/scale and the supplied bounds."""
    fit_res = st.FitResults(params=[2, 2], errs=[3, 4], type='expon')
    serialised = st.fit_results_to_dict(fit_res, min_bound=-100, max_bound=100)
    nt.assert_equal(serialised['lambda'], 0.5)
    nt.assert_equal(serialised['min'], -100)
    nt.assert_equal(serialised['max'], 100)
    nt.assert_equal(serialised['type'], 'exponential')
def test_fit_normal_dict():
    """Normal fit of NORMAL serialises to the known mu/sigma plus the given bounds."""
    fit_result = st.fit(NORMAL, 'norm')
    as_dict = st.fit_results_to_dict(fit_result, min_bound=-123, max_bound=123)
    assert_almost_equal(as_dict['mu'], NORMAL_MU, 1)
    assert_almost_equal(as_dict['sigma'], NORMAL_SIGMA, 1)
    assert_almost_equal(as_dict['min'], -123, 1)
    assert_almost_equal(as_dict['max'], 123, 1)
def test_fit_results_dict_normal_min_max():
    """Normal fit dict exposes mu/sigma from params and the supplied bounds."""
    fit = st.FitResults(params=[1, 2], errs=[3, 4], type='norm')
    out = st.fit_results_to_dict(fit, min_bound=-100, max_bound=100)
    assert out['type'] == 'normal'
    assert out['mu'] == 1
    assert out['sigma'] == 2
    assert out['min'] == -100
    assert out['max'] == 100
def test_fit_results_dict_normal_min_max():
    """Normal fit dict exposes mu/sigma from params and the supplied bounds."""
    fit = st.FitResults(params=[1, 2], errs=[3, 4], type="norm")
    out = st.fit_results_to_dict(fit, min_bound=-100, max_bound=100)
    nt.assert_equal(out["type"], "normal")
    nt.assert_equal(out["mu"], 1)
    nt.assert_equal(out["sigma"], 2)
    nt.assert_equal(out["min"], -100)
    nt.assert_equal(out["max"], 100)
def test_fit_results_dict_normal_min_max():
    """Normal fit dict exposes mu/sigma from params and the supplied bounds."""
    fit_res = st.FitResults(params=[1, 2], errs=[3, 4], type='norm')
    serialised = st.fit_results_to_dict(fit_res, min_bound=-100, max_bound=100)
    nt.assert_equal(serialised['mu'], 1)
    nt.assert_equal(serialised['sigma'], 2)
    nt.assert_equal(serialised['min'], -100)
    nt.assert_equal(serialised['max'], 100)
    nt.assert_equal(serialised['type'], 'normal')
def test_fit_results_dict_normal_min_max():
    """Normal fit dict exposes mu/sigma from params and the supplied bounds."""
    fit = st.FitResults(params=[1, 2], errs=[3, 4], type='norm')
    out = st.fit_results_to_dict(fit, min_bound=-100, max_bound=100)
    nt.assert_equal(out['type'], 'normal')
    nt.assert_equal(out['mu'], 1)
    nt.assert_equal(out['sigma'], 2)
    nt.assert_equal(out['min'], -100)
    nt.assert_equal(out['max'], 100)
def transform_package(mtype, files, components, feature_list):
    '''Put together header and list of data into one json output.

    feature_list contains all the information about the data to be extracted:
    features, feature_names, feature_components, feature_min, feature_max
    '''
    data_dict = transform_header(mtype, components)
    for feature, name, comp, fmin, fmax, fparam in feature_list:
        result = stats.fit_results_to_dict(extract_data(files, feature, fparam),
                                           fmin, fmax)
        # Merge into the component's dict instead of assigning a fresh one:
        # plain assignment discarded earlier features of the same component.
        data_dict["components"].setdefault(comp, {})[name] = result
    return data_dict
def test_fit_results_dict_exponential():
    """Exponential fit dict exposes lambda = 1/scale."""
    fit = st.FitResults(params=[2, 2], errs=[3, 4], type='expon')
    out = st.fit_results_to_dict(fit)
    nt.assert_equal(out['type'], 'exponential')
    nt.assert_equal(out['lambda'], 0.5)
def test_fit_results_dict_normal():
    """Normal fit dict exposes mu and sigma from params."""
    fit = st.FitResults(params=[1, 2], errs=[3, 4], type='norm')
    out = st.fit_results_to_dict(fit)
    nt.assert_equal(out['type'], 'normal')
    nt.assert_equal(out['mu'], 1)
    nt.assert_equal(out['sigma'], 2)
def test_fit_results_dict_uniform_min_max():
    """Uniform fit dict derives min/max from params; bound kwargs are ignored."""
    fit = st.FitResults(params=[1, 2], errs=[3, 4], type='uniform')
    out = st.fit_results_to_dict(fit, min_bound=-100, max_bound=100)
    nt.assert_equal(out['type'], 'uniform')
    nt.assert_equal(out['min'], 1)
    nt.assert_equal(out['max'], 3)
    # NOTE(review): this line is the tail of an argument-parser builder whose
    # `def` is outside this chunk — presumably parse_args(); confirm upstream.
    parser.add_argument('feature', help='Feature to be extracted with neurom.get')
    return parser.parse_args()


def extract_data(data_path, feature):
    '''Loads a list of neurons, extracts feature and transforms the fitted
    distribution in the correct format.

    Returns the optimal distribution, corresponding parameters,
    minimum and maximum values.
    '''
    population = nm.load_neurons(data_path)
    # One feature array per neuron, flattened into a single list before fitting
    feature_data = [nm.get(feature, n) for n in population]
    feature_data = list(chain(*feature_data))
    return stats.optimal_distribution(feature_data)


if __name__ == '__main__':
    args = parse_args()
    d_path = args.datapath
    feat = args.feature
    # Fit the requested feature and dump the result as pretty-printed JSON
    _result = stats.fit_results_to_dict(extract_data(d_path, feat))
    print(json.dumps(_result, indent=2, separators=(',', ': ')))
parser.add_argument('feature', help='Feature to be extracted with neurom.get') return parser.parse_args() def extract_data(data_path, feature): '''Loads a list of neurons, extracts feature and transforms the fitted distribution in the correct format. Returns the optimal distribution, corresponding parameters, minimun and maximum values. ''' population = nm.load_neurons(data_path) feature_data = [nm.get(feature, n) for n in population] feature_data = list(chain(*feature_data)) return stats.optimal_distribution(feature_data) if __name__ == '__main__': args = parse_args() d_path = args.datapath feat = args.feature _result = stats.fit_results_to_dict(extract_data(d_path, feat)) print json.dumps(_result, indent=2, separators=(',', ': '))
def test_fit_results_dict_uniform_min_max():
    """Uniform fit dict derives min/max from params; bound kwargs are ignored."""
    fit = st.FitResults(params=[1, 2], errs=[3, 4], type="uniform")
    out = st.fit_results_to_dict(fit, min_bound=-100, max_bound=100)
    nt.assert_equal(out["type"], "uniform")
    nt.assert_equal(out["min"], 1)
    nt.assert_equal(out["max"], 3)
def test_fit_results_dict_uniform():
    """Uniform fit dict derives min/max from params (loc, loc + scale)."""
    fit = st.FitResults(params=[1, 2], errs=[3, 4], type="uniform")
    out = st.fit_results_to_dict(fit)
    nt.assert_equal(out["type"], "uniform")
    nt.assert_equal(out["min"], 1)
    nt.assert_equal(out["max"], 3)
def test_fit_results_dict_exponential():
    """Exponential fit dict exposes lambda = 1/scale."""
    fit = st.FitResults(params=[2, 2], errs=[3, 4], type='expon')
    out = st.fit_results_to_dict(fit)
    assert out['type'] == 'exponential'
    assert out['lambda'] == 0.5
def test_fit_results_dict_normal():
    """Normal fit dict exposes mu and sigma from params."""
    fit = st.FitResults(params=[1, 2], errs=[3, 4], type='norm')
    out = st.fit_results_to_dict(fit)
    assert out['type'] == 'normal'
    assert out['mu'] == 1
    assert out['sigma'] == 2
def test_fit_results_dict_exponential():
    """Exponential fit dict exposes lambda = 1/scale."""
    fit_res = st.FitResults(params=[2, 2], errs=[3, 4], type='expon')
    serialised = st.fit_results_to_dict(fit_res)
    nt.assert_equal(serialised['lambda'], 0.5)
    nt.assert_equal(serialised['type'], 'exponential')
def test_fit_results_dict_uniform():
    """Uniform fit dict derives min/max from params (loc, loc + scale)."""
    fit = st.FitResults(params=[1, 2], errs=[3, 4], type='uniform')
    out = st.fit_results_to_dict(fit)
    nt.assert_equal(out['type'], 'uniform')
    nt.assert_equal(out['min'], 1)
    nt.assert_equal(out['max'], 3)
def test_fit_results_dict_normal():
    """Normal fit dict exposes mu and sigma from params."""
    fit = st.FitResults(params=[1, 2], errs=[3, 4], type="norm")
    out = st.fit_results_to_dict(fit)
    nt.assert_equal(out["type"], "normal")
    nt.assert_equal(out["mu"], 1)
    nt.assert_equal(out["sigma"], 2)
def test_fit_results_dict_uniform():
    """Uniform fit dict derives min/max from params (loc, loc + scale)."""
    fit_res = st.FitResults(params=[1, 2], errs=[3, 4], type='uniform')
    serialised = st.fit_results_to_dict(fit_res)
    nt.assert_equal(serialised['min'], 1)
    nt.assert_equal(serialised['max'], 3)
    nt.assert_equal(serialised['type'], 'uniform')
def test_fit_results_dict_uniform_min_max():
    """Uniform fit dict derives min/max from params; bound kwargs are ignored."""
    fit = st.FitResults(params=[1, 2], errs=[3, 4], type='uniform')
    out = st.fit_results_to_dict(fit, min_bound=-100, max_bound=100)
    assert out['type'] == 'uniform'
    assert out['min'] == 1
    assert out['max'] == 3
def test_fit_results_dict_normal():
    """Normal fit dict exposes mu and sigma from params."""
    fit_res = st.FitResults(params=[1, 2], errs=[3, 4], type='norm')
    serialised = st.fit_results_to_dict(fit_res)
    nt.assert_equal(serialised['mu'], 1)
    nt.assert_equal(serialised['sigma'], 2)
    nt.assert_equal(serialised['type'], 'normal')
def test_fit_results_dict_uniform():
    """Uniform fit dict derives min/max from params (loc, loc + scale)."""
    fit = st.FitResults(params=[1, 2], errs=[3, 4], type='uniform')
    out = st.fit_results_to_dict(fit)
    assert out['type'] == 'uniform'
    assert out['min'] == 1
    assert out['max'] == 3
def test_fit_results_dict_exponential():
    """Exponential fit dict exposes lambda = 1/scale."""
    fit = st.FitResults(params=[2, 2], errs=[3, 4], type="expon")
    out = st.fit_results_to_dict(fit)
    nt.assert_equal(out["type"], "exponential")
    nt.assert_equal(out["lambda"], 0.5)
def test_fit_results_dict_uniform_min_max():
    """Uniform fit dict derives min/max from params; bound kwargs are ignored."""
    fit_res = st.FitResults(params=[1, 2], errs=[3, 4], type='uniform')
    serialised = st.fit_results_to_dict(fit_res, min_bound=-100, max_bound=100)
    nt.assert_equal(serialised['min'], 1)
    nt.assert_equal(serialised['max'], 3)
    nt.assert_equal(serialised['type'], 'uniform')