示例#1
0
def test_fit_normal_dict():
    """A normal fit converts to a dict exposing mu, sigma and the bounds."""
    fit_result = st.fit(NORMAL, 'norm')
    as_dict = st.fit_results_to_dict(fit_result, min_bound=-123, max_bound=123)
    for key, expected in (('mu', NORMAL_MU), ('sigma', NORMAL_SIGMA),
                          ('min', -123), ('max', 123)):
        nt.assert_almost_equal(as_dict[key], expected, 1)
示例#2
0
def transform_package(mtype, files, components):
    """Assemble the header and per-feature fit results into one json dict.

    For every component listed, each of its features is extracted from the
    loaded neurons and fitted; the fit is stored under that component's
    entry of the returned dictionary.
    """
    output = transform_header(mtype)
    neurons = load_neurons(files)

    for component in components:
        fit_params = PARAM_MAP[component.name]
        target = output["components"][component.name]
        for feature in component.features:
            fitted = stats.fit_results_to_dict(
                extract_data(neurons, feature.name, fit_params),
                feature.limits.min, feature.limits.max)

            # A normal distribution with sigma == 0 degenerates to a
            # constant, so store it as one.
            if fitted['type'] == 'normal' and fitted['sigma'] == 0.0:
                fitted = OrderedDict(
                    [('type', 'constant'), ('val', fitted['mu'])])

            target[feature.name] = fitted

    return output
示例#3
0
def test_fit_results_dict_exponential_min_max():
    """Bounded exponential fit exposes lambda, min, max and type."""
    fit = st.FitResults(params=[2, 2], errs=[3, 4], type="expon")
    result = st.fit_results_to_dict(fit, min_bound=-100, max_bound=100)
    for key, expected in (("lambda", 0.5), ("min", -100),
                          ("max", 100), ("type", "exponential")):
        nt.assert_equal(result[key], expected)
示例#4
0
def test_fit_results_dict_exponential_min_max():
    """fit_results_to_dict with bounds yields lambda, min, max and type."""
    fit = st.FitResults(params=[2, 2], errs=[3, 4], type='expon')
    result = st.fit_results_to_dict(fit, min_bound=-100, max_bound=100)
    expected = {'lambda': 0.5, 'min': -100, 'max': 100, 'type': 'exponential'}
    for key in expected:
        assert result[key] == expected[key]
示例#5
0
def test_fit_normal_dict():
    """Normal fit dict carries mu/sigma plus the supplied bounds."""
    result = st.fit_results_to_dict(st.fit(NORMAL, "norm"),
                                    min_bound=-123, max_bound=123)
    nt.assert_almost_equal(result["mu"], NORMAL_MU, 1)
    nt.assert_almost_equal(result["sigma"], NORMAL_SIGMA, 1)
    nt.assert_almost_equal(result["max"], 123, 1)
    nt.assert_almost_equal(result["min"], -123, 1)
示例#6
0
def transform_package(mtype, files, components):
    """Combine the header with per-feature fit results into one json output.

    ``components`` describes, per component, which features to extract
    together with their limits; each fit is stored under the component's
    entry in the returned dictionary.
    """
    def _fit_feature(neurons, feature, params):
        # Fit one feature; a normal with sigma == 0 degenerates to a
        # constant and is stored as one.
        fitted = stats.fit_results_to_dict(
            extract_data(neurons, feature.name, params),
            feature.limits.min, feature.limits.max
        )
        if fitted['type'] == 'normal' and fitted['sigma'] == 0.0:
            fitted = OrderedDict((('type', 'constant'),
                                  ('val', fitted['mu'])))
        return fitted

    data_dict = transform_header(mtype)
    neurons = load_neurons(files)

    for comp in components:
        params = PARAM_MAP[comp.name]
        for feature in comp.features:
            data_dict["components"][comp.name].update(
                {feature.name: _fit_feature(neurons, feature, params)})

    return data_dict
示例#7
0
def test_fit_results_dict_exponential_min_max():
    """Exponential fit with bounds reports lambda, min, max and type."""
    fit = st.FitResults(params=[2, 2], errs=[3, 4], type='expon')
    result = st.fit_results_to_dict(fit, min_bound=-100, max_bound=100)
    nt.assert_equal(result['type'], 'exponential')
    nt.assert_equal(result['lambda'], 0.5)
    nt.assert_equal(result['min'], -100)
    nt.assert_equal(result['max'], 100)
示例#8
0
def test_fit_results_dict_exponential_min_max():
    """Bounded exponential fit: lambda is 1/scale, bounds pass through."""
    result = st.fit_results_to_dict(
        st.FitResults(params=[2, 2], errs=[3, 4], type='expon'),
        min_bound=-100, max_bound=100)
    for key, expected in (('lambda', 0.5), ('min', -100),
                          ('max', 100), ('type', 'exponential')):
        nt.assert_equal(result[key], expected)
示例#9
0
def test_fit_normal_dict():
    """Normal fit dict holds mu/sigma plus the supplied bounds."""
    result = st.fit_results_to_dict(st.fit(NORMAL, 'norm'),
                                    min_bound=-123, max_bound=123)
    for key, want in [('mu', NORMAL_MU), ('sigma', NORMAL_SIGMA),
                      ('min', -123), ('max', 123)]:
        assert_almost_equal(result[key], want, 1)
示例#10
0
def test_fit_results_dict_normal_min_max():
    """Bounded normal fit exposes mu, sigma, min, max and type."""
    fit = st.FitResults(params=[1, 2], errs=[3, 4], type='norm')
    result = st.fit_results_to_dict(fit, min_bound=-100, max_bound=100)
    expected = {'mu': 1, 'sigma': 2, 'min': -100, 'max': 100, 'type': 'normal'}
    for key, value in expected.items():
        assert result[key] == value
示例#11
0
def test_fit_results_dict_normal_min_max():
    """Normal fit with bounds: params map to mu/sigma, bounds pass through."""
    result = st.fit_results_to_dict(
        st.FitResults(params=[1, 2], errs=[3, 4], type="norm"),
        min_bound=-100, max_bound=100)
    for key, expected in (("mu", 1), ("sigma", 2), ("min", -100),
                          ("max", 100), ("type", "normal")):
        nt.assert_equal(result[key], expected)
示例#12
0
def test_fit_results_dict_normal_min_max():
    """Bounded normal fit carries mu, sigma, the bounds and its type."""
    fit = st.FitResults(params=[1, 2], errs=[3, 4], type='norm')
    result = st.fit_results_to_dict(fit, min_bound=-100, max_bound=100)
    nt.assert_equal(result['type'], 'normal')
    nt.assert_equal(result['mu'], 1)
    nt.assert_equal(result['sigma'], 2)
    nt.assert_equal(result['min'], -100)
    nt.assert_equal(result['max'], 100)
示例#13
0
def test_fit_results_dict_normal_min_max():
    """Normal fit dict: mu/sigma come from params, min/max from bounds."""
    fitted = st.FitResults(params=[1, 2], errs=[3, 4], type='norm')
    as_dict = st.fit_results_to_dict(fitted, min_bound=-100, max_bound=100)
    checks = [('mu', 1), ('sigma', 2), ('min', -100),
              ('max', 100), ('type', 'normal')]
    for key, expected in checks:
        nt.assert_equal(as_dict[key], expected)
示例#14
0
def transform_package(mtype, files, components, feature_list):
    """Build the json output from the header plus per-feature fit results.

    ``feature_list`` rows are (feature, name, component, min, max, params)
    tuples describing what to extract from ``files`` and under which
    component/name the fitted distribution is stored.
    """
    data_dict = transform_header(mtype, components)

    for feature, name, comp, fmin, fmax, fparam in feature_list:
        fitted = stats.fit_results_to_dict(
            extract_data(files, feature, fparam), fmin, fmax)
        data_dict["components"][comp] = {name: fitted}

    return data_dict
示例#15
0
def test_fit_results_dict_exponential():
    """Unbounded exponential fit yields only lambda (= 1/scale) and type."""
    fit = st.FitResults(params=[2, 2], errs=[3, 4], type='expon')
    result = st.fit_results_to_dict(fit)
    nt.assert_equal(result['type'], 'exponential')
    nt.assert_equal(result['lambda'], 0.5)
示例#16
0
def test_fit_results_dict_normal():
    """Unbounded normal fit carries mu, sigma and type."""
    result = st.fit_results_to_dict(
        st.FitResults(params=[1, 2], errs=[3, 4], type='norm'))
    for key, expected in (('mu', 1), ('sigma', 2), ('type', 'normal')):
        nt.assert_equal(result[key], expected)
示例#17
0
def test_fit_results_dict_uniform_min_max():
    """Uniform fit ignores the explicit bounds; min/max come from params."""
    fit = st.FitResults(params=[1, 2], errs=[3, 4], type='uniform')
    result = st.fit_results_to_dict(fit, min_bound=-100, max_bound=100)
    nt.assert_equal(result['type'], 'uniform')
    nt.assert_equal(result['min'], 1)
    nt.assert_equal(result['max'], 3)
示例#18
0
    parser.add_argument('feature',
                        help='Feature to be extracted with neurom.get')

    return parser.parse_args()


def extract_data(data_path, feature):
    """Load neurons from *data_path*, pool *feature* values across them and
    return the optimal fitted distribution (type, parameters, min and max).
    """
    population = nm.load_neurons(data_path)

    # Flatten the per-neuron feature arrays into a single list of values.
    pooled = list(chain(*(nm.get(feature, neuron) for neuron in population)))

    return stats.optimal_distribution(pooled)


if __name__ == '__main__':
    # Parse CLI arguments, fit the requested feature and dump json to stdout.
    args = parse_args()
    _result = stats.fit_results_to_dict(
        extract_data(args.datapath, args.feature))
    print(json.dumps(_result, indent=2, separators=(',', ': ')))
示例#19
0
    parser.add_argument('feature',
                        help='Feature to be extracted with neurom.get')

    return parser.parse_args()


def extract_data(data_path, feature):
    """Extract *feature* from every neuron found at *data_path*.

    Returns the optimal fitted distribution over the pooled values
    (distribution type, parameters, minimum and maximum).
    """
    population = nm.load_neurons(data_path)

    # One array of values per neuron, flattened into a single sequence.
    per_neuron = (nm.get(feature, neuron) for neuron in population)
    pooled = list(chain(*per_neuron))

    return stats.optimal_distribution(pooled)


if __name__ == '__main__':
    # Parse CLI arguments, fit the requested feature and dump json to stdout.
    args = parse_args()

    d_path = args.datapath

    feat = args.feature

    _result = stats.fit_results_to_dict(extract_data(d_path, feat))

    # print() as a function: the Python 2 print statement is a SyntaxError
    # under Python 3, and the sibling script in this code base already uses
    # the function form.
    print(json.dumps(_result, indent=2, separators=(',', ': ')))
示例#20
0
def test_fit_results_dict_uniform_min_max():
    """Uniform fit keeps min/max from its params even when bounds are given."""
    result = st.fit_results_to_dict(
        st.FitResults(params=[1, 2], errs=[3, 4], type="uniform"),
        min_bound=-100, max_bound=100)
    for key, expected in (("min", 1), ("max", 3), ("type", "uniform")):
        nt.assert_equal(result[key], expected)
示例#21
0
def test_fit_results_dict_uniform():
    """Uniform fit dict reports min, max (= loc + scale) and type."""
    fit = st.FitResults(params=[1, 2], errs=[3, 4], type="uniform")
    result = st.fit_results_to_dict(fit)
    nt.assert_equal(result["type"], "uniform")
    nt.assert_equal(result["min"], 1)
    nt.assert_equal(result["max"], 3)
示例#22
0
def test_fit_results_dict_exponential():
    """Unbounded exponential fit yields lambda and type only."""
    fit = st.FitResults(params=[2, 2], errs=[3, 4], type='expon')
    result = st.fit_results_to_dict(fit)
    assert result['type'] == 'exponential'
    assert result['lambda'] == 0.5
示例#23
0
def test_fit_results_dict_normal():
    """Unbounded normal fit maps params onto mu and sigma."""
    result = st.fit_results_to_dict(
        st.FitResults(params=[1, 2], errs=[3, 4], type='norm'))
    expected = {'mu': 1, 'sigma': 2, 'type': 'normal'}
    for key, value in expected.items():
        assert result[key] == value
示例#24
0
def test_fit_results_dict_exponential():
    """Exponential fit converts to a dict with lambda (= 1/scale) and type."""
    result = st.fit_results_to_dict(
        st.FitResults(params=[2, 2], errs=[3, 4], type='expon'))
    for key, expected in (('lambda', 0.5), ('type', 'exponential')):
        nt.assert_equal(result[key], expected)
示例#25
0
def test_fit_results_dict_uniform():
    """Uniform fit dict reports min, max (= loc + scale) and type."""
    fit = st.FitResults(params=[1, 2], errs=[3, 4], type='uniform')
    result = st.fit_results_to_dict(fit)
    for key, expected in (('min', 1), ('max', 3), ('type', 'uniform')):
        nt.assert_equal(result[key], expected)
示例#26
0
def test_fit_results_dict_normal():
    """Unbounded normal fit carries mu, sigma and type."""
    fit = st.FitResults(params=[1, 2], errs=[3, 4], type="norm")
    result = st.fit_results_to_dict(fit)
    nt.assert_equal(result["type"], "normal")
    nt.assert_equal(result["mu"], 1)
    nt.assert_equal(result["sigma"], 2)
示例#27
0
def test_fit_results_dict_uniform():
    """Uniform fit dict: min from loc, max from loc + scale."""
    result = st.fit_results_to_dict(
        st.FitResults(params=[1, 2], errs=[3, 4], type='uniform'))
    for key, expected in [('min', 1), ('max', 3), ('type', 'uniform')]:
        nt.assert_equal(result[key], expected)
示例#28
0
def test_fit_results_dict_uniform_min_max():
    """Uniform fit keeps its own min/max even when explicit bounds are given."""
    fit = st.FitResults(params=[1, 2], errs=[3, 4], type='uniform')
    result = st.fit_results_to_dict(fit, min_bound=-100, max_bound=100)
    expected = {'min': 1, 'max': 3, 'type': 'uniform'}
    for key in expected:
        assert result[key] == expected[key]
示例#29
0
def test_fit_results_dict_normal():
    """Normal fit converts to a dict with mu, sigma and type."""
    fitted = st.FitResults(params=[1, 2], errs=[3, 4], type='norm')
    as_dict = st.fit_results_to_dict(fitted)
    nt.assert_equal(as_dict['type'], 'normal')
    nt.assert_equal(as_dict['mu'], 1)
    nt.assert_equal(as_dict['sigma'], 2)
示例#30
0
def test_fit_results_dict_uniform():
    """Unbounded uniform fit: min/max derived from params, plus type."""
    result = st.fit_results_to_dict(
        st.FitResults(params=[1, 2], errs=[3, 4], type='uniform'))
    assert result['type'] == 'uniform'
    assert result['min'] == 1
    assert result['max'] == 3
示例#31
0
def test_fit_results_dict_exponential():
    """Exponential fit dict: lambda is the reciprocal of the scale param."""
    fit = st.FitResults(params=[2, 2], errs=[3, 4], type="expon")
    result = st.fit_results_to_dict(fit)
    nt.assert_equal(result["type"], "exponential")
    nt.assert_equal(result["lambda"], 0.5)
示例#32
0
def test_fit_results_dict_uniform_min_max():
    """Uniform fit's min/max come from its params, not the explicit bounds."""
    fitted = st.FitResults(params=[1, 2], errs=[3, 4], type='uniform')
    as_dict = st.fit_results_to_dict(fitted, min_bound=-100, max_bound=100)
    checks = (('min', 1), ('max', 3), ('type', 'uniform'))
    for key, expected in checks:
        nt.assert_equal(as_dict[key], expected)