Example #1
def ap_loguniform_sampler(obs, prior_weight, low, high,
        size=(), rng=None):
    # prior over log(x): a normal centered at the midpoint of [low, high],
    # one full range wide -- a weak prior on the log scale
    prior_mu = 0.5 * (high + low)
    prior_sigma = 1.0 * (high - low)
    # blend one Gaussian per log-observation with the prior component
    weights, mus, sigmas = scope.adaptive_parzen_normal(
            scope.log(obs), prior_weight, prior_mu, prior_sigma)
    # draw from the mixture over log(x), bounded to [low, high], and map the
    # samples back to the original scale
    rval = scope.LGMM1(weights, mus, sigmas, low=low, high=high,
            size=size, rng=rng)
    return rval
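This sampler leans on two hyperopt internals: scope.adaptive_parzen_normal, which blends one Gaussian per past observation with the prior component, and scope.LGMM1, which draws from the resulting mixture over log(x) and maps the draws back to the original scale. The following is a rough, hypothetical NumPy sketch of the same idea, with the per-component widths simplified to the prior width and the truncation done by rejection; it is not hyperopt's implementation.

import numpy as np

def parzen_loguniform_sample(obs, prior_weight, low, high, size=100, rng=None):
    # illustrative only: fixed-width components, naive rejection sampling
    rng = np.random.default_rng() if rng is None else rng
    log_obs = np.log(np.asarray(obs, dtype=float))
    mus = np.concatenate([[0.5 * (high + low)], log_obs])    # prior + one per obs
    sigmas = np.full(len(mus), 1.0 * (high - low))            # simplified widths
    weights = np.concatenate([[prior_weight], np.ones(len(log_obs))])
    weights = weights / weights.sum()
    out = []
    while len(out) < size:
        k = rng.choice(len(weights), p=weights)               # pick a component
        draw = rng.normal(mus[k], sigmas[k])
        if low <= draw <= high:                               # truncate to [low, high]
            out.append(np.exp(draw))                          # back to the original scale
    return np.asarray(out)

print(parzen_loguniform_sample([0.5, 1.5, 2.0], 1.0, np.log(0.1), np.log(10.0), size=5))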
Example #2
File: test_tpe.py  Project: wqren/hyperopt
def many_dists():
    a = hp_choice('a', [0, 1, 2])
    b = hp_randint('b', 10)
    c = hp_uniform('c', 4, 7)
    d = hp_loguniform('d', -2, 0)
    e = hp_quniform('e', 0, 10, 3)
    f = hp_qloguniform('f', 0, 3, 2)
    g = hp_normal('g', 4, 7)
    h = hp_lognormal('h', -2, 2)
    i = hp_qnormal('i', 0, 10, 2)
    j = hp_qlognormal('j', 0, 2, 1)
    z = a + b + c + d + e + f + g + h + i + j
    return {'loss': scope.float(scope.log(1e-12 + z**2))}
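The hp_* helpers used in this test are shorthands for hyperopt's public hp.* constructors. As a hedged sketch of how a similar search space is used end to end with the TPE algorithm, something along these lines should work with the public API; the space, objective, and max_evals below are illustrative, not taken from the test.

from math import log

from hyperopt import fmin, hp, tpe

space = {
    'a': hp.choice('a', [0, 1, 2]),
    'c': hp.uniform('c', 4, 7),
    'd': hp.loguniform('d', -2, 0),
    'e': hp.quniform('e', 0, 10, 3),
}

def objective(params):
    # toy loss with the same shape as the test's expression
    z = params['a'] + params['c'] + params['d'] + params['e']
    return log(1e-12 + z ** 2)

best = fmin(fn=objective, space=space, algo=tpe.suggest, max_evals=50)
print(best)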
Example #3
File: test_tpe.py  Project: ardila/hyperopt
def many_dists():
    a = hp_choice('a', [0, 1, 2])
    b = hp_randint('b', 10)
    c = hp_uniform('c', 4, 7)
    d = hp_loguniform('d', -2, 0)
    e = hp_quniform('e', 0, 10, 3)
    f = hp_qloguniform('f', 0, 3, 2)
    g = hp_normal('g', 4, 7)
    h = hp_lognormal('h', -2, 2)
    i = hp_qnormal('i', 0, 10, 2)
    j = hp_qlognormal('j', 0, 2, 1)
    z = a + b + c + d + e + f + g + h + i + j
    return {'loss': scope.float(scope.log(1e-12 + z ** 2))}
Example #4
File: tpe.py  Project: npinto/hyperopt
def ap_qloguniform_sampler(obs, prior_weight, low, high, q, size=(), rng=None):
    prior_mu = 0.5 * (high + low)
    prior_sigma = 1.0 * (high - low)
    weights, mus, sigmas = scope.adaptive_parzen_normal(
        scope.log(
            # -- map observations that were quantized to be below exp(low)
            #    (particularly 0) back up to exp(low) where they will
            #    interact in a reasonable way with the AdaptiveParzen
            #    thing.
            scope.maximum(obs, scope.maximum(EPS, scope.exp(low)))  # -- protect against exp(low) underflow
        ),
        prior_weight,
        prior_mu,
        prior_sigma,
    )
    return scope.LGMM1(weights, mus, sigmas, low, high, q=q, size=size, rng=rng)
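The extra clipping here guards against quantized observations that collapsed to 0: log(0) is -inf and would poison the Parzen fit, so such values are mapped up to max(EPS, exp(low)) before taking logs. A small, self-contained NumPy illustration of the effect (the numbers are made up for the demo):

import numpy as np

EPS = 1e-12                    # plays the same role as the EPS constant above
low = -5.0                     # exp(low) ~= 0.0067
obs = np.array([0.0, 0.0, 1.0, 3.0])      # quantized draws, some collapsed to 0

with np.errstate(divide='ignore'):
    naive = np.log(obs)                                    # contains -inf
clipped = np.log(np.maximum(obs, max(EPS, np.exp(low))))   # zeros mapped to low
print(naive)
print(clipped)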
Example #5
def many_dists():
    a = hp_choice('a', [0, 1, 2])
    b = hp_randint('b', 10)
    c = hp_uniform('c', 4, 7)
    d = hp_loguniform('d', -2, 0)
    e = hp_quniform('e', 0, 10, 3)
    f = hp_qloguniform('f', 0, 3, 2)
    g = hp_normal('g', 4, 7)
    h = hp_lognormal('h', -2, 2)
    i = hp_qnormal('i', 0, 10, 2)
    j = hp_qlognormal('j', 0, 2, 1)
    k = hp_pchoice('k', [(.1, 0), (.9, 1)])
    z = a + b + c + d + e + f + g + h + i + j + k
    return {
        'loss': scope.float(scope.log(1e-12 + z**2)),
        'status': base.STATUS_OK
    }
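Relative to the earlier many_dists variants, this version adds a weighted hp_pchoice hyperparameter and an explicit 'status' key (base.STATUS_OK is the same constant exported as hyperopt.STATUS_OK). A quick, hypothetical way to sanity-check what a pchoice space produces is to draw raw samples from it with hyperopt's pyll sampler; the snippet below is illustrative, not part of the test.

from hyperopt import hp
from hyperopt.pyll.stochastic import sample

space = {
    'k': hp.pchoice('k', [(.1, 0), (.9, 1)]),   # option 1 drawn ~90% of the time
    'c': hp.uniform('c', 4, 7),
}
print([sample(space)['k'] for _ in range(10)])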
Example #6
def ap_qloguniform_sampler(obs, prior_weight, low, high, q,
        size=(), rng=None):
    prior_mu = 0.5 * (high + low)
    prior_sigma = 1.0 * (high - low)
    weights, mus, sigmas = scope.adaptive_parzen_normal(
            scope.log(
                # -- map observations that were quantized to be below exp(low)
                #    (particularly 0) back up to exp(low) where they will
                #    interact in a reasonable way with the AdaptiveParzen
                #    thing.
                scope.maximum(
                    obs,
                    scope.maximum(  # -- protect against exp(low) underflow
                        EPS,
                        scope.exp(low)))),
            prior_weight, prior_mu, prior_sigma)
    return scope.LGMM1(weights, mus, sigmas, low, high, q=q,
            size=size, rng=rng)
Example #7
def ap_qlognormal_sampler(obs, prior_weight, mu, sigma, q, size=(), rng=None):
    # quantized observations can be exactly 0; clip to EPS so log() stays finite
    log_obs = scope.log(scope.maximum(obs, EPS))
    weights, mus, sigmas = scope.adaptive_parzen_normal(
            log_obs, prior_weight, mu, sigma)
    rval = scope.LGMM1(weights, mus, sigmas, q=q, size=size, rng=rng)
    return rval
Example #8
def ap_loglognormal_sampler(obs, prior_weight, mu, sigma, size=(), rng=None):
    # fit the adaptive Parzen estimator to log(obs) around the lognormal prior
    weights, mus, sigmas = scope.adaptive_parzen_normal(
            scope.log(obs), prior_weight, mu, sigma)
    rval = scope.LGMM1(weights, mus, sigmas, size=size, rng=rng)
    return rval
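All of the ap_*_sampler variants above follow the same recipe: map observations into the space where the prior is Gaussian (the log for the log*/qlog* cases), fit an adaptive Parzen mixture blended with that prior, then draw from the possibly truncated and quantized mixture. Within TPE this is done separately for the better and the worse trials, and new candidates are ranked by the ratio of the two densities. The following hypothetical NumPy sketch shows that ranking idea with a crude fixed-bandwidth estimator; it is not hyperopt code, and parzen_logpdf is an illustrative helper, not a library function.

import numpy as np

def parzen_logpdf(x, centers, sigma):
    # crude fixed-bandwidth Parzen estimator, for illustration only
    d = (x[:, None] - centers[None, :]) / sigma
    dens = np.exp(-0.5 * d ** 2).mean(axis=1) / (sigma * np.sqrt(2 * np.pi))
    return np.log(dens)

rng = np.random.default_rng(0)
xs = rng.uniform(-2, 2, size=50)                   # past hyperparameter values
losses = (xs - 0.5) ** 2 + rng.normal(scale=0.1, size=50)

gamma = 0.25                                       # fraction treated as "good"
cut = np.quantile(losses, gamma)
good, bad = xs[losses <= cut], xs[losses > cut]

cands = rng.uniform(-2, 2, size=200)
score = parzen_logpdf(cands, good, 0.3) - parzen_logpdf(cands, bad, 0.3)
print("most promising candidate:", cands[np.argmax(score)])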