Example #1
def trainCG(distInit, accumulate, ps=d.getDefaultParamSpec(), length=-50,
            verbosity=0):
    """Re-estimates a distribution using a conjugate gradient optimizer.

    See the note in the docstring for this module for information on how the
    log likelihood is scaled. This scaling is presumed to have only a small
    impact on the dist returned by this function.
    """
    # (FIXME : investigate how large the effect of the scale factor is for
    #   a few example dists?)
    def negLogLike_derivParams(params):
        # Objective for the optimizer: negative log likelihood and its
        # gradient w.r.t. the parameters, both scaled by the accumulated count.
        dist = ps.parseAll(distInit, params)
        acc = ps.createAccG(dist)
        accumulate(acc)
        # normalize by the accumulated count, guarding against division by zero
        count = max(acc.count(), 1.0)
        return -acc.logLike() / count, -ps.derivParams(acc) / count

    params = ps.params(distInit)
    if verbosity >= 2:
        print('trainCG: initial params =', params)
        print('trainCG: initial derivParams =',
              -negLogLike_derivParams(params)[1])
    params, negLogLikes, lengthUsed = minimize(negLogLike_derivParams, params, length = length, verbosity = verbosity)
    if verbosity >= 3:
        print('trainCG: logLikes =', [-x for x in negLogLikes])
    if verbosity >= 2:
        print('trainCG: final params =', params)
        print('trainCG: final derivParams =',
              -negLogLike_derivParams(params)[1])
    if verbosity >= 1:
        print('trainCG: logLike %s -> %s (delta = %s)' %
              (-negLogLikes[0], -negLogLikes[-1],
               negLogLikes[0] - negLogLikes[-1]))
        print('trainCG: (used', lengthUsed, 'function evaluations)')
    dist = ps.parseAll(distInit, params)

    return dist
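A minimal usage sketch follows. It is hypothetical: `trainingPairs` and `distInit` are placeholder names, and it assumes the accumulator created by `ps.createAccG` exposes an `add(input, output)` method; none of these are defined by this snippet.

# Hypothetical usage of trainCG. 'trainingPairs' and 'distInit' are
# placeholders; acc.add(input, output) is an assumed accumulator method.
def accumulate(acc):
    # Feed each (input, output) training pair into the accumulator.
    for inp, out in trainingPairs:
        acc.add(inp, out)

distTrained = trainCG(distInit, accumulate, length=-50, verbosity=1)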
Example #2
def trainCGandEM(distInit, accumulate, ps=d.getDefaultParamSpec(),
                 createAccEM=d.getDefaultCreateAcc(),
                 estimateTotAux=d.getDefaultEstimateTotAux(), iterations=5,
                 length=-50, afterEst=None, verbosity=0):
    """Re-estimates a distribution using conjugate gradients and EM.

    See the note in the docstring for this module for information on how the
    log likelihood is scaled. This scaling is presumed to have only a small
    impact on the dist returned by this function (via its impact on trainCG).
    """
    assert iterations >= 1

    dist = distInit
    for it in range(1, iterations + 1):
        if verbosity >= 1:
            print('trainCGandEM: starting it =', it, 'of CG and EM')

        dist = (timed(trainCG) if verbosity >= 2 else trainCG)(
            dist, accumulate, ps=ps, length=length, verbosity=verbosity)

        dist, _, _, _ = expectationMaximization(
            dist, accumulate, createAcc=createAccEM,
            estimateTotAux=estimateTotAux, verbosity=verbosity)

        if afterEst is not None:
            afterEst(dist = dist, it = it)

        if verbosity >= 1:
            print('trainCGandEM: finished it =', it, 'of CG and EM')
            print('trainCGandEM:')

    return dist
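As a usage sketch, the afterEst hook can checkpoint the model after each iteration. The callback below and its pickle-based saving are illustrative assumptions, not part of this module's API.

import pickle

# Hypothetical checkpoint callback; trainCGandEM calls it as
# afterEst(dist=dist, it=it) after each EM re-estimation.
def saveCheckpoint(dist, it):
    with open('model.it%d.pkl' % it, 'wb') as f:  # illustrative path scheme
        pickle.dump(dist, f)

distTrained = trainCGandEM(distInit, accumulate, iterations=5,
                           afterEst=saveCheckpoint, verbosity=1)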
Example #3
def __init__(self, mdlFactor, paramSpec=d.getDefaultParamSpec()):
    # Store the MDL scaling factor and the parameter spec for later use.
    self.mdlFactor = mdlFactor
    self.paramSpec = paramSpec