Example #1
0
def l2sgd(model,
          instances,
          nr_epoth,
          init_learning_rate,
          adjust_learning_rate = False):
    '''
    Train the model with stochastic gradient descent using a learning
    rate that decays with the epoch number.

    - model: the model object; its weight vector `model.w` is updated
      in place.
    - instances: list of training instances (shuffled in place each
      epoch).
    - nr_epoth: number of passes over the training data.
    - init_learning_rate: learning rate used in the first epoch; later
      epochs use init_learning_rate / (1 + sqrt(epoth)).
    - adjust_learning_rate: accepted for interface compatibility;
      not read in this function.
    '''
    _sigma = 1.     # NOTE(review): unused here — presumably the L2 strength; confirm
    _gamma = init_learning_rate
    _t = 1.         # iteration counter; incremented but never read in this block

    _eta = 0.       # NOTE(review): unused in this block
    # Held-out sample (10% of the data, capped at 1000). It is never
    # evaluated below; kept because random.sample advances the RNG state
    # that the shuffles below depend on.
    samples = random.sample(instances, min(int(len(instances) * 0.1), 1000))

    for epoth in xrange(nr_epoth):
        LOG(INFO, "Training epoth [%d]" % epoth)
        # randomly shuffle the training instances
        random.shuffle(instances)

        # The decay factor depends only on the epoch, so compute it once
        # per epoch.  The previous code recomputed it after every single
        # instance, which also meant the first instance of each epoch was
        # trained with the previous epoch's stale learning rate.
        _gamma = init_learning_rate / (1 + sqrt(float(epoth)))

        # loop over the training instances
        for index, instance in enumerate(instances):
            # build the per-instance feature/score caches first
            build_instance(model.w, model.attrs, model.tags, instance)

            # SGD step: subtract the model expectation, add the
            # empirical (gold) feature counts.
            for k, v in expectation(model, instance).iteritems():
                model.w[k] -= v * _gamma
            for k, v in instance.correct_features.iteritems():
                model.w[k] += v * _gamma

            _t += 1.

            if (index + 1) % 1000 == 0:
                LOG(INFO, "%d instances is trained" % (index + 1))

            destroy_instance(instance)

        LOG(INFO, "%d instances is trained" % (index + 1))
        LOG(INFO, "Parameters norm %f" % norm(model.w))
Example #2
0
def l2sgd(model,
          instances,
          nr_epoth,
          init_learning_rate,
          adjust_learning_rate=False):
    '''
    Train the model with stochastic gradient descent using a learning
    rate that decays with the epoch number.

    - model: the model object; its weight vector `model.w` is updated
      in place.
    - instances: list of training instances (shuffled in place each
      epoch).
    - nr_epoth: number of passes over the training data.
    - init_learning_rate: learning rate used in the first epoch; later
      epochs use init_learning_rate / (1 + sqrt(epoth)).
    - adjust_learning_rate: accepted for interface compatibility;
      not read in this function.
    '''
    _sigma = 1.     # NOTE(review): unused here — presumably the L2 strength; confirm
    _gamma = init_learning_rate
    _t = 1.         # iteration counter; incremented but never read in this block

    _eta = 0.       # NOTE(review): unused in this block
    # Held-out sample (10% of the data, capped at 1000). It is never
    # evaluated below; kept because random.sample advances the RNG state
    # that the shuffles below depend on.
    samples = random.sample(instances, min(int(len(instances) * 0.1), 1000))

    for epoth in xrange(nr_epoth):
        LOG(INFO, "Training epoth [%d]" % epoth)
        # randomly shuffle the training instances
        random.shuffle(instances)

        # The decay factor depends only on the epoch, so compute it once
        # per epoch.  The previous code recomputed it after every single
        # instance, which also meant the first instance of each epoch was
        # trained with the previous epoch's stale learning rate.
        _gamma = init_learning_rate / (1 + sqrt(float(epoth)))

        # loop over the training instances
        for index, instance in enumerate(instances):
            # build the per-instance feature/score caches first
            build_instance(model.w, model.attrs, model.tags, instance)

            # SGD step: subtract the model expectation, add the
            # empirical (gold) feature counts.
            for k, v in expectation(model, instance).iteritems():
                model.w[k] -= v * _gamma
            for k, v in instance.correct_features.iteritems():
                model.w[k] += v * _gamma

            _t += 1.

            if (index + 1) % 1000 == 0:
                LOG(INFO, "%d instances is trained" % (index + 1))

            destroy_instance(instance)

        LOG(INFO, "%d instances is trained" % (index + 1))
        LOG(INFO, "Parameters norm %f" % norm(model.w))
Example #3
0
def viterbi(model, instance):
    '''
    Decode the best-scoring tag sequence for *instance* with the
    Viterbi algorithm and return it as a list of tag indices, ordered
    from the first token to the last.
    '''
    length = len(instance)
    nr_tags = model.nr_tags
    nr_attrs = model.nr_attrs

    # Build the per-instance caches, score the lattice, then free the
    # instance-level caches again.
    build_instance(model.attrs, model.tags, instance, False)
    g0, g = build_score_cache(model.w, length, nr_tags, nr_attrs, instance)
    destroy_instance(instance)

    score, back = argmax(g0, g, length, nr_tags)

    # Start from the best-scoring tag at the final position and follow
    # the back-pointers towards the front of the sequence.
    best = score[length - 1].argmax()
    path = [best]
    for pos in range(length - 1, 0, -1):
        best = back[pos][best]
        path.append(best)

    path.reverse()
    return path
Example #4
0
def viterbi(model, instance):
    '''
    Run Viterbi decoding on *instance* and return the most probable
    tag index sequence (first token first).
    '''
    n = len(instance)
    T = model.nr_tags
    A = model.nr_attrs

    # Prepare the instance caches, compute the score lattice, release
    # the caches.
    build_instance(model.attrs, model.tags, instance, False)
    g0, g = build_score_cache(model.w, n, T, A, instance)
    destroy_instance(instance)

    scores, backptr = argmax(g0, g, n, T)

    # Trace the back-pointers from the best final tag, collecting the
    # tags in reverse order.
    tag = scores[n - 1].argmax()
    reversed_path = []
    pos = n - 1
    while pos >= 0:
        reversed_path.append(tag)
        tag = backptr[pos][tag]
        pos -= 1

    return reversed_path[::-1]