# Example no. 1 (score: 0)
# File: logreg.py  Project: osdf/utils
def demo_mnist(opt, epochs=10, btsz=100,
        lr = 0.1, beta = 0.9,
        eta0 = 0.0005, mu=0.02, lmbd=0.99,
        w=None):
    """
    """
    from misc import load_mnist
    from losses import zero_one
    from opt import msgd, smd
    #
    trainset, valset, testset = load_mnist()
    inputs, targets = trainset
    test_in, test_tar = testset
    #
    di = inputs.shape[1]
    dt = np.max(targets) + 1
    # setup weights
    if w is None:
        if opt is smd:
            # needs complex initialization
            weights = np.zeros((di*dt+dt), dtype=np.complex)
            weights[:] = 0.001 * np.random.randn(di*dt+dt)
        else:
            weights = 0.* np.random.randn(di*dt+dt)
        weights[-dt:] = 0.
    else:
        print "Continue with provided weights w."
        weights = w
    #
    print "Training starts..."
    params = dict()
    params["x0"] = weights
    if opt is msgd or opt is smd:
        params["fandprime"] = score_grad_xe
        params["nos"] = inputs.shape[0]
        params["args"] = {}
        params["batch_args"] = {"inputs": inputs, "targets": targets}
        params["epochs"] = epochs
        params["btsz"] = btsz
        # msgd
        params["beta"] = beta
        params["lr"] = lr 
        # smd
        params["eta0"] = eta0
        params["mu"] = mu
        params["lmbd"] = lmbd
        params["verbose"] = True
    else:
        # opt from scipy
        params["func"] = score_xe
        params["fprime"] = grad_xe
        params["args"] = (inputs, targets)
        params["maxfun"] = epochs
        params["m"] = 50
    weights = opt(**params)[0]
    print "Training done."
    #
    print "Test set preformance:",\
            zero_one(predict(weights, test_in), test_tar)
    return weights
# Example no. 2 (score: 0)
def demo_mnist(opt,
               epochs=10,
               btsz=100,
               lr=0.1,
               beta=0.9,
               eta0=0.0005,
               mu=0.02,
               lmbd=0.99,
               w=None):
    """
    """
    from misc import load_mnist
    from losses import zero_one
    from opt import msgd, smd
    #
    trainset, valset, testset = load_mnist()
    inputs, targets = trainset
    test_in, test_tar = testset
    #
    di = inputs.shape[1]
    dt = np.max(targets) + 1
    # setup weights
    if w is None:
        if opt is smd:
            # needs complex initialization
            weights = np.zeros((di * dt + dt), dtype=np.complex)
            weights[:] = 0.001 * np.random.randn(di * dt + dt)
        else:
            weights = 0. * np.random.randn(di * dt + dt)
        weights[-dt:] = 0.
    else:
        print "Continue with provided weights w."
        weights = w
    #
    print "Training starts..."
    params = dict()
    params["x0"] = weights
    if opt is msgd or opt is smd:
        params["fandprime"] = score_grad_xe
        params["nos"] = inputs.shape[0]
        params["args"] = {}
        params["batch_args"] = {"inputs": inputs, "targets": targets}
        params["epochs"] = epochs
        params["btsz"] = btsz
        # msgd
        params["beta"] = beta
        params["lr"] = lr
        # smd
        params["eta0"] = eta0
        params["mu"] = mu
        params["lmbd"] = lmbd
        params["verbose"] = True
    else:
        # opt from scipy
        params["func"] = score_xe
        params["fprime"] = grad_xe
        params["args"] = (inputs, targets)
        params["maxfun"] = epochs
        params["m"] = 50
    weights = opt(**params)[0]
    print "Training done."
    #
    print "Test set preformance:",\
            zero_one(predict(weights, test_in), test_tar)
    return weights
# Example no. 3 (score: 0)
# File: lafnn.py  Project: osdf/utils
def demo_mnist(hiddens, opt, l2=1e-6, epochs=10, 
        lr=1e-4, beta=0., btsz=128, eta0 = 0.0005, 
        mu=0.02, lmbd=0.99, weightvar=0.01, 
        w=None):
    """
    """
    from misc import sigmoid, load_mnist
    from losses import xe, zero_one
    from opt import msgd, smd 
    #
    trainset, valset, testset = load_mnist()
    inputs, targets = trainset
    test_in, test_tar = testset
    di = inputs.shape[1]
    dt = np.max(targets) + 1
    structure = {}
    structure["hdim"] = hiddens
    structure["odim"] = dt
    structure["af"] = np.tanh
    structure["score"] = xe
    structure["l2"] = l2
    # get weight initialized
    if w is None:
        weights = np.zeros(di*hiddens + hiddens + hiddens*dt + dt)
        weights[:hiddens*di] = 0.001 * np.random.randn(di*hiddens)
        weights[hiddens*(di+1):-dt] = 0.001 * np.random.randn(hiddens*dt)
        if opt is smd:
            # needs complex weights
            weights = np.asarray(weights, dtype=np.complex)
    else:
        print "Continue with provided weights w."
        weights = w
    #
    print "Training starts..."
    params = dict()
    params["x0"] = weights
    params["fandprime"] = score_grad
    if opt is msgd or opt is smd:
        params["nos"] = inputs.shape[0]
        params["args"] = {"structure": structure}
        params["batch_args"] = {"inputs": inputs, "targets": targets}
        params["epochs"] = epochs
        params["btsz"] = btsz
        params["verbose"] = True
        # for msgd
        params["lr"] = lr
        params["beta"] = beta
        # for smd
        params["eta0"] = eta0
        params["mu"] = mu
        params["lmbd"] = lmbd
    else:
        params["args"] = (structure, inputs, targets)
        params["maxfun"] = epochs
        # for lbfgs
        params["m"] = 25
    
    weights = opt(**params)[0]
    print "Training done."
    
    # Evaluate on test set
    test_perf = zero_one(predict(weights, structure, test_in), test_tar)
    print "Test set performance:", test_perf
    return weights
# Example no. 4 (score: 0)
def demo_mnist(hiddens,
               opt,
               l2=1e-6,
               epochs=10,
               lr=1e-4,
               beta=0.,
               btsz=128,
               eta0=0.0005,
               mu=0.02,
               lmbd=0.99,
               weightvar=0.01,
               w=None):
    """
    """
    from misc import sigmoid, load_mnist
    from losses import xe, zero_one
    from opt import msgd, smd
    #
    trainset, valset, testset = load_mnist()
    inputs, targets = trainset
    test_in, test_tar = testset
    di = inputs.shape[1]
    dt = np.max(targets) + 1
    structure = {}
    structure["hdim"] = hiddens
    structure["odim"] = dt
    structure["af"] = np.tanh
    structure["score"] = xe
    structure["l2"] = l2
    # get weight initialized
    if w is None:
        weights = np.zeros(di * hiddens + hiddens + hiddens * dt + dt)
        weights[:hiddens * di] = 0.001 * np.random.randn(di * hiddens)
        weights[hiddens * (di + 1):-dt] = 0.001 * np.random.randn(hiddens * dt)
        if opt is smd:
            # needs complex weights
            weights = np.asarray(weights, dtype=np.complex)
    else:
        print "Continue with provided weights w."
        weights = w
    #
    print "Training starts..."
    params = dict()
    params["x0"] = weights
    params["fandprime"] = score_grad
    if opt is msgd or opt is smd:
        params["nos"] = inputs.shape[0]
        params["args"] = {"structure": structure}
        params["batch_args"] = {"inputs": inputs, "targets": targets}
        params["epochs"] = epochs
        params["btsz"] = btsz
        params["verbose"] = True
        # for msgd
        params["lr"] = lr
        params["beta"] = beta
        # for smd
        params["eta0"] = eta0
        params["mu"] = mu
        params["lmbd"] = lmbd
    else:
        params["args"] = (structure, inputs, targets)
        params["maxfun"] = epochs
        # for lbfgs
        params["m"] = 25

    weights = opt(**params)[0]
    print "Training done."

    # Evaluate on test set
    test_perf = zero_one(predict(weights, structure, test_in), test_tar)
    print "Test set performance:", test_perf
    return weights