Example #1
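These listings are standalone demo scripts and omit their import preamble. A plausible preamble for all three examples, assuming the pyautodiff and skdata packages (the utils module providing show_filters is an assumption; it stands in for whatever small filter-plotting helper ships with the demos):

import time
from functools import partial

import numpy as np
from numpy import arange, dot, maximum, ones, zeros

import autodiff
from autodiff import fmin_sgd, fmin_l_bfgs_b
from skdata import mnist
from utils import show_filters  # -- assumption: small filter-plotting helper
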
def main():
    # -- top-level parameters of this script
    dtype = 'float32'  # XXX
    n_examples = 50000
    online_batch_size = 1
    online_epochs = 2
    batch_epochs = 30
    lbfgs_m = 20

    # -- load and prepare the data set
    data_view = mnist.views.OfficialVectorClassification(x_dtype=dtype)
    n_classes = 10
    x = data_view.train.x[:n_examples]
    y = data_view.train.y[:n_examples]
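    # -- encode the integer labels y as +/-1 one-vs-all targets y1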
    y1 = -1 * ones((len(y), n_classes)).astype(dtype)
    y1[arange(len(y)), y] = 1

    # --initialize the SVM model
    w = zeros((x.shape[1], n_classes), dtype=dtype)
    b = zeros(n_classes, dtype=dtype)

    def svm(ww, bb, xx=x, yy=y1):
        # -- one vs. all linear SVM loss
        margin = yy * (dot(xx, ww) + bb)
        hinge = maximum(0, 1 - margin)
        cost = hinge.mean(axis=0).sum()
        return cost

    # -- stage-1 optimization by stochastic gradient descent
    print('Starting SGD')
    n_batches = n_examples // online_batch_size
    w, b = fmin_sgd(
        svm,
        (w, b),
        streams={
            'xx': x.reshape((n_batches, online_batch_size, x.shape[1])),
            'yy': y1.reshape((n_batches, online_batch_size, y1.shape[1]))
        },
        loops=online_epochs,
        stepsize=0.001,
        print_interval=10000,
    )

    print('SGD complete, about to start L-BFGS')
    show_filters(w.T, (28, 28), (2, 5))

    # -- stage-2 optimization by L-BFGS
    print('Starting L-BFGS')
    w, b = fmin_l_bfgs_b(svm, (w, b), maxfun=batch_epochs, iprint=1, m=lbfgs_m)

    print('L-BFGS complete')
    show_filters(w.T, (28, 28), (2, 5))
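
Neither optimizer reports classification accuracy; the one-vs-all decision is simply the argmax over the ten class scores. A quick check that could be appended inside main() above (hypothetical, not part of the original demo):

    # -- hypothetical follow-up: training accuracy of the learned classifier
    pred = (dot(x, w) + b).argmax(axis=1)
    print('train accuracy: %.3f' % (pred == y).mean())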

Example #2
def main():
    # -- top-level parameters of this script
    dtype = 'float32'  # XXX
    n_examples = 50000
    online_batch_size = 1
    online_epochs = 2
    batch_epochs = 30
    lbfgs_m = 20
    n_mlp_hiddens = [200]  # -- one entry per hidden layer

    # -- load and prepare the data set
    data_view = mnist.views.OfficialVectorClassification(x_dtype=dtype)
    n_classes = 10
    x = data_view.train.x[:n_examples]
    y = data_view.train.y[:n_examples]
    y1 = -1 * ones((len(y), n_classes)).astype(dtype)
    y1[arange(len(y)), y] = 1

    # -- allocate the model by running one example through it
    init_params = {}
    mlp_svm(init_params, x[:1], y[:1], n_mlp_hiddens, n_classes)

    if online_epochs:
        # -- stage-1 optimization by stochastic gradient descent
        print('Starting SGD')
        n_batches = n_examples // online_batch_size
        stage1_params, = fmin_sgd(mlp_svm, (init_params,),
                streams={
                    'x': x.reshape((n_batches, online_batch_size, x.shape[1])),
                    'y1': y1.reshape((n_batches, online_batch_size, y1.shape[1]))},
                loops=online_epochs,
                stepsize=0.001,
                print_interval=10000,
                )

        print('SGD complete, about to start L-BFGS')
        show_filters(stage1_params['mlp']['weights'][0].T, (28, 28), (8, 25))
    else:
        print('Skipping stage-1 SGD')
        stage1_params = init_params

    # -- stage-2 optimization by L-BFGS
    if batch_epochs:
        def batch_mlp_svm(p):
            # -- evaluate the criterion on the full training set
            return mlp_svm(p, x, y1)

        print('Starting L-BFGS')
        stage2_params, = fmin_l_bfgs_b(batch_mlp_svm,
                args=(stage1_params,),
                maxfun=batch_epochs,
                iprint=1,
                m=lbfgs_m)

        print('L-BFGS complete')
        show_filters(stage2_params['mlp']['weights'][0].T, (28, 28), (8, 25))
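
mlp_svm itself is defined elsewhere in the demo package. As a reading aid, here is a minimal sketch with the same calling convention: it fills the params dict on the first call (which is how the "allocate the model by running one example through it" step works) and afterwards returns the one-vs-all hinge loss. The tanh hidden units, the uniform initialization, and the exact dict layout beyond params['mlp']['weights'] are assumptions, not the original code:

def mlp_svm(params, x, y1, n_hiddens=None, n_classes=None):
    # -- illustration only: not the original mlp_svm
    if 'mlp' not in params:
        # -- first call: allocate MLP and SVM parameters from the input shape
        rng = np.random.RandomState(0)
        sizes = [x.shape[1]] + list(n_hiddens)
        params['mlp'] = {
            'weights': [rng.uniform(-0.1, 0.1, (n_in, n_out)).astype(x.dtype)
                        for n_in, n_out in zip(sizes[:-1], sizes[1:])],
            'biases': [np.zeros(n_out, dtype=x.dtype) for n_out in sizes[1:]],
        }
        params['svm'] = {
            'w': np.zeros((sizes[-1], n_classes), dtype=x.dtype),
            'b': np.zeros(n_classes, dtype=x.dtype),
        }
    # -- forward pass: tanh hidden layers, then a linear one-vs-all SVM
    hid = x
    for w_i, b_i in zip(params['mlp']['weights'], params['mlp']['biases']):
        hid = np.tanh(np.dot(hid, w_i) + b_i)
    margin = y1 * (np.dot(hid, params['svm']['w']) + params['svm']['b'])
    return np.maximum(0, 1 - margin).mean(axis=0).sum()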

Example #3
def main():
    # -- top-level parameters of this script
    n_hidden1 = n_hidden2 = 25
    dtype = 'float32'
    n_examples = 10000
    online_batch_size = 1
    online_epochs = 3

    # -- TIP: partial creates a new function with some parameters filled in
    # algo = partial(denoising_autoencoder_binary_x, noise_level=0.3)
    algo = logistic_autoencoder_binary_x

    batch_epochs = 10
    lbfgs_m = 20

    n_hidden = n_hidden1 * n_hidden2
    rng = np.random.RandomState(123)

    data_view = mnist.views.OfficialVectorClassification(x_dtype=dtype)
    x = data_view.train.x[:n_examples]
    n_examples, n_visible = x.shape
    x_img_res = 28, 28

    # -- uncomment this line to see sample images from the data set
    # show_filters(x[:100], x_img_res, (10, 10))

    # -- create a new model  (w, visbias, hidbias)
    w = rng.uniform(low=-4 * np.sqrt(6. / (n_hidden + n_visible)),
                    high=4 * np.sqrt(6. / (n_hidden + n_visible)),
                    size=(n_visible, n_hidden)).astype(dtype)
    visbias = np.zeros(n_visible).astype(dtype)
    hidbias = np.zeros(n_hidden).astype(dtype)

    # show_filters(w.T, x_img_res, (n_hidden1, n_hidden2))
    x_stream = x.reshape(
        (n_examples // online_batch_size, online_batch_size, x.shape[1]))

    def train_criterion(ww, hbias, vbias, x_i=x):
        cost, hid = algo(x_i, ww, hbias, vbias)
        l1_cost = abs(ww).sum() * 0.0  # -- raise 0.0 to enforce l1 penalty
        l2_cost = (ww**2).sum() * 0.0  # -- raise 0.0 to enforce l2 penalty
        return cost.mean() + l1_cost + l2_cost

    # -- ONLINE TRAINING
    for epoch in range(online_epochs):
        t0 = time.time()
        w, hidbias, visbias = autodiff.fmin_sgd(
            train_criterion,
            args=(w, hidbias, visbias),
            stream=x_stream,  # -- fmin_sgd will loop through this once
            stepsize=0.005,  # -- TIP: you should always tune this
            print_interval=1000,
        )
        print('Online training epoch %i took %f seconds'
              % (epoch, time.time() - t0))
        show_filters(w.T, x_img_res, (n_hidden1, n_hidden2))

    # -- BATCH TRAINING
    w, hidbias, visbias = autodiff.fmin_l_bfgs_b(
        train_criterion,
        args=(w, hidbias, visbias),
        # -- scipy.fmin_l_bfgs_b kwargs follow
        maxfun=batch_epochs,
        iprint=1,  # -- 1 for verbose, 0 for normal, -1 for quiet
        m=lbfgs_m,  # -- how well to approximate the Hessian
    )

    show_filters(w.T, x_img_res, (n_hidden1, n_hidden2))
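
logistic_autoencoder_binary_x (and the denoising variant mentioned in the TIP above) also come from the demo package. A minimal sketch that matches the cost, hid = algo(x_i, ww, hbias, vbias) call signature used in train_criterion, assuming tied weights and a cross-entropy reconstruction cost (an illustration, not necessarily the original):

def logistic_autoencoder_binary_x(x, w, hidbias, visbias):
    # -- illustration only: encode with logistic (sigmoid) hidden units
    hid = 1.0 / (1.0 + np.exp(-(np.dot(x, w) + hidbias)))
    # -- decode with tied weights, score by per-example cross-entropy
    rec = 1.0 / (1.0 + np.exp(-(np.dot(hid, w.T) + visbias)))
    cost = -(x * np.log(rec) + (1 - x) * np.log(1 - rec)).sum(axis=1)
    return cost, hid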