Example #1
def main():
    # -- top-level parameters of this script
    dtype = 'float32'  # XXX
    n_examples = 50000
    online_batch_size = 1
    online_epochs = 2
    batch_epochs = 30
    lbfgs_m = 20

    # -- load and prepare the data set
    data_view = mnist.views.OfficialVectorClassification(x_dtype=dtype)
    n_classes = 10
    x = data_view.train.x[:n_examples]
    y = data_view.train.y[:n_examples]
    y1 = -1 * ones((len(y), n_classes)).astype(dtype)
    y1[arange(len(y)), y] = 1

    # -- initialize the SVM model
    w = zeros((x.shape[1], n_classes), dtype=dtype)
    b = zeros(n_classes, dtype=dtype)

    def svm(ww, bb, xx=x, yy=y1):
        # -- one vs. all linear SVM loss
        margin = yy * (dot(xx, ww) + bb)
        hinge = maximum(0, 1 - margin)
        cost = hinge.mean(axis=0).sum()
        return cost

    # -- stage-1 optimization by stochastic gradient descent
    print 'Starting SGD'
    n_batches = n_examples / online_batch_size
    w, b = fmin_sgd(
        svm,
        (w, b),
        streams={
            'xx': x.reshape((n_batches, online_batch_size, x.shape[1])),
            'yy': y1.reshape((n_batches, online_batch_size, y1.shape[1]))
        },
        loops=online_epochs,
        stepsize=0.001,
        print_interval=10000,
    )

    print 'SGD complete, about to start L-BFGS'
    show_filters(w.T, (28, 28), (2, 5))

    # -- stage-2 optimization by L-BFGS
    print 'Starting L-BFGS'
    w, b = fmin_l_bfgs_b(svm, (w, b), maxfun=batch_epochs, iprint=1, m=lbfgs_m)

    print 'L-BFGS complete'
    show_filters(w.T, (28, 28), (2, 5))
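
The trained (w, b) above gives one margin score per class; a minimal NumPy sketch of how predictions and accuracy could be read off it (the ova_predict helper below is assumed for illustration, not part of the example):

import numpy as np

def ova_predict(w, b, xx):
    # one-vs-all decision rule: pick the class with the largest score
    return np.argmax(np.dot(xx, w) + b, axis=1)

# hypothetical usage against the training slice loaded above:
# acc = np.mean(ova_predict(w, b, x) == y)
# print 'train accuracy: %.3f' % acc
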
Example #2
    def fit(self, x, y, xw=None):
        """
        x - n_examples x n_features design matrix.
        y - vector of integer labels
        xw - matrix of real-valued incoming biases obtained
            by multiplying the existing weight vectors by x
        """
        assert set(y) <= set([-1, 1])

        if x.shape[0] != y.shape[0]:
            raise ValueError('length mismatch between x and y')
        n_examples, n_features = x.shape
        if n_features != self.n_features:
            raise ValueError('n_feature mismatch', (n_features,
                self.n_features))

        weights = self.weights
        bias = self.bias
        alpha = self.alpha

        x = x.astype(self.dtype)
        y = y.astype(self.dtype)

        xw = self.as_xw(x, xw)
        print 'WARNING: IncrementalSVM should use alpha0, n_sgd_iters'

        # -- warm up with some sgd
        weights, bias, alpha, = autodiff.fmin_sgd(
                lambda w, b, a, xi, yi, xwi:
                    binary_svm_hinge_loss(xi, yi, w, b, a, None,
                        None,
                        self.l2_regularization),
                (weights, bias, alpha),
                streams={
                    'xi': x.reshape((n_examples, 1, x.shape[1])),
                    'yi': y.reshape((n_examples, 1)),
                    },
                stepsize=0.01,
                loops=max(1, 100000 // len(x)),
                )

        # -- fine-tune without alpha by L-BFGS
        weights, bias, alpha, = autodiff.fmin_l_bfgs_b(
                lambda w, b, a:
                    binary_svm_hinge_loss(x, y,
                        w, b, a, None, None,
                        self.l2_regularization),
                (weights, bias, alpha),
                # -- the graph is tiny, time spent optimizing it is wasted.
                theano_mode=theano.Mode(linker='cvm', optimizer='fast_run'),
                **self.bfgs_kwargs)


        self.weights = weights
        self.bias = bias
        self.alpha = alpha
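
binary_svm_hinge_loss is not shown in this snippet; judging from the one-vs-all hinge cost in Example #1, a plausible sketch of the binary case (ignoring the alpha / xw / warm-start arguments, which are passed as None here) could look like the following -- an assumption, not the actual helper:

import numpy as np

def binary_hinge_loss_sketch(x, y, w, b, l2_regularization):
    # y is a vector of +/-1 labels; examples with margin < 1 contribute to the hinge
    margin = y * (np.dot(x, w) + b)
    hinge = np.maximum(0, 1 - margin).mean()
    return hinge + l2_regularization * (w ** 2).sum()
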
Example #3
def main():
    # -- top-level parameters of this script
    dtype = 'float32'  # XXX
    n_examples = 50000
    online_batch_size = 1
    online_epochs = 2
    batch_epochs = 30
    lbfgs_m = 20
    n_mlp_hiddens = [200]  # -- one entry per hidden layer

    # -- load and prepare the data set
    data_view = mnist.views.OfficialVectorClassification(x_dtype=dtype)
    n_classes = 10
    x = data_view.train.x[:n_examples]
    y = data_view.train.y[:n_examples]
    y1 = -1 * ones((len(y), n_classes)).astype(dtype)
    y1[arange(len(y)), y] = 1

    # -- allocate the model by running one example through it
    init_params = {}
    mlp_svm(init_params, x[:1], y[:1], n_mlp_hiddens, n_classes)

    if online_epochs:
        # -- stage-1 optimization by stochastic gradient descent
        print 'Starting SGD'
        n_batches = n_examples / online_batch_size
        stage1_params, = fmin_sgd(mlp_svm, (init_params,),
                streams={
                    'x': x.reshape((n_batches, online_batch_size, x.shape[1])),
                    'y1': y1.reshape((n_batches, online_batch_size, y1.shape[1]))},
                loops=online_epochs,
                stepsize=0.001,
                print_interval=10000,
                )

        print 'SGD complete, about to start L-BFGS'
        show_filters(stage1_params['mlp']['weights'][0].T, (28, 28), (8, 25,))
    else:
        print 'Skipping stage-1 SGD'
        stage1_params = init_params

    # -- stage-2 optimization by L-BFGS
    if batch_epochs:
        def batch_mlp_svm(p):
            return mlp_svm(p, x, y1)

        print 'Starting L-BFGS'
        stage2_params, = fmin_l_bfgs_b(batch_mlp_svm,
                args=(stage1_params,),
                maxfun=batch_epochs,
                iprint=1,
                m=lbfgs_m)

        print 'L-BFGS complete'
        show_filters(stage2_params['mlp']['weights'][0].T, (28, 28), (8, 25,))
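
mlp_svm is not shown here, but the "allocate the model by running one example through it" pattern and the stage1_params['mlp']['weights'][0] access suggest the parameters live in a nested dict of arrays. A sketch of what that container might look like for the single 200-unit hidden layer above (key names other than 'mlp' and 'weights' are assumptions):

import numpy as np

init_params_sketch = {
    'mlp': {
        'weights': [np.zeros((784, 200), dtype='float32')],  # one entry per hidden layer
        'biases': [np.zeros(200, dtype='float32')],          # name assumed
    },
    # the one-vs-all SVM output-layer parameters would live alongside (structure assumed)
}
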
Example #4
def main():
    # -- top-level parameters of this script
    dtype = "float32"  # XXX
    n_examples = 50000
    online_batch_size = 1
    online_epochs = 2
    batch_epochs = 30
    lbfgs_m = 20

    # -- load and prepare the data set
    data_view = mnist.views.OfficialVectorClassification(x_dtype=dtype)
    n_classes = 10
    x = data_view.train.x[:n_examples]
    y = data_view.train.y[:n_examples]
    y1 = -1 * ones((len(y), n_classes)).astype(dtype)
    y1[arange(len(y)), y] = 1

    # -- initialize the SVM model
    w = zeros((x.shape[1], n_classes), dtype=dtype)
    b = zeros(n_classes, dtype=dtype)

    def svm(ww, bb, xx=x, yy=y1):
        # -- one vs. all linear SVM loss
        margin = yy * (dot(xx, ww) + bb)
        hinge = maximum(0, 1 - margin)
        cost = hinge.mean(axis=0).sum()
        return cost

    # -- stage-1 optimization by stochastic gradient descent
    print "Starting SGD"
    n_batches = n_examples / online_batch_size
    w, b = fmin_sgd(
        svm,
        (w, b),
        streams={
            "xx": x.reshape((n_batches, online_batch_size, x.shape[1])),
            "yy": y1.reshape((n_batches, online_batch_size, y1.shape[1])),
        },
        loops=online_epochs,
        stepsize=0.001,
        print_interval=10000,
    )

    print "SGD complete, about to start L-BFGS"
    show_filters(w.T, (28, 28), (2, 5))

    # -- stage-2 optimization by L-BFGS
    print "Starting L-BFGS"
    w, b = fmin_l_bfgs_b(svm, (w, b), maxfun=batch_epochs, iprint=1, m=lbfgs_m)

    print "L-BFGS complete"
    show_filters(w.T, (28, 28), (2, 5))
Example #5
    def fit(self, x, y, xw=None):
        """
        x - n_examples x n_features design matrix.
        y - vector of integer labels
        xw - matrix of real-valued incoming biases obtained
            by multiplying the existing weight vectors by x
        """
        assert set(y) <= set([-1, 1])

        if x.shape[0] != y.shape[0]:
            raise ValueError('length mismatch between x and y')
        n_examples, n_features = x.shape
        if n_features != self.n_features:
            raise ValueError('n_feature mismatch',
                             (n_features, self.n_features))

        weights = self.weights
        bias = self.bias
        alpha = self.alpha

        x = x.astype(self.dtype)
        y = y.astype(self.dtype)

        xw = self.as_xw(x, xw)
        print 'WARNING: IncrementalSVM should use alpha0, n_sgd_iters'

        # -- warm up with some sgd
        weights, bias, alpha, = autodiff.fmin_sgd(
            lambda w, b, a, xi, yi, xwi: binary_svm_hinge_loss(
                xi, yi, w, b, a, None, None, self.l2_regularization),
            (weights, bias, alpha),
            streams={
                'xi': x.reshape((n_examples, 1, x.shape[1])),
                'yi': y.reshape((n_examples, 1)),
            },
            stepsize=0.01,
            loops=max(1, 100000 // len(x)),
        )

        # -- fine-tune without alpha by L-BFGS
        weights, bias, alpha, = autodiff.fmin_l_bfgs_b(
            lambda w, b, a: binary_svm_hinge_loss(x, y, w, b, a, None, None,
                                                  self.l2_regularization),
            (weights, bias, alpha),
            # -- the graph is tiny, time spent optimizing it is wasted.
            theano_mode=theano.Mode(linker='cvm', optimizer='fast_run'),
            **self.bfgs_kwargs)

        self.weights = weights
        self.bias = bias
        self.alpha = alpha
Example #6
def fit_sgd_0(weights, bias, x, y, l2_regularization, n_iters,
        print_interval):
    """
    Refine `weights` and `bias` by n_iters steps of SGD
    """
    if n_iters <= 0:
        return weights, bias

    n_examples = len(x)
    n_features, n_classes = weights.shape
    alpha0 = np.empty((0, n_classes), dtype=weights.dtype)

    # -- use the first few elements of x to estimate the average
    #    example norm
    # -- fixing these learning rates makes sense to me because the hinge
    #    loss bounds the slope of the function being optimized; the only
    #    remaining variable is the norm / magnitude of the data.
    avg_w_norm = np.mean(np.sqrt((x[:200] ** 2).sum(axis=1)))
    step_size_w = 0.01 / (avg_w_norm + 1e-8)
    step_size_b = 0.01
    step_size_a = 0.0

    weights, bias, alpha0, = autodiff.fmin_sgd(
            lambda w, b, a, xx, yy1:
                multi_svm_hinge_loss(xx, yy1, w, b, a,
                    None, # xwi,
                    None, # prev_w_l2_sqr,
                    l2_regularization,
                    None),
            (weights, bias, alpha0),
            streams={
                'xx': x.reshape((n_examples, 1, n_features)),
                'yy1': y.reshape((n_examples, 1, n_classes)),
                },
            print_interval=print_interval,
            step_size=(step_size_w, step_size_b, step_size_a),
            step_size_backoff=0.1,
            loops=n_iters / float(len(x)),
            theano_mode=theano.Mode(
                linker='cvm_nogc',
                #linker='c|py',
                optimizer='fast_run').excluding('gpu'),
            theano_device='cpu',
            floatX=x.dtype,
            )
    return weights, bias
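
A hypothetical call to fit_sgd_0, assuming MNIST-like float32 inputs and the +/-1 one-hot label matrix y1 built in the other examples (all shapes and values below are illustrative only):

import numpy as np

weights = np.zeros((784, 10), dtype='float32')
bias = np.zeros(10, dtype='float32')
weights, bias = fit_sgd_0(weights, bias, x, y1,
                          l2_regularization=1e-4,  # value assumed for illustration
                          n_iters=50000,
                          print_interval=10000)
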
Example #7
def convnet_prediction(W_fb, b_fb, V, c, W, b, x):
    layer1 = tanh_conv_layer(W_fb, b_fb, x)
    layer1_size = np.prod(layer1.shape[1:])
    layer2 = tanh_layer(V, c,
            np.reshape(layer1, (x.shape[0], layer1_size)))
    prediction = ova_svm_prediction(W, b, layer2)
    return prediction

def convnet_cost(W_fb, b_fb, V, c, W, b, x, y1):
    layer1 = tanh_conv_layer(W_fb, b_fb, x)
    layer1_size = np.prod(layer1.shape[1:])
    layer2 = tanh_layer(V, c,
            np.reshape(layer1, (x.shape[0], layer1_size)))
    cost = ova_svm_cost(W, b, layer2, y1)
    return cost

print convnet_cost(W_fb, b_fb, V, c, W, b, x[:3], y1[:3])

online_batch_size = 1
n_batches = n_examples / online_batch_size
W_fb, b_fb, V, c, W, b = autodiff.fmin_sgd(convnet_cost, (W_fb, b_fb, V, c, W, b),
            streams={
                'x': x.reshape((n_batches, online_batch_size,) + x.shape[1:]),
                'y1': y1.reshape((n_batches, online_batch_size, y1.shape[1]))},
            loops=5,
            stepsize=0.01,
            print_interval=1000,
            )
print 'SGD took %.2f seconds' % (time.time() - t0)

Example #8
def main():
    # -- top-level parameters of this script
    n_hidden1 = n_hidden2 = 25
    dtype = "float32"
    n_examples = 10000
    online_batch_size = 1
    online_epochs = 3

    # -- TIP: partial creates a new function with some parameters filled in
    # algo = partial(denoising_autoencoder_binary_x, noise_level=0.3)
    algo = logistic_autoencoder_binary_x

    batch_epochs = 10
    lbfgs_m = 20

    n_hidden = n_hidden1 * n_hidden2
    rng = np.random.RandomState(123)

    data_view = mnist.views.OfficialVectorClassification(x_dtype=dtype)
    x = data_view.train.x[:n_examples]
    n_examples, n_visible = x.shape
    x_img_res = 28, 28

    # -- uncomment this line to see sample images from the data set
    # show_filters(x[:100], x_img_res, (10, 10))

    # -- create a new model  (w, visbias, hidbias)
    w = rng.uniform(
        low=-4 * np.sqrt(6.0 / (n_hidden + n_visible)),
        high=4 * np.sqrt(6.0 / (n_hidden + n_visible)),
        size=(n_visible, n_hidden),
    ).astype(dtype)
    visbias = np.zeros(n_visible).astype(dtype)
    hidbias = np.zeros(n_hidden).astype(dtype)

    # show_filters(w.T, x_img_res, (n_hidden1, n_hidden2))
    x_stream = x.reshape((n_examples / online_batch_size, online_batch_size, x.shape[1]))

    def train_criterion(ww, hbias, vbias, x_i=x):
        cost, hid = algo(x_i, ww, hbias, vbias)
        l1_cost = abs(ww).sum() * 0.0  # -- increase 0.0 to enable an L1 penalty
        l2_cost = (ww ** 2).sum() * 0.0  # -- increase 0.0 to enable an L2 penalty
        return cost.mean() + l1_cost + l2_cost

    # -- ONLINE TRAINING
    for epoch in range(online_epochs):
        t0 = time.time()
        w, hidbias, visbias = autodiff.fmin_sgd(
            train_criterion,
            args=(w, hidbias, visbias),
            stream=x_stream,  # -- fmin_sgd will loop through this once
            stepsize=0.005,  # -- QQ: you should always tune this
            print_interval=1000,
        )
        print "Online training epoch %i took %f seconds" % (epoch, time.time() - t0)
        show_filters(w.T, x_img_res, (n_hidden1, n_hidden2))

    # -- BATCH TRAINING
    w, hidbias, visbias = autodiff.fmin_l_bfgs_b(
        train_criterion,
        args=(w, hidbias, visbias),
        # -- scipy.fmin_l_bfgs_b kwargs follow
        maxfun=batch_epochs,
        iprint=1,  # -- 1 for verbose, 0 for normal, -1 for quiet
        m=lbfgs_m,  # -- how well to approximate the Hessian
    )

    show_filters(w.T, x_img_res, (n_hidden1, n_hidden2))
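
logistic_autoencoder_binary_x is not defined in this snippet; the single weight matrix in (w, hidbias, visbias) suggests a tied-weights logistic autoencoder. Under that assumption, reconstructions from the trained model could be inspected roughly like this (a sketch of the assumed model form, not code from the example):

import numpy as np

def reconstruct(xx, w, hidbias, visbias):
    # assumed tied-weights form: hid = sigmoid(x.w + hidbias), rec = sigmoid(hid.w^T + visbias)
    sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))
    hid = sigmoid(np.dot(xx, w) + hidbias)
    return sigmoid(np.dot(hid, w.T) + visbias)

# e.g. show_filters(reconstruct(x[:100], w, hidbias, visbias), x_img_res, (10, 10))
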
Example #9
def main():
    # -- top-level parameters of this script
    n_hidden1 = n_hidden2 = 25
    dtype = 'float32'
    n_examples = 10000
    online_batch_size = 1
    online_epochs = 3

    # -- TIP: partial creates a new function with some parameters filled in
    # algo = partial(denoising_autoencoder_binary_x, noise_level=0.3)
    algo = logistic_autoencoder_binary_x

    batch_epochs = 10
    lbfgs_m = 20

    n_hidden = n_hidden1 * n_hidden2
    rng = np.random.RandomState(123)

    data_view = mnist.views.OfficialVectorClassification(x_dtype=dtype)
    x = data_view.train.x[:n_examples]
    n_examples, n_visible = x.shape
    x_img_res = 28, 28

    # -- uncomment this line to see sample images from the data set
    # show_filters(x[:100], x_img_res, (10, 10))

    # -- create a new model  (w, visbias, hidbias)
    w = rng.uniform(low=-4 * np.sqrt(6. / (n_hidden + n_visible)),
                    high=4 * np.sqrt(6. / (n_hidden + n_visible)),
                    size=(n_visible, n_hidden)).astype(dtype)
    visbias = np.zeros(n_visible).astype(dtype)
    hidbias = np.zeros(n_hidden).astype(dtype)

    # show_filters(w.T, x_img_res, (n_hidden1, n_hidden2))
    x_stream = x.reshape(
        (n_examples / online_batch_size, online_batch_size, x.shape[1]))

    def train_criterion(ww, hbias, vbias, x_i=x):
        cost, hid = algo(x_i, ww, hbias, vbias)
        l1_cost = abs(ww).sum() * 0.0  # -- increase 0.0 to enable an L1 penalty
        l2_cost = (ww**2).sum() * 0.0  # -- increase 0.0 to enable an L2 penalty
        return cost.mean() + l1_cost + l2_cost

    # -- ONLINE TRAINING
    for epoch in range(online_epochs):
        t0 = time.time()
        w, hidbias, visbias = autodiff.fmin_sgd(
            train_criterion,
            args=(w, hidbias, visbias),
            stream=x_stream,  # -- fmin_sgd will loop through this once
            stepsize=0.005,  # -- QQ: you should always tune this
            print_interval=1000,
        )
        print 'Online training epoch %i took %f seconds' % (epoch,
                                                            time.time() - t0)
        show_filters(w.T, x_img_res, (n_hidden1, n_hidden2))

    # -- BATCH TRAINING
    w, hidbias, visbias = autodiff.fmin_l_bfgs_b(
        train_criterion,
        args=(w, hidbias, visbias),
        # -- scipy.fmin_l_bfgs_b kwargs follow
        maxfun=batch_epochs,
        iprint=1,  # -- 1 for verbose, 0 for normal, -1 for quiet
        m=lbfgs_m,  # -- how well to approximate the Hessian
    )

    show_filters(w.T, x_img_res, (n_hidden1, n_hidden2))
Example #10
    return prediction


def convnet_cost(W_fb, b_fb, V, c, W, b, x, y1):
    layer1 = tanh_conv_layer(W_fb, b_fb, x)
    layer1_size = np.prod(layer1.shape[1:])
    layer2 = tanh_layer(V, c, np.reshape(layer1, (x.shape[0], layer1_size)))
    cost = ova_svm_cost(W, b, layer2, y1)
    return cost


print convnet_cost(W_fb, b_fb, V, c, W, b, x[:3], y1[:3])

online_batch_size = 1
n_batches = n_examples / online_batch_size
W_fb, b_fb, V, c, W, b = autodiff.fmin_sgd(
    convnet_cost,
    (W_fb, b_fb, V, c, W, b),
    streams={
        'x': x.reshape((n_batches, online_batch_size) + x.shape[1:]),
        'y1': y1.reshape((n_batches, online_batch_size, y1.shape[1]))
    },
    loops=5,
    stepsize=0.01,
    print_interval=1000,
)
print 'SGD took %.2f seconds' % (time.time() - t0)
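
tanh_layer and ova_svm_prediction are referenced but not defined in these convnet snippets. A hedged sketch of the dense pieces, assuming the conventional forms (tanh_conv_layer is omitted because its stride / padding details are not visible here):

import numpy as np

def tanh_layer(V, c, xx):
    # assumed: fully-connected affine layer followed by tanh
    return np.tanh(np.dot(xx, V) + c)

def ova_svm_prediction(W, b, xx):
    # assumed: one-vs-all decision rule, as in the linear SVM examples above
    return np.argmax(np.dot(xx, W) + b, axis=1)
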
Example #11
# How big does \alpha have to be to make any difference?

# initialize the model
W = zeros((x.shape[1], n_classes), dtype=dtype)
b = zeros(n_classes, dtype=dtype)

# -- do n_online_loops passes through the data set doing SGD
#    This can be faster at the beginning than L-BFGS
t0 = time.time()
online_batch_size = 1
n_online_epochs = 1
n_batches = n_examples / online_batch_size
W, b = autodiff.fmin_sgd(ova_svm_cost, (W, b),
            streams={
                'x': x.reshape((n_batches, online_batch_size, x.shape[1])),
                'y1': y1.reshape((n_batches, online_batch_size, y1.shape[1]))},
            loops=n_online_epochs,
            step_size=0.001,
            print_interval=1000,
            )
print 'SGD took %.2f seconds' % (time.time() - t0)
show_filters(W.T, img_shape, (2, 5))

# -- L-BFGS optimization of our SVM cost.

def batch_criterion(W, b):
    return ova_svm_cost(W, b, x, y1)

W, b = autodiff.fmin_l_bfgs_b(batch_criterion, (W, b), maxfun=20, m=20, iprint=1)

print 'final_cost', batch_criterion(W, b)
# -- N.B. the output from this command comes from Fortran, so IPython does not see it.
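
ova_svm_cost is likewise undefined in this snippet; the svm() closure in Example #1 computes the same one-vs-all hinge cost, so a matching sketch (an assumption about the helper, not its actual source) would be:

import numpy as np

def ova_svm_cost(W, b, xx, yy1):
    # yy1 is the +/-1 one-hot label matrix built as in the other examples
    margin = yy1 * (np.dot(xx, W) + b)
    hinge = np.maximum(0, 1 - margin)
    return hinge.mean(axis=0).sum()
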