Example #1
def main(argv=None):
    dataset = load_data()
    model = create_model()

    # Restore previously trained model weights (if they exist)
    chkpt = tf.train.latest_checkpoint(CHECKPOINT_DIR)
    if chkpt:
        model.load_weights(chkpt)
    else:
        raise RuntimeError('Predictions require a trained model!')

    test_images = dataset['test'].images[0:10]
    test_labels = dataset['test'].labels[0:10]

    predictions = (model.predict(test_images) > 0.8).astype(int)

    print(predictions)
    print(test_labels.astype(int))

    cleanup()
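
As a quick follow-up to Example #1, the sketch below compares the thresholded predictions against the labels to get a rough agreement figure. It is only a sketch: it assumes the `model`, `test_images`, and `test_labels` objects defined above (presumably a tf.keras model and NumPy arrays of matching shape, as the side-by-side print suggests) and keeps the 0.8 decision threshold from the example.

import numpy as np

# Reuse the thresholding rule from the example above.
predictions = (model.predict(test_images) > 0.8).astype(int)
labels = test_labels.astype(int)

# Fraction of matching entries between thresholded predictions and labels.
agreement = np.mean(predictions == labels)
print('agreement on the sampled test images:', agreement)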
Example #2
def test_dcca(learning_rate=0.01,
              L1_reg=0.0001,
              L2_reg=0.0001,
              n_epochs=1000,
              dataset='mnist.pkl.gz',
              batch_size=20,
              n_hidden=500):
    """
    Demonstrate stochastic gradient descent optimization for a multilayer
    perceptron

    This is demonstrated on MNIST.

    :type learning_rate: float
    :param learning_rate: learning rate used (factor for the stochastic
    gradient)

    :type L1_reg: float
    :param L1_reg: L1-norm's weight when added to the cost (see
    regularization)

    :type L2_reg: float
    :param L2_reg: L2-norm's weight when added to the cost (see
    regularization)

    :type n_epochs: int
    :param n_epochs: maximal number of epochs to run the optimizer

    :type dataset: string
    :param dataset: the path of the MNIST dataset file from
                 http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz


   """
    datasets = load_data(dataset)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size

    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print '... building the model'

    # allocate symbolic variables for the data
    index = T.lscalar()  # index to a [mini]batch
    x1 = T.matrix('x1')  # view 1: the rasterized images
    x2 = T.matrix('x2')  # view 2: the labels, one-hot encoded
    h1 = T.matrix('h1')  # top-layer representation of view 1
    h2 = T.matrix('h2')  # top-layer representation of view 2

    rng = numpy.random.RandomState(1234)

    # construct the two view-specific DCCA networks
    net1 = DCCA(rng=rng, input=x1, n_in=28 * 28, n_hidden=300, n_out=8)
    net2 = DCCA(rng=rng, input=x2, n_in=10, n_hidden=20, n_out=8)
    if 1:
        cost1 = (net1.correlation(h1, h2) + L1_reg * net1.L1 +
                 L2_reg * net1.L2_sqr)
        cost2 = (net2.correlation(h1, h2) + L1_reg * net2.L1 +
                 L2_reg * net2.L2_sqr)
    """
    test_model = theano.function(
        inputs=[index],
        outputs=net1.errors(y),
        givens={
            x: test_set_x[index * batch_size:(index + 1) * batch_size],
            y: test_set_y[index * batch_size:(index + 1) * batch_size]
        }
    )

    validate_model = theano.function(
        inputs=[index],
        outputs=classifier.errors(y),
        givens={
            x: valid_set_x[index * batch_size:(index + 1) * batch_size],
            y: valid_set_y[index * batch_size:(index + 1) * batch_size]
        }
    )
    """
    fprop_model1 = theano.function(inputs=[],
                                   outputs=(net1.hiddenLayer.output,
                                            net1.output),
                                   givens={x1: test_set_x})
    fprop_model2 = theano.function(inputs=[],
                                   outputs=(net2.hiddenLayer.output,
                                            net2.output),
                                   givens={x2: test_set_y})

    if 1:  # grad compute for net1 in theano
        U, V, D = theano.tensor.nlinalg.svd(net1.lastLayer.Tval)
        UVT = T.dot(U, V.T)
        Delta12 = T.dot(net1.lastLayer.SigmaHat11**(-0.5),
                        T.dot(UVT, net1.lastLayer.SigmaHat22**(-0.5)))
        UDUT = T.dot(U, T.dot(D, U.T))
        Delta11 = (-0.5) * T.dot(
            net1.lastLayer.SigmaHat11**(-0.5),
            T.dot(UDUT, net1.lastLayer.SigmaHat22**(-0.5)))
        grad_E_to_o = (1.0 / 8) * (2 * Delta11 * net1.lastLayer.H1bar +
                                   Delta12 * net1.lastLayer.H2bar)
        gparam1_W = (grad_E_to_o) * (net1.lastLayer.output *
                                     (1 - net1.lastLayer.output)) * (
                                         net1.hiddenLayer.output)
        gparam1_b = (grad_E_to_o) * (
            net1.lastLayer.output *
            (1 - net1.lastLayer.output)) * theano.shared(
                numpy.array([1.0], dtype=theano.config.floatX), borrow=True)
        #gparams1 = [T.grad(cost1, param) for param in net1.params]
        gparams1 = [T.grad(cost1, param) for param in net1.hiddenLayer.params]
        gparams1.append(gparam1_W)
        #gparams1.append(gparam1_b)
    if 1:  # grad compute for net2
        U, V, D = theano.tensor.nlinalg.svd(net2.lastLayer.Tval)
        UVT = T.dot(U, V.T)
        Delta12 = T.dot(net2.lastLayer.SigmaHat11**(-0.5),
                        T.dot(UVT, net2.lastLayer.SigmaHat22**(-0.5)))
        UDUT = T.dot(U, T.dot(D, U.T))
        Delta11 = (-0.5) * T.dot(net2.lastLayer.SigmaHat11**(-0.5),
                                 T.dot(UDUT, net2.lastLayer.SigmaHat22**(-0.5)))
        grad_E_to_o = (1.0 / 8) * (2 * Delta11 * net2.lastLayer.H1bar +
                                   Delta12 * net2.lastLayer.H2bar)
        gparam2_W = (grad_E_to_o) * (net2.lastLayer.output *
                                     (1 - net2.lastLayer.output)) * (
                                         net2.hiddenLayer.output)
        gparam2_b = (grad_E_to_o) * (net2.lastLayer.output *
                                     (1 - net2.lastLayer.output)) * 1
        #gparams1 = [T.grad(cost1, param) for param in net1.params]
        gparams2 = [T.grad(cost2, param) for param in net2.hiddenLayer.params]
        gparams2.append(gparam2_W)
        gparams2.append(gparam2_b)

        #gparams2 = [T.grad(cost2, param) for param in net2.params]

    updates1 = [(param, param - learning_rate * gparam)
                for param, gparam in zip(net1.params, gparams1)]
    updates2 = [(param, param - learning_rate * gparam)
                for param, gparam in zip(net2.params, gparams2)]

    ###############
    # TRAIN MODEL #
    ###############
    print '... training'

    # early-stopping parameters
    patience = 10000  # look at this many examples regardless
    patience_increase = 2  # wait this much longer when a new best is
    # found
    improvement_threshold = 0.995  # a relative improvement of this much is
    # considered significant
    validation_frequency = min(n_train_batches, patience / 2)
    # go through this many
    # minibatches before checking the network
    # on the validation set; in this case we
    # check every epoch

    best_validation_loss = numpy.inf
    best_iter = 0
    test_score = 0.
    start_time = time.clock()

    epoch = 0
    done_looping = False

    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        print 'epoch', epoch
        #net1.fprop(test_set_x)
        #net2.fprop(test_set_y)
        h1hidden, h1tmpval = fprop_model1()
        h2hidden, h2tmpval = fprop_model2()
        h1hidden = h1hidden.T
        h2hidden = h2hidden.T
        h1tmpval = h1tmpval.T
        h2tmpval = h2tmpval.T
        if 1:  # compute cost(H1, H2)

            H1 = h1tmpval
            H2 = h2tmpval
            m = H1.shape[1]

            H1bar = H1 - (1.0 / m) * numpy.dot(
                H1, numpy.ones((m, m), dtype=numpy.float32))
            H2bar = H2 - (1.0 / m) * numpy.dot(
                H2, numpy.ones((m, m), dtype=numpy.float32))
            SigmaHat12 = (1.0 / (m - 1)) * numpy.dot(H1bar, H2bar.T)
            SigmaHat11 = (1.0 / (m - 1)) * numpy.dot(H1bar, H1bar.T)
            SigmaHat11 = SigmaHat11 + 0.0001 * numpy.identity(
                SigmaHat11.shape[0], dtype=numpy.float32)
            SigmaHat22 = (1.0 / (m - 1)) * numpy.dot(H2bar, H2bar.T)
            SigmaHat22 = SigmaHat22 + 0.0001 * numpy.identity(
                SigmaHat22.shape[0], dtype=numpy.float32)

            Tval = numpy.dot(mat_pow(SigmaHat11),
                             numpy.dot(SigmaHat12, mat_pow(SigmaHat22)))

            corr = numpy.trace(numpy.dot(Tval.T, Tval))**(0.5)
        if 1:  # compute gradient dcost(H1,H2)/dH1

            U, D, V = numpy.linalg.svd(Tval)
            UVT = numpy.dot(U, V.T)
            Delta12 = numpy.dot(mat_pow(SigmaHat11),
                                numpy.dot(UVT, mat_pow(SigmaHat22)))
            UDUT = numpy.dot(U, numpy.dot(D, U.T))
            Delta11 = (-0.5) * numpy.dot(mat_pow(SigmaHat11),
                                         numpy.dot(UDUT, mat_pow(SigmaHat22)))
            grad_E_to_o = (1.0 / m) * (2 * numpy.dot(Delta11, H1bar) +
                                       numpy.dot(Delta12, H2bar))
            ##gparam1_W = (grad_E_to_o) * (h1tmpval*(1-h1tmpval)) * (h1hidden)
            gparam1_W = numpy.dot(
                (h1hidden), ((grad_E_to_o) * (h1tmpval * (1 - h1tmpval))).T)
            ##gparam1_b = (grad_E_to_o) * (h1tmpval*(1-h1tmpval)) * theano.shared(numpy.array([1.0],dtype=theano.config.floatX), borrow=True)
            gparam1_b = numpy.dot(
                numpy.ones((1, 10000), dtype=theano.config.floatX),
                ((grad_E_to_o) * (h1tmpval * (1 - h1tmpval))).T)
            gparam1_W = theano.shared(gparam1_W, borrow=True)
            gparam1_b = theano.shared(gparam1_b[0, :], borrow=True)

            #gparams1 = [T.grad(cost1, param) for param in net1.params]
            gparams1 = [
                T.grad(cost1, param) for param in net1.hiddenLayer.params
            ]
            gparams1.append(gparam1_W)
            updates1 = [(param, param - learning_rate * gparam)
                        for param, gparam in zip(net1.params, gparams1)]
            #gparams1.append(gparam1_b)
        if 1:  # compute gradient dcost(H1,H2)/dH2
            Tval2 = numpy.dot(mat_pow(SigmaHat22),
                              numpy.dot(SigmaHat12.T, mat_pow(SigmaHat11)))
            U, D, V = numpy.linalg.svd(Tval2)
            UVT = numpy.dot(U, V.T)
            Delta12 = numpy.dot(mat_pow(SigmaHat22),
                                numpy.dot(UVT, mat_pow(SigmaHat11)))
            UDUT = numpy.dot(U, numpy.dot(D, U.T))
            Delta11 = (-0.5) * numpy.dot(mat_pow(SigmaHat22),
                                         numpy.dot(UDUT, mat_pow(SigmaHat11)))
            grad_E_to_o = (1.0 / m) * (2 * numpy.dot(Delta11, H2bar) +
                                       numpy.dot(Delta12, H1bar))
            ##gparam1_W = (grad_E_to_o) * (h1tmpval*(1-h1tmpval)) * (h1hidden)
            gparam2_W = numpy.dot(
                (h2hidden), ((grad_E_to_o) * (h2tmpval * (1 - h2tmpval))).T)
            ##gparam1_b = (grad_E_to_o) * (h1tmpval*(1-h1tmpval)) * theano.shared(numpy.array([1.0],dtype=theano.config.floatX), borrow=True)
            gparam2_b = numpy.dot(
                numpy.ones((1, 10000), dtype=theano.config.floatX),
                ((grad_E_to_o) * (h2tmpval * (1 - h2tmpval))).T)
            gparam2_W = theano.shared(gparam2_W, borrow=True)
            gparam2_b = theano.shared(gparam2_b[0, :], borrow=True)

            #gparams1 = [T.grad(cost1, param) for param in net1.params]
            gparams2 = [
                T.grad(cost2, param) for param in net2.hiddenLayer.params
            ]
            gparams2.append(gparam2_W)
            updates2 = [(param, param - learning_rate * gparam)
                        for param, gparam in zip(net2.params, gparams2)]
            #gparams1.append(gparam1_b)

        #X_theano = theano.shared(value=X, name='inputs')
        #h1tmp = theano.shared( value=h1tmpval, name='hidden1_rep', dtype=theano.config.floatX , borrow=True)
        h1tmp = theano.shared(numpy.asarray(H1bar, dtype=theano.config.floatX),
                              borrow=True)
        #h2tmp = theano.shared( value=h2tmpval, name='hidden2_rep', dtype=theano.config.floatX , borrow=True)
        h2tmp = theano.shared(numpy.asarray(H2bar, dtype=theano.config.floatX),
                              borrow=True)
        #h1tmp = T.shared( value=net1.output.eval(), name='hidden1_rep' )
        #h2tmp = T.shared( net2.output.eval() )

        train_model1 = theano.function(
            inputs=[],
            #outputs=cost1,
            updates=updates1,
            givens={
                #x1: test_set_x,
                h1: h1tmp,
                h2: h2tmp
            })
        train_model2 = theano.function(
            inputs=[],
            #outputs=cost2,
            updates=updates2,
            givens={
                #x2: test_set_y,
                h1: h1tmp,
                h2: h2tmp
            })

        minibatch_avg_cost1 = train_model1()
        minibatch_avg_cost2 = train_model2()
        #print 'corr1', minibatch_avg_cost1
        #print 'corr2', minibatch_avg_cost2
        print 'corr', corr
        if epoch > 10:
            break

    end_time = time.clock()
    print(('Optimization complete. Best validation score of %f %% '
           'obtained at iteration %i, with test performance %f %%') %
          (best_validation_loss * 100., best_iter + 1, test_score * 100.))
    print >> sys.stderr, ('The code for file ' + os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
    ''' Loads the dataset

    :type dataset: string
    :param dataset: the path to the dataset (here MNIST)
    '''

    #############
    # LOAD DATA #
    #############

    # Download the MNIST dataset if it is not present
    data_dir, data_file = os.path.split(dataset)
    if data_dir == "" and not os.path.isfile(dataset):
        # Check if dataset is in the data directory.
        new_path = os.path.join(
            os.path.split(__file__)[0], "..", "data", dataset)
        if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':
            dataset = new_path

    if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz':
        import urllib
        origin = (
            'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz')
        print 'Downloading data from %s' % origin
        urllib.urlretrieve(origin, dataset)

    print '... loading data'

    # Load the dataset
    f = gzip.open(dataset, 'rb')
    train_set, valid_set, test_set = cPickle.load(f)
    f.close()

    #train_set, valid_set, test_set format: tuple(input, target)
    #input is a numpy.ndarray of 2 dimensions (a matrix)
    #whose rows each correspond to an example. target is a
    #numpy.ndarray of 1 dimension (a vector) that has the same length as
    #the number of rows in the input. It gives the target for
    #the example with the same index in the input.

    def shared_dataset(data_xy, borrow=True):
        """ Function that loads the dataset into shared variables

        The reason we store our dataset in shared variables is to allow
        Theano to copy it into the GPU memory (when code is run on GPU).
        Since copying data into the GPU is slow, copying a minibatch every time
        it is needed (the default behaviour if the data is not in a shared
        variable) would lead to a large decrease in performance.
        """
        #import copy
        data_x, data_y = data_xy
        #daya_y = copy.deepcopy(data_x)
        data_y_new = numpy.zeros((data_y.shape[0], data_y.max() + 1))
        for i in range(data_y.shape[0]):
            data_y_new[i, data_y[i]] = 1
        data_y = data_y_new
        shared_x = theano.shared(numpy.asarray(data_x,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        shared_y = theano.shared(numpy.asarray(data_y,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        # When storing data on the GPU it has to be stored as floats
        # therefore we will store the labels as ``floatX`` as well
        # (``shared_y`` does exactly that). But during our computations
        # we need them as ints (we use labels as index, and if they are
        # floats it doesn't make sense) therefore instead of returning
        # ``shared_y`` we will have to cast it to int. This little hack
        # lets us get around this issue
        return shared_x, shared_y

    test_set_x, test_set_y = shared_dataset(test_set)
    valid_set_x, valid_set_y = shared_dataset(valid_set)
    train_set_x, train_set_y = shared_dataset(train_set)

    rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
            (test_set_x, test_set_y)]
    return rval
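
The correlation value printed inside the training loop of Example #2 can be read more easily as a standalone helper. The NumPy-only sketch below mirrors that in-loop computation: center the two top-layer outputs, form the regularized covariance estimates, and combine them through inverse matrix square roots. `mat_pow` in the example is assumed to compute the inverse matrix square root (Sigma^(-1/2)), the role `inv_sqrt` plays here, and the 1e-4 regularizer matches the 0.0001 used above.

import numpy

def inv_sqrt(A):
    # inverse matrix square root A^(-1/2) of a symmetric positive-definite A
    w, V = numpy.linalg.eigh(A)
    return V.dot(numpy.diag(w ** -0.5)).dot(V.T)

def cca_correlation(H1, H2, r=1e-4):
    # H1, H2: (features x samples) outputs of the two networks
    m = H1.shape[1]
    center = numpy.ones((m, m)) / m
    H1bar = H1 - H1.dot(center)  # subtract the per-feature mean
    H2bar = H2 - H2.dot(center)
    SigmaHat12 = H1bar.dot(H2bar.T) / (m - 1)
    SigmaHat11 = H1bar.dot(H1bar.T) / (m - 1) + r * numpy.identity(H1.shape[0])
    SigmaHat22 = H2bar.dot(H2bar.T) / (m - 1) + r * numpy.identity(H2.shape[0])
    Tval = inv_sqrt(SigmaHat11).dot(SigmaHat12).dot(inv_sqrt(SigmaHat22))
    # the example uses the Frobenius norm of Tval as its correlation measure
    return numpy.trace(Tval.T.dot(Tval)) ** 0.5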
Example #3
def test_dcca_old(learning_rate=0.01,
                  L1_reg=0.0001,
                  L2_reg=0.0001,
                  n_epochs=1000,
                  dataset='mnist.pkl.gz',
                  batch_size=20,
                  n_hidden=500):
    """
    Demonstrate stochastic gradient descent optimization for a multilayer
    perceptron

    This is demonstrated on MNIST.

    :type learning_rate: float
    :param learning_rate: learning rate used (factor for the stochastic
    gradient)

    :type L1_reg: float
    :param L1_reg: L1-norm's weight when added to the cost (see
    regularization)

    :type L2_reg: float
    :param L2_reg: L2-norm's weight when added to the cost (see
    regularization)

    :type n_epochs: int
    :param n_epochs: maximal number of epochs to run the optimizer

    :type dataset: string
    :param dataset: the path of the MNIST dataset file from
                 http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz


   """
    datasets = load_data(dataset)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size

    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print '... building the model'

    # allocate symbolic variables for the data
    index = T.lscalar()  # index to a [mini]batch
    x = T.matrix('x')  # the data is presented as rasterized images
    y = T.matrix('y')  # the labels, one-hot encoded (one row per example)

    rng = numpy.random.RandomState(1234)

    # construct the model: a single DCCA object over both views
    # (the per-view MLP variant below is disabled)
    if 0:
        net1 = MLP(rng=rng, input=x, n_in=28 * 28, n_hidden=300, n_out=50)
        net2 = MLP(rng=rng, input=y, n_in=10, n_hidden=20, n_out=5)

    net = DCCA(rng=rng,
               x1=x,
               x2=y,
               n_in1=28 * 28,
               n_hidden1=300,
               n_out1=50,
               n_in2=10,
               n_hidden2=20,
               n_out2=5)

    # start-snippet-4
    # the cost we minimize during training is the model's correlation
    # objective plus the regularization terms (L1 and L2); the cost is
    # expressed here symbolically
    cost1 = (net.correlation(y) + L1_reg * net.L11 + L2_reg * net.L2_sqr1)
    cost2 = (net.correlation(y) + L1_reg * net.L12 + L2_reg * net.L2_sqr2)
    # end-snippet-4

    # compiling a Theano function that computes the mistakes that are made
    # by the model on a minibatch
    """
    test_model = theano.function(
        inputs=[index],
        outputs=net1.errors(y),
        givens={
            x: test_set_x[index * batch_size:(index + 1) * batch_size],
            y: test_set_y[index * batch_size:(index + 1) * batch_size]
        }
    )

    validate_model = theano.function(
        inputs=[index],
        outputs=classifier.errors(y),
        givens={
            x: valid_set_x[index * batch_size:(index + 1) * batch_size],
            y: valid_set_y[index * batch_size:(index + 1) * batch_size]
        }
    )
    """

    # start-snippet-5
    # compute the gradient of cost with respect to theta (stored in params)
    # the resulting gradients will be stored in a list gparams
    gparams1 = [T.grad(cost1, param) for param in net.params1]
    gparams2 = [T.grad(cost2, param) for param in net.params2]

    # specify how to update the parameters of the model as a list of
    # (variable, update expression) pairs

    # given two lists A = [a1, a2, a3, a4] and B = [b1, b2, b3, b4] of the
    # same length, zip generates a list C of the same size, where each element
    # is a pair formed from the two lists :
    #    C = [(a1, b1), (a2, b2), (a3, b3), (a4, b4)]
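    #    e.g. zip(net.params1, gparams1) pairs each parameter with its gradient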
    updates1 = [(param, param - learning_rate * gparam)
                for param, gparam in zip(net.params1, gparams1)]
    updates2 = [(param, param - learning_rate * gparam)
                for param, gparam in zip(net.params2, gparams2)]

    # compiling a Theano function `train_model` that returns the cost, but
    # in the same time updates the parameter of the model based on the rules
    # defined in `updates`
    train_model1 = theano.function(
        inputs=[index],
        outputs=cost1,
        updates=updates1,
        givens={
            x: train_set_x[index * batch_size:(index + 1) * batch_size],
            y: train_set_y[index * batch_size:(index + 1) * batch_size]
        })
    train_model2 = theano.function(
        inputs=[index],
        outputs=cost2,
        updates=updates2,
        givens={
            x: train_set_x[index * batch_size:(index + 1) * batch_size],
            y: train_set_y[index * batch_size:(index + 1) * batch_size]
        })
    # end-snippet-5

    ###############
    # TRAIN MODEL #
    ###############
    print '... training'

    # early-stopping parameters
    patience = 10000  # look at this many examples regardless
    patience_increase = 2  # wait this much longer when a new best is
    # found
    improvement_threshold = 0.995  # a relative improvement of this much is
    # considered significant
    validation_frequency = min(n_train_batches, patience / 2)
    # go through this many
    # minibatches before checking the network
    # on the validation set; in this case we
    # check every epoch

    best_validation_loss = numpy.inf
    best_iter = 0
    test_score = 0.
    start_time = time.clock()

    epoch = 0
    done_looping = False

    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):

            # this snippet defines train_model1/train_model2 rather than a
            # single train_model, so update both views each minibatch
            minibatch_avg_cost = train_model1(minibatch_index)
            train_model2(minibatch_index)
            # iteration number
            iter = (epoch - 1) * n_train_batches + minibatch_index

            if (iter + 1) % validation_frequency == 0:
                # compute zero-one loss on validation set
                validation_losses = [
                    validate_model(i) for i in xrange(n_valid_batches)
                ]
                this_validation_loss = numpy.mean(validation_losses)

                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_loss * 100.))

                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:
                    #improve patience if loss improvement is good enough
                    if (this_validation_loss <
                            best_validation_loss * improvement_threshold):
                        patience = max(patience, iter * patience_increase)

                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    # test it on the test set
                    test_losses = [
                        test_model(i) for i in xrange(n_test_batches)
                    ]
                    test_score = numpy.mean(test_losses)

                    print(('     epoch %i, minibatch %i/%i, test error of '
                           'best model %f %%') %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))

            if patience <= iter:
                done_looping = True
                break

    end_time = time.clock()
    print(('Optimization complete. Best validation score of %f %% '
           'obtained at iteration %i, with test performance %f %%') %
          (best_validation_loss * 100., best_iter + 1, test_score * 100.))
    print >> sys.stderr, ('The code for file ' + os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
    ''' Loads the dataset

    :type dataset: string
    :param dataset: the path to the dataset (here MNIST)
    '''

    #############
    # LOAD DATA #
    #############

    # Download the MNIST dataset if it is not present
    data_dir, data_file = os.path.split(dataset)
    if data_dir == "" and not os.path.isfile(dataset):
        # Check if dataset is in the data directory.
        new_path = os.path.join(
            os.path.split(__file__)[0], "..", "data", dataset)
        if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':
            dataset = new_path

    if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz':
        import urllib
        origin = (
            'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz')
        print 'Downloading data from %s' % origin
        urllib.urlretrieve(origin, dataset)

    print '... loading data'

    # Load the dataset
    f = gzip.open(dataset, 'rb')
    train_set, valid_set, test_set = cPickle.load(f)
    f.close()

    #train_set, valid_set, test_set format: tuple(input, target)
    #input is a numpy.ndarray of 2 dimensions (a matrix)
    #whose rows each correspond to an example. target is a
    #numpy.ndarray of 1 dimension (a vector) that has the same length as
    #the number of rows in the input. It gives the target for
    #the example with the same index in the input.

    def shared_dataset(data_xy, borrow=True):
        """ Function that loads the dataset into shared variables

        The reason we store our dataset in shared variables is to allow
        Theano to copy it into the GPU memory (when code is run on GPU).
        Since copying data into the GPU is slow, copying a minibatch every time
        it is needed (the default behaviour if the data is not in a shared
        variable) would lead to a large decrease in performance.
        """
        #import copy
        data_x, data_y = data_xy
        #daya_y = copy.deepcopy(data_x)
        data_y_new = numpy.zeros((data_y.shape[0], data_y.max() + 1))
        for i in range(data_y.shape[0]):
            data_y_new[i, data_y[i]] = 1
        data_y = data_y_new
        shared_x = theano.shared(numpy.asarray(data_x,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        shared_y = theano.shared(numpy.asarray(data_y,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        # When storing data on the GPU it has to be stored as floats
        # therefore we will store the labels as ``floatX`` as well
        # (``shared_y`` does exactly that). But during our computations
        # we need them as ints (we use labels as index, and if they are
        # floats it doesn't make sense) therefore instead of returning
        # ``shared_y`` we will have to cast it to int. This little hack
        # lets us get around this issue
        return shared_x, shared_y

    test_set_x, test_set_y = shared_dataset(test_set)
    valid_set_x, valid_set_y = shared_dataset(valid_set)
    train_set_x, train_set_y = shared_dataset(train_set)

    rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
            (test_set_x, test_set_y)]
    return rval
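
The early-stopping bookkeeping that these training loops share (patience, patience_increase, improvement_threshold) can be isolated into a small helper. This is only a sketch of the same policy under hypothetical names, not part of the original example:

def early_stopping_step(iteration, this_loss, best_loss, patience,
                        patience_increase=2, improvement_threshold=0.995):
    # When the validation loss improves significantly, extend the patience;
    # training stops once the iteration counter exceeds the patience.
    if this_loss < best_loss:
        if this_loss < best_loss * improvement_threshold:
            patience = max(patience, iteration * patience_increase)
        best_loss = this_loss
    done_looping = patience <= iteration
    return patience, best_loss, done_looping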
Example #4
def test_dcca(
    learning_rate=0.01, L1_reg=0.0001, L2_reg=0.0001, n_epochs=1000, dataset="mnist.pkl.gz", batch_size=20, n_hidden=500
):
    """
    Demonstrate stochastic gradient descent optimization for a multilayer
    perceptron

    This is demonstrated on MNIST.

    :type learning_rate: float
    :param learning_rate: learning rate used (factor for the stochastic
    gradient)

    :type L1_reg: float
    :param L1_reg: L1-norm's weight when added to the cost (see
    regularization)

    :type L2_reg: float
    :param L2_reg: L2-norm's weight when added to the cost (see
    regularization)

    :type n_epochs: int
    :param n_epochs: maximal number of epochs to run the optimizer

    :type dataset: string
    :param dataset: the path of the MNIST dataset file from
                 http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz


   """
    datasets = load_data(dataset)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size

    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print "... building the model"

    # allocate symbolic variables for the data
    index = T.lscalar()  # index to a [mini]batch
    x1 = T.matrix("x1")  # view 1: the rasterized images
    x2 = T.matrix("x2")  # view 2: the labels, one-hot encoded
    h1 = T.matrix("h1")  # top-layer representation of view 1
    h2 = T.matrix("h2")  # top-layer representation of view 2

    rng = numpy.random.RandomState(1234)

    # construct the two view-specific DCCA networks
    net1 = DCCA(rng=rng, input=x1, n_in=28 * 28, n_hidden=300, n_out=8)
    net2 = DCCA(rng=rng, input=x2, n_in=10, n_hidden=20, n_out=8)
    if 1:
        cost1 = net1.correlation(h1, h2) + L1_reg * net1.L1 + L2_reg * net1.L2_sqr
        cost2 = net2.correlation(h1, h2) + L1_reg * net2.L1 + L2_reg * net2.L2_sqr

    """
    test_model = theano.function(
        inputs=[index],
        outputs=net1.errors(y),
        givens={
            x: test_set_x[index * batch_size:(index + 1) * batch_size],
            y: test_set_y[index * batch_size:(index + 1) * batch_size]
        }
    )

    validate_model = theano.function(
        inputs=[index],
        outputs=classifier.errors(y),
        givens={
            x: valid_set_x[index * batch_size:(index + 1) * batch_size],
            y: valid_set_y[index * batch_size:(index + 1) * batch_size]
        }
    )
    """
    fprop_model1 = theano.function(inputs=[], outputs=(net1.hiddenLayer.output, net1.output), givens={x1: test_set_x})
    fprop_model2 = theano.function(inputs=[], outputs=(net2.hiddenLayer.output, net2.output), givens={x2: test_set_y})

    if 1:  # grad compute for net1 in theano
        U, V, D = theano.tensor.nlinalg.svd(net1.lastLayer.Tval)
        UVT = T.dot(U, V.T)
        Delta12 = T.dot(net1.lastLayer.SigmaHat11 ** (-0.5), T.dot(UVT, net1.lastLayer.SigmaHat22 ** (-0.5)))
        UDUT = T.dot(U, T.dot(D, U.T))
        Delta11 = (-0.5) * T.dot(net1.lastLayer.SigmaHat11 ** (-0.5), T.dot(UDUT, net1.lastLayer.SigmaHat22 ** (-0.5)))
        grad_E_to_o = (1.0 / 8) * (2 * Delta11 * net1.lastLayer.H1bar + Delta12 * net1.lastLayer.H2bar)
        gparam1_W = (grad_E_to_o) * (net1.lastLayer.output * (1 - net1.lastLayer.output)) * (net1.hiddenLayer.output)
        gparam1_b = (
            (grad_E_to_o)
            * (net1.lastLayer.output * (1 - net1.lastLayer.output))
            * theano.shared(numpy.array([1.0], dtype=theano.config.floatX), borrow=True)
        )
        # gparams1 = [T.grad(cost1, param) for param in net1.params]
        gparams1 = [T.grad(cost1, param) for param in net1.hiddenLayer.params]
        gparams1.append(gparam1_W)
        # gparams1.append(gparam1_b)
    if 1:  # grad compute for net2
        U, V, D = theano.tensor.nlinalg.svd(net2.lastLayer.Tval)
        UVT = T.dot(U, V.T)
        Delta12 = T.dot(net2.lastLayer.SigmaHat11 ** (-0.5), T.dot(UVT, net2.lastLayer.SigmaHat22 ** (-0.5)))
        UDUT = T.dot(U, T.dot(D, U.T))
        Delta11 = (-0.5) * T.dot(net2.lastLayer.SigmaHat11 ** (-0.5), T.dot(UDUT, net2.lastLayer.SigmaHat22 ** (-0.5)))
        grad_E_to_o = (1.0 / 8) * (2 * Delta11 * net2.lastLayer.H1bar + Delta12 * net2.lastLayer.H2bar)
        gparam2_W = (grad_E_to_o) * (net2.lastLayer.output * (1 - net2.lastLayer.output)) * (net2.hiddenLayer.output)
        gparam2_b = (grad_E_to_o) * (net2.lastLayer.output * (1 - net2.lastLayer.output)) * 1
        # gparams1 = [T.grad(cost1, param) for param in net1.params]
        gparams2 = [T.grad(cost2, param) for param in net2.hiddenLayer.params]
        gparams2.append(gparam2_W)
        gparams2.append(gparam2_b)

        # gparams2 = [T.grad(cost2, param) for param in net2.params]

    updates1 = [(param, param - learning_rate * gparam) for param, gparam in zip(net1.params, gparams1)]
    updates2 = [(param, param - learning_rate * gparam) for param, gparam in zip(net2.params, gparams2)]

    ###############
    # TRAIN MODEL #
    ###############
    print "... training"

    # early-stopping parameters
    patience = 10000  # look at this many examples regardless
    patience_increase = 2  # wait this much longer when a new best is
    # found
    improvement_threshold = 0.995  # a relative improvement of this much is
    # considered significant
    validation_frequency = min(n_train_batches, patience / 2)
    # go through this many
    # minibatches before checking the network
    # on the validation set; in this case we
    # check every epoch

    best_validation_loss = numpy.inf
    best_iter = 0
    test_score = 0.0
    start_time = time.clock()

    epoch = 0
    done_looping = False

    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        print "epoch", epoch
        # net1.fprop(test_set_x)
        # net2.fprop(test_set_y)
        h1hidden, h1tmpval = fprop_model1()
        h2hidden, h2tmpval = fprop_model2()
        h1hidden = h1hidden.T
        h2hidden = h2hidden.T
        h1tmpval = h1tmpval.T
        h2tmpval = h2tmpval.T
        if 1:  # compute cost(H1, H2)

            H1 = h1tmpval
            H2 = h2tmpval
            m = H1.shape[1]

            H1bar = H1 - (1.0 / m) * numpy.dot(H1, numpy.ones((m, m), dtype=numpy.float32))
            H2bar = H2 - (1.0 / m) * numpy.dot(H2, numpy.ones((m, m), dtype=numpy.float32))
            SigmaHat12 = (1.0 / (m - 1)) * numpy.dot(H1bar, H2bar.T)
            SigmaHat11 = (1.0 / (m - 1)) * numpy.dot(H1bar, H1bar.T)
            SigmaHat11 = SigmaHat11 + 0.0001 * numpy.identity(SigmaHat11.shape[0], dtype=numpy.float32)
            SigmaHat22 = (1.0 / (m - 1)) * numpy.dot(H2bar, H2bar.T)
            SigmaHat22 = SigmaHat22 + 0.0001 * numpy.identity(SigmaHat22.shape[0], dtype=numpy.float32)

            Tval = numpy.dot(mat_pow(SigmaHat11), numpy.dot(SigmaHat12, mat_pow(SigmaHat22)))

            corr = numpy.trace(numpy.dot(Tval.T, Tval)) ** (0.5)
        if 1:  # compute gradient dcost(H1,H2)/dH1

            U, D, V = numpy.linalg.svd(Tval)
            UVT = numpy.dot(U, V.T)
            Delta12 = numpy.dot(mat_pow(SigmaHat11), numpy.dot(UVT, mat_pow(SigmaHat22)))
            UDUT = numpy.dot(U, numpy.dot(D, U.T))
            Delta11 = (-0.5) * numpy.dot(mat_pow(SigmaHat11), numpy.dot(UDUT, mat_pow(SigmaHat22)))
            grad_E_to_o = (1.0 / m) * (2 * numpy.dot(Delta11, H1bar) + numpy.dot(Delta12, H2bar))
            ##gparam1_W = (grad_E_to_o) * (h1tmpval*(1-h1tmpval)) * (h1hidden)
            gparam1_W = numpy.dot((h1hidden), ((grad_E_to_o) * (h1tmpval * (1 - h1tmpval))).T)
            ##gparam1_b = (grad_E_to_o) * (h1tmpval*(1-h1tmpval)) * theano.shared(numpy.array([1.0],dtype=theano.config.floatX), borrow=True)
            gparam1_b = numpy.dot(
                numpy.ones((1, 10000), dtype=theano.config.floatX), ((grad_E_to_o) * (h1tmpval * (1 - h1tmpval))).T
            )
            gparam1_W = theano.shared(gparam1_W, borrow=True)
            gparam1_b = theano.shared(gparam1_b[0, :], borrow=True)

            # gparams1 = [T.grad(cost1, param) for param in net1.params]
            gparams1 = [T.grad(cost1, param) for param in net1.hiddenLayer.params]
            gparams1.append(gparam1_W)
            updates1 = [(param, param - learning_rate * gparam) for param, gparam in zip(net1.params, gparams1)]
            # gparams1.append(gparam1_b)
        if 1:  # compute gradient dcost(H1,H2)/dH2
            Tval2 = numpy.dot(mat_pow(SigmaHat22), numpy.dot(SigmaHat12.T, mat_pow(SigmaHat11)))
            U, D, V = numpy.linalg.svd(Tval2)
            UVT = numpy.dot(U, V.T)
            Delta12 = numpy.dot(mat_pow(SigmaHat22), numpy.dot(UVT, mat_pow(SigmaHat11)))
            UDUT = numpy.dot(U, numpy.dot(D, U.T))
            Delta11 = (-0.5) * numpy.dot(mat_pow(SigmaHat22), numpy.dot(UDUT, mat_pow(SigmaHat11)))
            grad_E_to_o = (1.0 / m) * (2 * numpy.dot(Delta11, H2bar) + numpy.dot(Delta12, H1bar))
            ##gparam1_W = (grad_E_to_o) * (h1tmpval*(1-h1tmpval)) * (h1hidden)
            gparam2_W = numpy.dot((h2hidden), ((grad_E_to_o) * (h2tmpval * (1 - h2tmpval))).T)
            ##gparam1_b = (grad_E_to_o) * (h1tmpval*(1-h1tmpval)) * theano.shared(numpy.array([1.0],dtype=theano.config.floatX), borrow=True)
            gparam2_b = numpy.dot(
                numpy.ones((1, 10000), dtype=theano.config.floatX), ((grad_E_to_o) * (h2tmpval * (1 - h2tmpval))).T
            )
            gparam2_W = theano.shared(gparam2_W, borrow=True)
            gparam2_b = theano.shared(gparam2_b[0, :], borrow=True)

            # gparams1 = [T.grad(cost1, param) for param in net1.params]
            gparams2 = [T.grad(cost2, param) for param in net2.hiddenLayer.params]
            gparams2.append(gparam2_W)
            updates2 = [(param, param - learning_rate * gparam) for param, gparam in zip(net2.params, gparams2)]
            # gparams1.append(gparam1_b)

        # X_theano = theano.shared(value=X, name='inputs')
        # h1tmp = theano.shared( value=h1tmpval, name='hidden1_rep', dtype=theano.config.floatX , borrow=True)
        h1tmp = theano.shared(numpy.asarray(H1bar, dtype=theano.config.floatX), borrow=True)
        # h2tmp = theano.shared( value=h2tmpval, name='hidden2_rep', dtype=theano.config.floatX , borrow=True)
        h2tmp = theano.shared(numpy.asarray(H2bar, dtype=theano.config.floatX), borrow=True)
        # h1tmp = T.shared( value=net1.output.eval(), name='hidden1_rep' )
        # h2tmp = T.shared( net2.output.eval() )

        train_model1 = theano.function(
            inputs=[],
            # outputs=cost1,
            updates=updates1,
            givens={
                # x1: test_set_x,
                h1: h1tmp,
                h2: h2tmp,
            },
        )
        train_model2 = theano.function(
            inputs=[],
            # outputs=cost2,
            updates=updates2,
            givens={
                # x2: test_set_y,
                h1: h1tmp,
                h2: h2tmp,
            },
        )

        minibatch_avg_cost1 = train_model1()
        minibatch_avg_cost2 = train_model2()
        # print 'corr1', minibatch_avg_cost1
        # print 'corr2', minibatch_avg_cost2
        print "corr", corr
        if epoch > 10:
            break

    end_time = time.clock()
    print (
        (
            "Optimization complete. Best validation score of %f %% "
            "obtained at iteration %i, with test performance %f %%"
        )
        % (best_validation_loss * 100.0, best_iter + 1, test_score * 100.0)
    )
    print >> sys.stderr, (
        "The code for file " + os.path.split(__file__)[1] + " ran for %.2fm" % ((end_time - start_time) / 60.0)
    )

    """ Loads the dataset

    :type dataset: string
    :param dataset: the path to the dataset (here MNIST)
    """

    #############
    # LOAD DATA #
    #############

    # Download the MNIST dataset if it is not present
    data_dir, data_file = os.path.split(dataset)
    if data_dir == "" and not os.path.isfile(dataset):
        # Check if dataset is in the data directory.
        new_path = os.path.join(os.path.split(__file__)[0], "..", "data", dataset)
        if os.path.isfile(new_path) or data_file == "mnist.pkl.gz":
            dataset = new_path

    if (not os.path.isfile(dataset)) and data_file == "mnist.pkl.gz":
        import urllib

        origin = "http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz"
        print "Downloading data from %s" % origin
        urllib.urlretrieve(origin, dataset)

    print "... loading data"

    # Load the dataset
    f = gzip.open(dataset, "rb")
    train_set, valid_set, test_set = cPickle.load(f)
    f.close()
    # train_set, valid_set, test_set format: tuple(input, target)
    # input is a numpy.ndarray of 2 dimensions (a matrix)
    # whose rows each correspond to an example. target is a
    # numpy.ndarray of 1 dimension (a vector) that has the same length as
    # the number of rows in the input. It gives the target for
    # the example with the same index in the input.

    def shared_dataset(data_xy, borrow=True):
        """ Function that loads the dataset into shared variables

        The reason we store our dataset in shared variables is to allow
        Theano to copy it into the GPU memory (when code is run on GPU).
        Since copying data into the GPU is slow, copying a minibatch every time
        it is needed (the default behaviour if the data is not in a shared
        variable) would lead to a large decrease in performance.
        """
        # import copy
        data_x, data_y = data_xy
        # daya_y = copy.deepcopy(data_x)
        data_y_new = numpy.zeros((data_y.shape[0], data_y.max() + 1))
        for i in range(data_y.shape[0]):
            data_y_new[i, data_y[i]] = 1
        data_y = data_y_new
        shared_x = theano.shared(numpy.asarray(data_x, dtype=theano.config.floatX), borrow=borrow)
        shared_y = theano.shared(numpy.asarray(data_y, dtype=theano.config.floatX), borrow=borrow)
        # When storing data on the GPU it has to be stored as floats
        # therefore we will store the labels as ``floatX`` as well
        # (``shared_y`` does exactly that). But during our computations
        # we need them as ints (we use labels as index, and if they are
        # floats it doesn't make sense) therefore instead of returning
        # ``shared_y`` we will have to cast it to int. This little hack
        # lets us get around this issue
        return shared_x, shared_y

    test_set_x, test_set_y = shared_dataset(test_set)
    valid_set_x, valid_set_y = shared_dataset(valid_set)
    train_set_x, train_set_y = shared_dataset(train_set)

    rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y), (test_set_x, test_set_y)]
    return rval
Example #5
def test_dcca_old(
    learning_rate=0.01, L1_reg=0.0001, L2_reg=0.0001, n_epochs=1000, dataset="mnist.pkl.gz", batch_size=20, n_hidden=500
):
    """
    Demonstrate stochastic gradient descent optimization for a multilayer
    perceptron

    This is demonstrated on MNIST.

    :type learning_rate: float
    :param learning_rate: learning rate used (factor for the stochastic
    gradient)

    :type L1_reg: float
    :param L1_reg: L1-norm's weight when added to the cost (see
    regularization)

    :type L2_reg: float
    :param L2_reg: L2-norm's weight when added to the cost (see
    regularization)

    :type n_epochs: int
    :param n_epochs: maximal number of epochs to run the optimizer

    :type dataset: string
    :param dataset: the path of the MNIST dataset file from
                 http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz


   """
    datasets = load_data(dataset)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size

    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print "... building the model"

    # allocate symbolic variables for the data
    index = T.lscalar()  # index to a [mini]batch
    x = T.matrix("x")  # the data is presented as rasterized images
    y = T.matrix("y")  # the labels, one-hot encoded (one row per example)

    rng = numpy.random.RandomState(1234)

    # construct the model: a single DCCA object over both views
    # (the per-view MLP variant below is disabled)
    if 0:
        net1 = MLP(rng=rng, input=x, n_in=28 * 28, n_hidden=300, n_out=50)
        net2 = MLP(rng=rng, input=y, n_in=10, n_hidden=20, n_out=5)

    net = DCCA(rng=rng, x1=x, x2=y, n_in1=28 * 28, n_hidden1=300, n_out1=50, n_in2=10, n_hidden2=20, n_out2=5)

    # start-snippet-4
    # the cost we minimize during training is the model's correlation
    # objective plus the regularization terms (L1 and L2); the cost is
    # expressed here symbolically
    cost1 = net.correlation(y) + L1_reg * net.L11 + L2_reg * net.L2_sqr1
    cost2 = net.correlation(y) + L1_reg * net.L12 + L2_reg * net.L2_sqr2
    # end-snippet-4

    # compiling a Theano function that computes the mistakes that are made
    # by the model on a minibatch
    """
    test_model = theano.function(
        inputs=[index],
        outputs=net1.errors(y),
        givens={
            x: test_set_x[index * batch_size:(index + 1) * batch_size],
            y: test_set_y[index * batch_size:(index + 1) * batch_size]
        }
    )

    validate_model = theano.function(
        inputs=[index],
        outputs=classifier.errors(y),
        givens={
            x: valid_set_x[index * batch_size:(index + 1) * batch_size],
            y: valid_set_y[index * batch_size:(index + 1) * batch_size]
        }
    )
    """

    # start-snippet-5
    # compute the gradient of cost with respect to theta (stored in params)
    # the resulting gradients will be stored in a list gparams
    gparams1 = [T.grad(cost1, param) for param in net.params1]
    gparams2 = [T.grad(cost2, param) for param in net.params2]

    # specify how to update the parameters of the model as a list of
    # (variable, update expression) pairs

    # given two lists A = [a1, a2, a3, a4] and B = [b1, b2, b3, b4] of the
    # same length, zip generates a list C of the same size, where each element
    # is a pair formed from the two lists :
    #    C = [(a1, b1), (a2, b2), (a3, b3), (a4, b4)]
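    #    e.g. zip(net.params1, gparams1) pairs each parameter with its gradient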
    updates1 = [(param, param - learning_rate * gparam) for param, gparam in zip(net.params1, gparams1)]
    updates2 = [(param, param - learning_rate * gparam) for param, gparam in zip(net.params2, gparams2)]

    # compiling a Theano function `train_model` that returns the cost, but
    # in the same time updates the parameter of the model based on the rules
    # defined in `updates`
    train_model1 = theano.function(
        inputs=[index],
        outputs=cost1,
        updates=updates1,
        givens={
            x: train_set_x[index * batch_size : (index + 1) * batch_size],
            y: train_set_y[index * batch_size : (index + 1) * batch_size],
        },
    )
    train_model2 = theano.function(
        inputs=[index],
        outputs=cost2,
        updates=updates2,
        givens={
            x: train_set_x[index * batch_size : (index + 1) * batch_size],
            y: train_set_y[index * batch_size : (index + 1) * batch_size],
        },
    )
    # end-snippet-5

    ###############
    # TRAIN MODEL #
    ###############
    print "... training"

    # early-stopping parameters
    patience = 10000  # look at this many examples regardless
    patience_increase = 2  # wait this much longer when a new best is
    # found
    improvement_threshold = 0.995  # a relative improvement of this much is
    # considered significant
    validation_frequency = min(n_train_batches, patience / 2)
    # go through this many
    # minibatches before checking the network
    # on the validation set; in this case we
    # check every epoch

    best_validation_loss = numpy.inf
    best_iter = 0
    test_score = 0.0
    start_time = time.clock()

    epoch = 0
    done_looping = False

    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):

            # this snippet defines train_model1/train_model2 rather than a
            # single train_model, so update both views each minibatch
            minibatch_avg_cost = train_model1(minibatch_index)
            train_model2(minibatch_index)
            # iteration number
            iter = (epoch - 1) * n_train_batches + minibatch_index

            if (iter + 1) % validation_frequency == 0:
                # compute zero-one loss on validation set
                validation_losses = [validate_model(i) for i in xrange(n_valid_batches)]
                this_validation_loss = numpy.mean(validation_losses)

                print (
                    "epoch %i, minibatch %i/%i, validation error %f %%"
                    % (epoch, minibatch_index + 1, n_train_batches, this_validation_loss * 100.0)
                )

                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:
                    # improve patience if loss improvement is good enough
                    if this_validation_loss < best_validation_loss * improvement_threshold:
                        patience = max(patience, iter * patience_increase)

                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    # test it on the test set
                    test_losses = [test_model(i) for i in xrange(n_test_batches)]
                    test_score = numpy.mean(test_losses)

                    print (
                        ("     epoch %i, minibatch %i/%i, test error of " "best model %f %%")
                        % (epoch, minibatch_index + 1, n_train_batches, test_score * 100.0)
                    )

            if patience <= iter:
                done_looping = True
                break

    end_time = time.clock()
    print (
        (
            "Optimization complete. Best validation score of %f %% "
            "obtained at iteration %i, with test performance %f %%"
        )
        % (best_validation_loss * 100.0, best_iter + 1, test_score * 100.0)
    )
    print >> sys.stderr, (
        "The code for file " + os.path.split(__file__)[1] + " ran for %.2fm" % ((end_time - start_time) / 60.0)
    )

    """ Loads the dataset

    :type dataset: string
    :param dataset: the path to the dataset (here MNIST)
    """

    #############
    # LOAD DATA #
    #############

    # Download the MNIST dataset if it is not present
    data_dir, data_file = os.path.split(dataset)
    if data_dir == "" and not os.path.isfile(dataset):
        # Check if dataset is in the data directory.
        new_path = os.path.join(os.path.split(__file__)[0], "..", "data", dataset)
        if os.path.isfile(new_path) or data_file == "mnist.pkl.gz":
            dataset = new_path

    if (not os.path.isfile(dataset)) and data_file == "mnist.pkl.gz":
        import urllib

        origin = "http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz"
        print "Downloading data from %s" % origin
        urllib.urlretrieve(origin, dataset)

    print "... loading data"

    # Load the dataset
    f = gzip.open(dataset, "rb")
    train_set, valid_set, test_set = cPickle.load(f)
    f.close()
    # train_set, valid_set, test_set format: tuple(input, target)
    # input is a numpy.ndarray of 2 dimensions (a matrix)
    # whose rows each correspond to an example. target is a
    # numpy.ndarray of 1 dimension (a vector) that has the same length as
    # the number of rows in the input. It gives the target for
    # the example with the same index in the input.

    def shared_dataset(data_xy, borrow=True):
        """ Function that loads the dataset into shared variables

        The reason we store our dataset in shared variables is to allow
        Theano to copy it into the GPU memory (when code is run on GPU).
        Since copying data into the GPU is slow, copying a minibatch every time
        it is needed (the default behaviour if the data is not in a shared
        variable) would lead to a large decrease in performance.
        """
        # import copy
        data_x, data_y = data_xy
        # daya_y = copy.deepcopy(data_x)
        data_y_new = numpy.zeros((data_y.shape[0], data_y.max() + 1))
        for i in range(data_y.shape[0]):
            data_y_new[i, data_y[i]] = 1
        data_y = data_y_new
        shared_x = theano.shared(numpy.asarray(data_x, dtype=theano.config.floatX), borrow=borrow)
        shared_y = theano.shared(numpy.asarray(data_y, dtype=theano.config.floatX), borrow=borrow)
        # When storing data on the GPU it has to be stored as floats
        # therefore we will store the labels as ``floatX`` as well
        # (``shared_y`` does exactly that). But during our computations
        # we need them as ints (we use labels as index, and if they are
        # floats it doesn't make sense) therefore instead of returning
        # ``shared_y`` we will have to cast it to int. This little hack
        # lets us get around this issue
        return shared_x, shared_y

    test_set_x, test_set_y = shared_dataset(test_set)
    valid_set_x, valid_set_y = shared_dataset(valid_set)
    train_set_x, train_set_y = shared_dataset(train_set)

    rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y), (test_set_x, test_set_y)]
    return rval
Example #6
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=64,
                        help='Number of images in each mini-batch')
    parser.add_argument('--unit',
                        '-u',
                        type=int,
                        default=64,
                        help='Number of units')
    parser.add_argument('--best-model', help='path to best model')
    args = parser.parse_args()

    DATA_DIR = '/baobab/kiyomaru/2018-shinjin/jumanpp.midasi'
    PATH_TO_TEST = os.path.join(DATA_DIR, 'test.csv')
    PATH_TO_WE = '/share/data/word2vec/2016.08.02/w2v.midasi.256.100K.bin'

    # load test data
    test_x, test_y = load_data(PATH_TO_TEST)
    word_vectors = KeyedVectors.load_word2vec_format(PATH_TO_WE, binary=True)
    word2index = {}
    for index, word in enumerate(word_vectors.index2word):
        word2index[word] = index

    # convert document to ids
    test_ids = assign_id_to_document(test_x, word2index)

    # convert test_y to numpy.array
    y_true = numpy.array(test_y)

    # define model
    model = MLP(n_vocab=len(word2index),
                n_embed=word_vectors.vector_size,
                n_units=args.unit,
                W=None)
    model = L.Classifier(model)

    # load pre-trained model
    try:
        chainer.serializers.load_npz(args.best_model, model)
    except Exception as e:
        print('error:', str(e))
        sys.exit(1)

    # predict labels for test data
    with chainer.using_config('train', False):
        y_pred = []
        for i in range(0, len(test_ids), args.batchsize):
            x = test_ids[i:i + args.batchsize]
            y = model.predictor(x)
            y_pred.append(F.argmax(y, axis=1).data[:, None])
        y_pred = numpy.vstack(y_pred)

    # calculate macro-f1
    print(f1_score(y_true, y_pred, average='macro'))

    print('\nClass\tPre\tRec\tF1\tSupport')
    for i, (p, r, f, s) in enumerate(
            zip(*precision_recall_fscore_support(y_true, y_pred))):
        print('%d\t%.4f\t%.4f\t%.4f\t%d' % (i, p, r, f, s))

    print('\nConfusion matrix (row: true, column: prediction)')
    for pred in confusion_matrix(y_true, y_pred):
        print('\t'.join([str(p)
                         for p in pred]) + '\t(Support: %d)' % sum(pred))
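
For reference, the scikit-learn metric calls used at the end of Example #6 behave as follows on a small toy pair of label arrays:

import numpy
from sklearn.metrics import (confusion_matrix, f1_score,
                             precision_recall_fscore_support)

y_true = numpy.array([0, 0, 1, 1, 2])
y_pred = numpy.array([0, 1, 1, 1, 2])

# unweighted mean of the per-class F1 scores
print(f1_score(y_true, y_pred, average='macro'))
# per-class precision, recall, F1 and support arrays
print(precision_recall_fscore_support(y_true, y_pred))
# rows are true classes, columns are predicted classes
print(confusion_matrix(y_true, y_pred))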
Example #7
def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
                    dataset='mnist.pkl.gz',
                    nkerns=[20, 50], batch_size=500):
    """ Demonstrates lenet on MNIST dataset
    The experiments use the MNIST dataset.
    :type learning_rate: float
    :param learning_rate: learning rate used (factor for the stochastic
                          gradient)

    :type n_epochs: int
    :param n_epochs: maximal number of epochs to run the optimizer
    n_epochs is the maximum number of epochs; one full epoch processes the
    whole training set, i.e. (dataset size / batch_size) minibatches.
    
    :type dataset: string
    :param dataset: path to the dataset used for training /testing (MNIST here)
    Path to the dataset.
    
    :type nkerns: list of ints
    :param nkerns: number of kernels on each layer
    Number of convolution kernels per layer: the first conv/pool layer has 20
    kernels and the second has 50. Each kernel produces one feature map after
    convolution. (One can think of the kernel count as the number of "neurons",
    each with its own set of weights.)
    """

    # The setup below is the same as in the MLP example.
    rng = numpy.random.RandomState(23455)

    datasets = load_data(dataset)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
    n_test_batches = test_set_x.get_value(borrow=True).shape[0]
    n_train_batches /= batch_size
    n_valid_batches /= batch_size
    n_test_batches /= batch_size

    # allocate symbolic variables for the data
    index = T.lscalar()  # index to a [mini]batch

    # start-snippet-1
    x = T.matrix('x')   # the data is presented as rasterized images
    y = T.ivector('y')  # the labels are presented as 1D vector of
                        # [int] labels

    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print '... building the model'

    # Reshape matrix of rasterized images of shape (batch_size, 28 * 28)
    # to a 4D tensor, compatible with our LeNetConvPoolLayer
    # (28, 28) is the size of MNIST images.
    # Build the input to layer 0: reshape the (batch_size, 28 * 28) data block
    # into a 4D tensor of shape (batch_size, 1, 28, 28).
    # In the (batch_size, 28 * 28) matrix each row is one sample and its
    # 28 * 28 columns hold that sample's pixel values.
    layer0_input = x.reshape((batch_size, 1, 28, 28))

    # Construct the first convolutional pooling layer:
    # filtering reduces the image size to (28-5+1 , 28-5+1) = (24, 24)
    # maxpooling reduces this further to (24/2, 24/2) = (12, 12)
    # 4D output tensor is thus of shape (batch_size, nkerns[0], 12, 12)
    layer0 = LeNetConvPoolLayer(
        rng,
        input=layer0_input,
        image_shape=(batch_size, 1, 28, 28),
        filter_shape=(nkerns[0], 1, 5, 5),
        poolsize=(2, 2)
    )
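    # In general, with 'valid' convolution and non-overlapping pooling the
    # output side length is (input_size - filter_size + 1) / poolsize,
    # here (28 - 5 + 1) / 2 = 12.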

    # Construct the second convolutional pooling layer
    # filtering reduces the image size to (12-5+1, 12-5+1) = (8, 8)
    # maxpooling reduces this further to (8/2, 8/2) = (4, 4)
    # 4D output tensor is thus of shape (batch_size, nkerns[1], 4, 4)
    layer1 = LeNetConvPoolLayer(
        rng,
        input=layer0.output,
        image_shape=(batch_size, nkerns[0], 12, 12),
        filter_shape=(nkerns[1], nkerns[0], 5, 5),
        poolsize=(2, 2)
    )

    # the HiddenLayer being fully-connected, it operates on 2D matrices of
    # shape (batch_size, num_pixels) (i.e matrix of rasterized images).
    # This will generate a matrix of shape (batch_size, nkerns[1] * 4 * 4),
    # or (500, 50 * 4 * 4) = (500, 800) with the default values.
    layer2_input = layer1.output.flatten(2)

    # construct a fully-connected sigmoidal layer
    layer2 = HiddenLayer(
        rng,
        input=layer2_input,
        n_in=nkerns[1] * 4 * 4,
        n_out=500,
        activation=T.tanh
    )

    # classify the values of the fully-connected sigmoidal layer
    layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)

    # the cost we minimize during training is the NLL of the model
    cost = layer3.negative_log_likelihood(y)

    # create a function to compute the mistakes that are made by the model
    test_model = theano.function(
        [index],
        layer3.errors(y),
        givens={
            x: test_set_x[index * batch_size: (index + 1) * batch_size],
            y: test_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    validate_model = theano.function(
        [index],
        layer3.errors(y),
        givens={
            x: valid_set_x[index * batch_size: (index + 1) * batch_size],
            y: valid_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    # create a list of all model parameters to be fit by gradient descent
    params = layer3.params + layer2.params + layer1.params + layer0.params

    # create a list of gradients for all model parameters
    grads = T.grad(cost, params)

    # train_model is a function that updates the model parameters by
    # SGD. Since this model has many parameters, it would be tedious to
    # manually create an update rule for each model parameter. We thus
    # create the updates list by automatically looping over all
    # (params[i], grads[i]) pairs.
    updates = [
        (param_i, param_i - learning_rate * grad_i)
        for param_i, grad_i in zip(params, grads)
    ]
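    # i.e. the plain SGD rule:
    #     param_i <- param_i - learning_rate * d(cost)/d(param_i)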

    train_model = theano.function(
        [index],
        cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size],
            y: train_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )
    # end-snippet-1

    ###############
    # TRAIN MODEL #
    ###############
    print '... training'
    # early-stopping parameters
    patience = 10000  # look at this many examples regardless
    patience_increase = 2  # wait this much longer when a new best is
                           # found
    improvement_threshold = 0.995  # a relative improvement of this much is
                                   # considered significant
    validation_frequency = min(n_train_batches, patience / 2)
                                  # go through this many
                                  # minibatches before checking the network
                                  # on the validation set; in this case we
                                  # check every epoch
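    # e.g. with the standard mnist.pkl.gz split (50,000 training samples) and
    # batch_size=500, n_train_batches = 100 and validation_frequency =
    # min(100, 5000) = 100, i.e. we validate once per epoch.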

    best_validation_loss = numpy.inf
    best_iter = 0
    test_score = 0.
    start_time = timeit.default_timer()

    epoch = 0
    done_looping = False

    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):

            iter = (epoch - 1) * n_train_batches + minibatch_index

            if iter % 100 == 0:
                print 'training @ iter = ', iter
            cost_ij = train_model(minibatch_index)

            if (iter + 1) % validation_frequency == 0:

                # compute zero-one loss on validation set
                validation_losses = [validate_model(i) for i
                                     in xrange(n_valid_batches)]
                this_validation_loss = numpy.mean(validation_losses)
                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_loss * 100.))

                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:

                    # improve patience if loss improvement is good enough
                    if this_validation_loss < best_validation_loss *  \
                       improvement_threshold:
                        patience = max(patience, iter * patience_increase)
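                        # e.g. a qualifying improvement at iter 8000 extends
                        # patience to max(10000, 8000 * 2) = 16000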

                    # save best validation score and iteration number
                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    # test it on the test set
                    test_losses = [
                        test_model(i)
                        for i in xrange(n_test_batches)
                    ]
                    test_score = numpy.mean(test_losses)
                    print(('     epoch %i, minibatch %i/%i, test error of '
                           'best model %f %%') %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))

            if patience <= iter:
                done_looping = True
                break

    end_time = timeit.default_timer()
    print('Optimization complete.')
    print('Best validation score of %f %% obtained at iteration %i, '
          'with test performance %f %%' %
          (best_validation_loss * 100., best_iter + 1, test_score * 100.))
    print >> sys.stderr, ('The code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))