Example #1
from collections import OrderedDict

import numpy as np
import theano


def init_tparams(params,
                 given_tparams=None,
                 given_dup_tparams=None,
                 use_mv=False):
    """Initialize Theano shared variables according to the initial parameters"""

    tparams = OrderedDict() if given_tparams is None else given_tparams
    dup_tparams = (OrderedDict() if given_dup_tparams is None
                   else given_dup_tparams)

    if not use_mv:
        for kk, pp in params.items():
            tparams[kk] = theano.shared(pp, name=kk)
    else:
        try:
            from multiverso.theano_ext import sharedvar
        except ImportError:
            from ..multiverso_.theano_ext import sharedvar

        for kk, pp in params.items():
            # dup_shared_var_list and dup_size are assumed to be defined at
            # module level in the original project
            if any(kk.startswith(var) for var in dup_shared_var_list):
                tparams[kk] = theano.shared(params[kk], name=kk)
                dup_tparams[kk] = sharedvar.mv_shared(
                    value=np.ones(dup_size) * params[kk][0],
                    name=kk,
                    borrow=False)
            else:
                tparams[kk] = sharedvar.mv_shared(value=params[kk],
                                                  name=kk,
                                                  borrow=False)
    return tparams, dup_tparams
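
A minimal usage sketch for the non-Multiverso path; the params dict below is a hypothetical placeholder:

params = OrderedDict()
params['W_emb'] = np.random.randn(1000, 128).astype(theano.config.floatX)

# plain Theano shared variables; passing use_mv=True instead would require a
# running Multiverso setup and the dup_shared_var_list / dup_size globals
tparams, dup_tparams = init_tparams(params, use_mv=False)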
Example #2
    def __init__(self, input, n_in, n_out):
        """ Initialize the parameters of the logistic regression

        :type input: theano.tensor.TensorType
        :param input: symbolic variable that describes the input of the
                      architecture (one minibatch)

        :type n_in: int
        :param n_in: number of input units, the dimension of the space in
                     which the datapoints lie

        :type n_out: int
        :param n_out: number of output units, the dimension of the space in
                      which the labels lie

        """
        # start-snippet-1
        # initialize with 0 the weights W as a matrix of shape (n_in, n_out)
        # MULTIVERSO: replace the shared variable with mv_shared
        self.W = sharedvar.mv_shared(
            value=numpy.zeros((n_in, n_out), dtype=theano.config.floatX),
            name='W',
            borrow=True)
        # initialize the biases b as a vector of n_out 0s
        # MULTIVERSO: replace the shared variable with mv_shared
        self.b = sharedvar.mv_shared(
            value=numpy.zeros((n_out,), dtype=theano.config.floatX),
            name='b',
            borrow=True)

        # symbolic expression for computing the matrix of class-membership
        # probabilities, where:
        # W is a matrix whose column-k represents the separation hyperplane
        # for class-k
        # x is a matrix whose row-j represents input training sample-j
        # b is a vector whose element-k represents the free parameter of
        # hyperplane-k
        self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)

        # symbolic description of how to compute prediction as class whose
        # probability is maximal
        self.y_pred = T.argmax(self.p_y_given_x, axis=1)
        # end-snippet-1

        # parameters of the model
        self.params = [self.W, self.b]

        # keep track of model input
        self.input = input
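
A minimal sketch of how this constructor might be exercised, assuming the method belongs to a class named LogisticRegression (as the docstring suggests):

x = T.matrix('x')  # symbolic minibatch of inputs
# LogisticRegression is the assumed enclosing class of the __init__ above
classifier = LogisticRegression(input=x, n_in=28 * 28, n_out=10)
predict = theano.function([x], classifier.y_pred)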
Example #3
import numpy as np
import theano

from multiverso.theano_ext import sharedvar


def momentum(cost, params, learning_rate, momentum):
    grads = theano.grad(cost, params)
    updates = []

    for p, g in zip(params, grads):
        # MULTIVERSO: replace the shared variable with mv_shared
        mparam_i = sharedvar.mv_shared(
            np.zeros(p.get_value().shape, dtype=theano.config.floatX))
        v = momentum * mparam_i - learning_rate * g
        updates.append((mparam_i, v))
        updates.append((p, p + v))

    return updates
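
A minimal sketch of wiring these updates into a training function; x, y, cost, and params are hypothetical placeholders for a model's inputs, loss, and parameter list:

updates = momentum(cost, params, learning_rate=0.01, momentum=0.9)
train = theano.function([x, y], cost, updates=updates)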
Example #4
    def _test_sharedvar(self, row, col):
        W = sharedvar.mv_shared(value=np.zeros((row, col),
                                               dtype=theano.config.floatX),
                                name='W',
                                borrow=True)
        delta = np.array(range(1, row * col + 1),
                         dtype=theano.config.floatX).reshape((row, col))
        train_model = theano.function([], updates=[(W, W + delta)])
        mv.barrier()

        for i in range(100):
            train_model()
            train_model()
            sharedvar.sync_all_mv_shared_vars()
            mv.barrier()
            # to get the newest value, we must sync again
            sharedvar.sync_all_mv_shared_vars()
            # each of workers_num() workers has applied delta twice per
            # outer iteration, so element j should now equal
            # (j + 1) * (i + 1) * 2 * mv.workers_num()
            for j, actual in enumerate(W.get_value().reshape(-1)):
                self.assertEqual((j + 1) * (i + 1) * 2 * mv.workers_num(),
                                 actual)
            mv.barrier()
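
The synchronization pattern the test exercises, sketched in isolation (this assumes a running Multiverso session with multiverso imported as mv; the calls are the same ones used above):

train_model()                          # update the local copy of W
sharedvar.sync_all_mv_shared_vars()    # synchronize the local value with the server
mv.barrier()                           # wait until every worker has synced
sharedvar.sync_all_mv_shared_vars()    # sync again to fetch the aggregated value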
Example #5
def init_weights(shape, name):
    # MULTIVERSO: replace the shared variable with mv_shared
    return sharedvar.mv_shared(floatX(np.random.randn(*shape) * 0.1),
                               name=name)
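
A minimal usage sketch; floatX is not defined in the snippet, so a common definition is assumed here:

import numpy as np
import theano


def floatX(x):
    # assumed helper: cast to Theano's configured float dtype
    return np.asarray(x, dtype=theano.config.floatX)


W_h = init_weights((784, 256), name='W_h')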