Example #1
    def inv_prop(self, state_above):
        if not isinstance(state_above, tuple):
            expected_space = VectorSpace(self.output_space.get_total_dimension())
            state_above = expected_space.format_as(state_above, self.output_space)

        self.output_space.validate(state_above)
        return tuple(layer.inv_prop(state) for layer,state in safe_zip(self.layers, state_above))
Example #2
def test_np_format_as_vector2conv2D():
    vector_space = VectorSpace(dim=8*8*3, sparse=False)
    conv2d_space = Conv2DSpace(shape=(8,8), num_channels=3,
                               axes=('b','c',0,1))
    data = np.arange(5*8*8*3).reshape(5, 8*8*3)
    rval = vector_space.np_format_as(data, conv2d_space)
    assert np.all(rval == data.reshape((5,3,8,8)))
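A note on what this asserts: converting a VectorSpace batch to a Conv2DSpace reshapes the flat rows into the default ('b', 'c', 0, 1) layout and then transposes to the target axes (compare Examples #6 and #7 below); since the target axes here are already ('b', 'c', 0, 1), the whole conversion is one reshape. A minimal NumPy sketch of the same bookkeeping, independent of pylearn2:

import numpy as np

def vector_to_conv2d(data, channels, rows, cols, target_axes):
    # Reshape flat rows into the default ('b', 'c', 0, 1) layout...
    default_axes = ('b', 'c', 0, 1)
    default = data.reshape(data.shape[0], channels, rows, cols)
    # ...then transpose to the requested axis order.
    perm = [default_axes.index(ax) for ax in target_axes]
    return default.transpose(*perm)

data = np.arange(5 * 8 * 8 * 3).reshape(5, 8 * 8 * 3)
assert vector_to_conv2d(data, 3, 8, 8, ('b', 'c', 0, 1)).shape == (5, 3, 8, 8)
assert vector_to_conv2d(data, 3, 8, 8, ('c', 'b', 1, 0)).shape == (3, 5, 8, 8)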
Example #3
    def __init__(self, nvis, nhid, hidden_transition_model, irange=0.05,
                 non_linearity='sigmoid', use_ground_truth=True):
        allowed_non_linearities = {'sigmoid': T.nnet.sigmoid,
                                   'tanh': T.tanh}
        self.nvis = nvis
        self.nhid = nhid
        self.hidden_transition_model = hidden_transition_model
        self.use_ground_truth = use_ground_truth
        self.alpha = sharedX(1)
        self.alpha_decrease_rate = 0.999

        assert non_linearity in allowed_non_linearities
        self.non_linearity = allowed_non_linearities[non_linearity]

        # Space initialization
        self.input_space = VectorSpace(dim=self.nvis)
        self.hidden_space = VectorSpace(dim=self.nhid)
        self.output_space = VectorSpace(dim=1)
        self.input_source = 'features'
        self.target_source = 'targets'

        # Features-to-hidden matrix
        W_value = numpy.random.uniform(low=-irange, high=irange,
                                       size=(self.nvis, self.nhid))
        self.W = sharedX(W_value, name='W')
        # Hidden biases
        b_value = numpy.zeros(self.nhid)
        self.b = sharedX(b_value, name='b')
        # Hidden-to-out matrix
        U_value = numpy.random.uniform(low=-irange, high=irange,
                                       size=(self.nhid, 1))
        self.U = sharedX(U_value, name='U')
        # Output bias
        c_value = numpy.zeros(1)
        self.c = sharedX(c_value, name='c')
Example #4
def test_vector_to_conv_c01b_invertible():

    """
    Tests that the format_as methods between Conv2DSpace
    and VectorSpace are invertible for the ('c', 0, 1, 'b')
    axis format.
    """

    rng = np.random.RandomState([2013, 5, 1])

    batch_size = 3
    rows = 4
    cols = 5
    channels = 2

    conv = Conv2DSpace([rows, cols], channels = channels, axes = ('c', 0, 1, 'b'))
    vec = VectorSpace(conv.get_total_dimension())

    X = conv.make_batch_theano()
    Y = conv.format_as(X, vec)
    Z = vec.format_as(Y, conv)

    A = vec.make_batch_theano()
    B = vec.format_as(A, conv)
    C = conv.format_as(B, vec)

    f = function([X, A], [Z, C])

    X = rng.randn(*(conv.get_origin_batch(batch_size).shape)).astype(X.dtype)
    A = rng.randn(*(vec.get_origin_batch(batch_size).shape)).astype(A.dtype)

    Z, C = f(X,A)

    np.testing.assert_allclose(Z, X)
    np.testing.assert_allclose(C, A)
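The invertibility being tested comes down to both directions being a transpose plus a reshape that exactly undo each other. A plain NumPy sketch, assuming the reshape-through-default-axes semantics the other tests in this collection exhibit:

import numpy as np

rng = np.random.RandomState(0)
conv_batch = rng.randn(2, 4, 5, 3)            # axes ('c', 0, 1, 'b')

# conv -> vector: bring the batch axis first, flatten each example...
as_vec = conv_batch.transpose(3, 0, 1, 2).reshape(3, -1)
# ...vector -> conv: un-flatten and restore ('c', 0, 1, 'b'):
back = as_vec.reshape(3, 2, 4, 5).transpose(1, 2, 3, 0)
assert np.allclose(back, conv_batch)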
Example #5
def simulate(inputs, model):
    space = VectorSpace(inputs.shape[1])
    X = space.get_theano_batch()
    Y = model.fprop(space.format_as(X, model.get_input_space()))
    f = theano.function([X], Y)
    result = []
    for x in xrange(0, len(inputs), 100):
        result.extend(f(inputs[x:x + 100]))
    return result
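simulate compiles the model's fprop once and then streams the design matrix through in slices of 100 rows, so memory use stays bounded however many examples there are; space.format_as bridges the raw VectorSpace batch into whatever input space the model declares. A hypothetical call (test_X and model are illustrative names, not from the original):

# test_X: a 2-D design matrix whose width matches the model's input space
outputs = simulate(test_X, model)
assert len(outputs) == len(test_X)    # one fprop row per input example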
Example #6
def test_np_format_as_conv2d_vector_conv2d():
    conv2d_space1 = Conv2DSpace(shape=(8, 8), num_channels=3,
                                axes=('c', 'b', 1, 0))
    vector_space = VectorSpace(dim=8*8*3, sparse=False)
    conv2d_space0 = Conv2DSpace(shape=(8, 8), num_channels=3,
                                axes=('b', 'c', 0, 1))
    data = np.arange(5*8*8*3).reshape(5, 3, 8, 8)

    vecval = conv2d_space0.np_format_as(data, vector_space)
    rval1 = vector_space.np_format_as(vecval, conv2d_space1)
    rval2 = conv2d_space0.np_format_as(data, conv2d_space1)
    assert np.allclose(rval1, rval2)

    nval = data.transpose(1, 0, 3, 2)
    assert np.allclose(nval, rval1)
Example #7
def test_np_format_as_vector2conv2D():
    vector_space = VectorSpace(dim=8*8*3, sparse=False)
    conv2d_space = Conv2DSpace(shape=(8, 8), num_channels=3,
                               axes=('b', 'c', 0, 1))
    data = np.arange(5*8*8*3).reshape(5, 8*8*3)
    rval = vector_space.np_format_as(data, conv2d_space)

    # Get data in a Conv2DSpace with default axes
    new_axes = conv2d_space.default_axes
    axis_to_shape = {'b': 5, 'c': 3, 0: 8, 1: 8}
    new_shape = tuple([axis_to_shape[ax] for ax in new_axes])
    nval = data.reshape(new_shape)
    # Then transpose
    nval = nval.transpose(*[new_axes.index(ax) for ax in conv2d_space.axes])
    assert np.all(rval == nval)
Example #8
    def __init__(self,
            nvis,
            bias_from_marginals = None):
        """
            nvis: the dimension of the space
            bias_from_marginals: a dataset, whose marginals are used to
                            initialize the visible biases
        """

        self.__dict__.update(locals())
        del self.self
        # Don't serialize the dataset
        del self.bias_from_marginals

        self.space = VectorSpace(nvis)
        self.input_space = self.space

        origin = self.space.get_origin()

        if bias_from_marginals is None:
            init_bias = np.zeros((nvis,))
        else:
            X = bias_from_marginals.get_design_matrix()
            assert X.max() == 1.
            assert X.min() == 0.
            assert not np.any( (X > 0.) * (X < 1.) )

            mean = X.mean(axis=0)

            mean = np.clip(mean, 1e-7, 1-1e-7)

            init_bias = inverse_sigmoid_numpy(mean)

        self.bias = sharedX(init_bias, 'visible_bias')
Example #9
    def set_input_space(self, space):
        self.input_space = space

        if not isinstance(space, Space):
            raise TypeError("Expected Space, got "+
                    str(space)+" of type "+str(type(space)))

        self.input_dim = space.get_total_dimension()
        self.needs_reformat = not isinstance(space, VectorSpace)

        self.desired_space = VectorSpace(self.input_dim)

        if not self.needs_reformat:
            assert self.desired_space == self.input_space

        rng = self.dbm.rng

        if self.irange is not None:
            assert self.sparse_init is None
            W = rng.uniform(-self.irange,self.irange, (self.input_dim,self.n_classes))
        else:
            assert self.sparse_init is not None
            W = np.zeros((self.input_dim, self.n_classes))
            for i in xrange(self.n_classes):
                for j in xrange(self.sparse_init):
                    idx = rng.randint(0, self.input_dim)
                    while W[idx, i] != 0.:
                        idx = rng.randint(0, self.input_dim)
                    W[idx, i] = rng.randn()

        self.W = sharedX(W,  'softmax_W' )

        self._params = [ self.b, self.W ]
Example #10
    def _format_as(self, batch, space):
        raise NotImplementedError()
        if isinstance(space, CompositeSpace):
            pos = 0
            pieces = []
            for component in space.components:
                width = component.get_total_dimension()
                subtensor = batch[:, pos:pos + width]
                pos += width
                formatted = VectorSpace(width).format_as(subtensor, component)
                pieces.append(formatted)
            return tuple(pieces)

        if isinstance(space, Conv2DSpace):
            if space.axes[0] != 'b':
                raise NotImplementedError(
                    "Will need to reshape to ('b',*) then do a dimshuffle. Be sure to make this the inverse of space._format_as(x, self)"
                )
            dims = {
                'b': batch.shape[0],
                'c': space.num_channels,
                0: space.shape[0],
                1: space.shape[1]
            }

            shape = tuple([dims[elem] for elem in space.axes])

            rval = batch.reshape(shape)

            return rval

        raise NotImplementedError(
            "VectorSpace doesn't know how to format as " + str(type(space)))
Example #11
    def __init__(self, classes_number, which_set):
        self.classes_number = classes_number
        self.path = '/home/gortolan/MachineLearning/'
        self.which_set = which_set
        denseMatrix = pickle.load(
            open(self.path + self.which_set + '_cons_small.pkl', "rb"))
        self.x = denseMatrix.X
        self.y = denseMatrix.y
        X_space = VectorSpace(dim=273)
        X_source = 'features'
        Y_space = VectorSpace(dim=32)
        Y_source = 'targets'
        # data_specs pairs a (composite) space with equally-structured
        # source names; VectorSpace itself only takes an integer dim.
        space = CompositeSpace((X_space, Y_space))
        source = (X_source, Y_source)
        self.data_specs = (space, source)
        super(TIMIT, self).__init__(X=self.x, y=self.y, y_labels=32)
Example #12
def test_conditional_initialize_parameters():
    """
    Conditional.initialize_parameters does the following:
    * Set its input_space and ndim attributes
    * Calls its MLP's set_mlp method
    * Sets its MLP's input_space
    * Validates its MLP
    * Sets its params and param names
    """
    mlp = MLP(layers=[Linear(layer_name='h', dim=5, irange=0.01,
                             max_col_norm=0.01)])
    conditional = DummyConditional(mlp=mlp, name='conditional')
    vae = DummyVAE()
    conditional.set_vae(vae)
    input_space = VectorSpace(dim=5)
    conditional.initialize_parameters(input_space=input_space, ndim=5)

    testing.assert_same_object(input_space, conditional.input_space)
    testing.assert_equal(conditional.ndim, 5)
    testing.assert_same_object(mlp.get_mlp(), conditional)
    testing.assert_same_object(mlp.input_space, input_space)
    mlp_params = mlp.get_params()
    conditional_params = conditional.get_params()
    assert all([mp in conditional_params for mp in mlp_params])
    assert all([cp in mlp_params for cp in conditional_params])
Example #13
    def set_input_space(self, space):
        self.input_space = space

        if not isinstance(space, Space):
            raise TypeError("Expected Space, got "+
                    str(space)+" of type "+str(type(space)))

        self.input_dim = space.get_total_dimension()
        self.needs_reformat = not isinstance(space, VectorSpace)

        desired_dim = self.input_dim
        self.desired_space = VectorSpace(desired_dim)

        if not self.needs_reformat:
            assert self.desired_space == self.input_space

        rng = self.mlp.rng

        self._params = []
        V = np.zeros((self.n_classes, self.input_dim),dtype=np.float32)
        self.V = sharedX(V,   self.layer_name + "_V" )

        U = np.identity( self.input_dim)
        self.U = sharedX(U, self.layer_name + "_U")

        Q =  np.zeros((self.input_dim, self.input_dim),dtype=np.float32)
        self.Q = sharedX(Q, self.layer_name + "_Q")

        Ui =  np.identity(self.input_dim,dtype=np.float32)
        self.Ui = sharedX(Ui, self.layer_name + "_Ui")

        self._params = [ self.U, self.Ui, self.V, self.Q]
Example #14
    def __init__(self, nvis, nhid):
        super(AEModel, self).__init__()

        self.nvis = nvis
        self.nhid = nhid

        self.W = sharedX(np.random.uniform(-1e-3, 1e-3, (nhid, nvis)),
                         name="W")
        self.W_prime = self.W.T
        self.theta = sharedX(np.zeros(nhid))
        self.theta_prime = sharedX(np.zeros(nvis))

        self._params = [self.W, self.theta, self.theta_prime]

        self.input_space = VectorSpace(dim=nvis)
        self.output_space = VectorSpace(dim=nhid)
Example #15
    def _build_output_space(self, space):
        if isinstance(space, IndexSpace):
            return VectorSpace(self.dim * space.dim)
        if isinstance(space, CompositeSpace):
            return CompositeSpace(
                [self._build_output_space(c) for c in space.components])
        assert False
Example #16
    def create_input_space(self):
        ws = (self.ws * 2 + 1)
        return CompositeSpace([
            IndexSpace(max_labels=self.vocab_size, dim=ws),
            IndexSpace(max_labels=self.total_feats, dim=self.feat_num),
            VectorSpace(dim=self.extender_dim * ws)
        ])
Example #17
    def __init__(self,
            nvis,
            bias_from_marginals = None):
        """
            nvis: the dimension of the space
            bias_from_marginals: a dataset, whose marginals are used to
                            initialize the visible biases
        """

        self.__dict__.update(locals())
        del self.self
        # Don't serialize the dataset
        del self.bias_from_marginals

        self.space = VectorSpace(nvis)
        self.input_space = self.space

        origin = self.space.get_origin()

        if bias_from_marginals is None:
            init_bias = np.zeros((nvis,))
        else:
            init_bias = init_tanh_bias_from_marginals(bias_from_marginals)

        self.bias = sharedX(init_bias, 'visible_bias')
Example #18
    def test_fprop(self):
        """
        Use an RNN without non-linearity to create the Mersenne numbers
        (2 ** n - 1) to check whether fprop works correctly.
        """
        rnn = RNN(input_space=SequenceSpace(VectorSpace(dim=1)),
                  layers=[
                      Recurrent(dim=1,
                                layer_name='recurrent',
                                irange=0.1,
                                indices=[-1],
                                nonlinearity=lambda x: x)
                  ])
        W, U, b = rnn.layers[0].get_params()
        W.set_value([[1]])
        U.set_value([[2]])

        X_data, X_mask = rnn.get_input_space().make_theano_batch()
        y_hat = rnn.fprop((X_data, X_mask))

        seq_len = 20
        X_data_vals = np.ones((seq_len, seq_len, 1))
        X_mask_vals = np.triu(np.ones((seq_len, seq_len)))

        f = function([X_data, X_mask], y_hat, allow_input_downcast=True)
        np.testing.assert_allclose(2**np.arange(1, seq_len + 1) - 1,
                                   f(X_data_vals, X_mask_vals).flatten())
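Why the Mersenne numbers appear: with W = [[1]], U = [[2]], zero bias and an identity nonlinearity, and assuming the usual recurrence h_t = x_t * W + h_{t-1} * U + b (which the test's parameter choices imply), an all-ones input gives h_t = 1 + 2 * h_{t-1} with h_0 = 0, whose closed form is h_t = 2 ** t - 1. A plain-Python check of that recurrence:

h = 0.0
for t in range(1, 21):
    h = 1.0 * 1.0 + 2.0 * h    # x_t * W + h_{t-1} * U, identity nonlinearity
    assert h == 2 ** t - 1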
Example #19
    def __init__(self,
                 nvis,
                 nhid,
                 init_bias_hid,
                 init_beta,
                 init_scale,
                 min_beta,
                 fixed_point_orthogonalize=False,
                 censor_beta_norms=True):

        self.__dict__.update(locals())
        del self.self

        self.rng = np.random.RandomState([2012, 11, 13])

        self.scale = sharedX(np.zeros((nhid, )) + init_scale)
        self.scale.name = 'scale'

        self.b = sharedX(np.zeros((nhid, )) + init_bias_hid)
        self.b.name = 'b'

        self.beta = sharedX(np.zeros(nvis, ) + init_beta)
        self.beta.name = 'beta'

        self.W = sharedX(
            random_ortho_columns(nvis, nhid, self.beta.get_value(), self.rng))
        self.W.name = 'W'

        self._params = [self.scale, self.W, self.b, self.beta]
        self.input_space = VectorSpace(nvis)
Example #20
def test_set_get_weights_Softmax():
    """
    Tests setting and getting weights for Softmax layer.
    """
    num_classes = 2
    dim = 3
    conv_dim = [3, 4, 5]

    # VectorSpace input space
    layer = Softmax(num_classes, 's', irange=.1)
    softmax_mlp = MLP(layers=[layer], input_space=VectorSpace(dim=dim))
    vec_weights = np.random.randn(dim, num_classes).astype(config.floatX)
    layer.set_weights(vec_weights)
    assert np.allclose(layer.W.get_value(), vec_weights)
    layer.W.set_value(vec_weights)
    assert np.allclose(layer.get_weights(), vec_weights)

    # Conv2DSpace input space
    layer = Softmax(num_classes, 's', irange=.1)
    softmax_mlp = MLP(layers=[layer],
                      input_space=Conv2DSpace(shape=(conv_dim[0], conv_dim[1]),
                      num_channels=conv_dim[2]))
    conv_weights = np.random.randn(conv_dim[0], conv_dim[1], conv_dim[2],
                                   num_classes).astype(config.floatX)
    layer.set_weights(conv_weights.reshape(np.prod(conv_dim), num_classes))
    assert np.allclose(layer.W.get_value(),
                       conv_weights.reshape(np.prod(conv_dim), num_classes))
    layer.W.set_value(conv_weights.reshape(np.prod(conv_dim), num_classes))
    assert np.allclose(layer.get_weights_topo(),
                       np.transpose(conv_weights, axes=(3, 0, 1, 2)))
Example #21
    def __init__(self, mlp, n_classes = None, input_source='features', input_space=None, scale = False):
        """
        Parameters
        ----------
        mlp: Pylearn2 MLP class
            The frame based classifier

        """

        if n_classes is None:
            if hasattr(mlp.layers[-1], 'dim'):
                self.n_classes = mlp.layers[-1].dim
            elif hasattr(mlp.layers[-1], 'n_classes'):
                self.n_classes = mlp.layers[-1].n_classes
            else:
                raise ValueError("n_classes was not provided and couldn't be infered from the mlp's last layer")
        else:
            self.n_classes = n_classes

        self.mlp = mlp
        self.scale = scale
        self.input_source = input_source
        assert isinstance(input_space, FaceTubeSpace)
        self.input_space = input_space
        self.input_size = (input_space.shape[0]
                           * input_space.shape[1]
                           * input_space.num_channels)
        self.output_space = VectorSpace(dim=7)

        #rng = self.mlp.rng
        #self.W = theano.shared(rng.uniform(size=(n_classes, n_classes, n_classes)).astype(config.floatX))
        #self.W.name = 'crf_w'
        self.init_transition_matrix()
        self.name = 'crf'
Example #22
    def __init__(self,
            nvis,
            bias_from_marginals = None):
        """
            nvis: the dimension of the space
            bias_from_marginals: a dataset, whose marginals are used to
                            initialize the visible biases
        """

        self.__dict__.update(locals())
        del self.self
        # Don't serialize the dataset
        del self.bias_from_marginals

        self.space = VectorSpace(nvis)
        self.input_space = self.space

        origin = self.space.get_origin()

        if bias_from_marginals is None:
            init_bias = np.zeros((nvis,))
        else:
            # data is in [-1, 1], but want biases for a sigmoid
            init_bias = init_sigmoid_bias_from_array(bias_from_marginals.X / 2. + 0.5)
            # init_bias =
        self.boltzmann_bias = sharedX(init_bias, 'visible_bias')
Example #23
    def set_input_space(self, space):
        """ Note: this resets parameters! """

        self.input_space = space

        if isinstance(space, VectorSpace):
            self.requires_reformat = False
            self.input_dim = space.dim
        else:
            self.requires_reformat = True
            self.input_dim = space.get_total_dimension()
            self.desired_space = VectorSpace(self.input_dim)

        self.output_space = VectorSpace(self.dim)

        rng = self.dbm.rng
        if self.irange is not None:
            assert self.sparse_init is None
            W = rng.uniform(-self.irange,
                                 self.irange,
                                 (self.input_dim, self.dim)) * \
                    (rng.uniform(0.,1., (self.input_dim, self.dim))
                     < self.include_prob)
        else:
            assert self.sparse_init is not None
            W = np.zeros((self.input_dim, self.dim))
            W *= self.sparse_stdev

        W = sharedX(W)
        W.name = self.layer_name + '_W'

        self.transformer = MatrixMul(W)

        W ,= self.transformer.get_params()
        assert W.name is not None
Example #24
    def get_output_space(self):
        """
        .. todo::

            WRITEME
        """
        return VectorSpace(self.num_classes)
Example #25
    def __init__(self,
                 corruptor,
                 nvis,
                 nhid,
                 act_enc,
                 act_dec,
                 tied_weights=False,
                 irange=1e-3,
                 rng=9001):
        """
        .. todo::

            WRITEME
        """

        # sampling dot only supports tied weights
        assert tied_weights == True

        self.names_to_del = set()

        super(SparseDenoisingAutoencoder,
              self).__init__(corruptor,
                             nvis,
                             nhid,
                             act_enc,
                             act_dec,
                             tied_weights=tied_weights,
                             irange=irange,
                             rng=rng)

        # this step is crucial to save loads of space because w_prime is never used in
        # training the sparse da.
        del self.w_prime

        self.input_space = VectorSpace(nvis, sparse=True)
Example #26
    def __init__(self,
                 load_path=None,
                 from_scipy_sparse_dataset=None,
                 zipped_npy=True):

        self.load_path = load_path
        self.y = None

        if self.load_path is not None:
            if zipped_npy is True:
                logger.info('... loading sparse data set from a zip npy file')
                self.X = scipy.sparse.csr_matrix(numpy.load(
                    gzip.open(load_path)),
                                                 dtype=floatX)
            else:
                logger.info('... loading sparse data set from a npy file')
                self.X = scipy.sparse.csr_matrix(numpy.load(load_path).item(),
                                                 dtype=floatX)
        else:
            logger.info('... building from given sparse dataset')
            self.X = from_scipy_sparse_dataset
            if not scipy.sparse.issparse(from_scipy_sparse_dataset):
                msg = "from_scipy_sparse_dataset is not sparse : %s" \
                      % type(self.X)
                raise TypeError(msg)

        X_space = VectorSpace(dim=self.X.shape[1], sparse=True)
        self.X_space = X_space
        space = self.X_space
        source = 'features'
        self._iter_data_specs = (space, source)
        self.data_specs = (space, source)
Example #27
    def __init__(self,
                 k,
                 nvis,
                 convergence_th=1e-6,
                 max_iter=None,
                 verbose=False):
        """
        Parameters in conf:

        :type k: int
        :param k: number of clusters.

        :type convergence_th: float
        :param convergence_th: threshold of distance to clusters under which
        kmeans stops iterating.

        :type max_iter: int
        :param max_iter: maximum number of iterations. Defaults to infinity.
        """

        Block.__init__(self)
        Model.__init__(self)

        self.input_space = VectorSpace(nvis)

        self.k = k
        self.convergence_th = convergence_th
        if max_iter:
            if max_iter < 0:
                raise Exception('KMeans init: max_iter should be positive.')
            self.max_iter = max_iter
        else:
            self.max_iter = float('inf')

        self.verbose = verbose
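Given the docstring, a hypothetical instantiation (values illustrative; this assumes the snippet is pylearn2's KMeans):

from pylearn2.models.kmeans import KMeans

km = KMeans(k=10, nvis=784, convergence_th=1e-6, max_iter=100)
assert km.input_space.get_total_dimension() == 784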
Example #28
    def set_input_space(self, space):
        self.input_space = space

        if not isinstance(space, Space):
            raise TypeError("Expected Space, got "+
                    str(space)+" of type "+str(type(space)))

        self.input_dim = space.get_total_dimension()
        self.needs_reformat = not isinstance(space, VectorSpace)

        if self.no_affine:
            desired_dim = self.n_classes
            assert self.input_dim == desired_dim
        else:
            desired_dim = self.input_dim
        self.desired_space = VectorSpace(desired_dim)

        if not self.needs_reformat:
            assert self.desired_space == self.input_space

        rng = self.mlp.rng

        if self.irange is not None:
            assert self.istdev is None
            assert self.sparse_init is None
            W = rng.uniform(-self.irange,self.irange, (self.input_dim,self.n_groups,self.n_classes))
        elif self.istdev is not None:
            assert self.sparse_init is None
            W = rng.randn(self.input_dim,self.n_groups,self.n_classes) * self.istdev
        else:
            raise NotImplementedError()

        self.W = sharedX(W,  'softmax_W' )

        self._params = [ self.b, self.W ]
Example #29
def test_np_format_as_vector2conv2d():
    vector_space = VectorSpace(dim=8 * 8 * 3, sparse=False)
    conv2d_space = Conv2DSpace(shape=(8, 8),
                               num_channels=3,
                               axes=('b', 'c', 0, 1))
    data = np.arange(5 * 8 * 8 * 3).reshape(5, 8 * 8 * 3)
    rval = vector_space.np_format_as(data, conv2d_space)

    # Get data in a Conv2DSpace with default axes
    new_axes = conv2d_space.default_axes
    axis_to_shape = {'b': 5, 'c': 3, 0: 8, 1: 8}
    new_shape = tuple([axis_to_shape[ax] for ax in new_axes])
    nval = data.reshape(new_shape)
    # Then transpose
    nval = nval.transpose(*[new_axes.index(ax) for ax in conv2d_space.axes])
    assert np.all(rval == nval)
Example #30
    def set_input_space(self, space):
        """ Note: this resets parameters! """

        self.input_space = space

        if isinstance(space, VectorSpace):
            self.requires_reformat = False
            self.input_dim = space.dim
        else:
            self.requires_reformat = True
            self.input_dim = space.get_total_dimension()
            self.desired_space = VectorSpace(self.input_dim)


        if not (self.detector_layer_dim % self.pool_size == 0):
            raise ValueError("detector_layer_dim = %d, pool_size = %d. Should be divisible but remainder is %d" %
                    (self.detector_layer_dim, self.pool_size, self.detector_layer_dim % self.pool_size))

        self.h_space = VectorSpace(self.detector_layer_dim)
        self.pool_layer_dim = self.detector_layer_dim / self.pool_size
        self.output_space = VectorSpace(self.pool_layer_dim)

        rng = self.dbm.rng
        if self.irange is not None:
            assert self.sparse_init is None
            W = rng.uniform(-self.irange,
                                 self.irange,
                                 (self.input_dim, self.detector_layer_dim)) * \
                    (rng.uniform(0.,1., (self.input_dim, self.detector_layer_dim))
                     < self.include_prob)
        else:
            assert self.sparse_init is not None
            W = np.zeros((self.input_dim, self.detector_layer_dim))
            for i in xrange(self.detector_layer_dim):
                for j in xrange(self.sparse_init):
                    idx = rng.randint(0, self.input_dim)
                    while W[idx, i] != 0:
                        idx = rng.randint(0, self.input_dim)
                    W[idx, i] = rng.randn()

        W = sharedX(W)
        W.name = self.layer_name + '_W'

        self.transformer = MatrixMul(W)

        W ,= self.transformer.get_params()
        assert W.name is not None
Example #31
def test_np_format_as_conv2D_vector_conv2D():
    conv2d_space1 = Conv2DSpace(shape=(8, 8),
                                num_channels=3,
                                axes=('c', 'b', 1, 0))
    vector_space = VectorSpace(dim=8 * 8 * 3, sparse=False)
    conv2d_space0 = Conv2DSpace(shape=(8, 8),
                                num_channels=3,
                                axes=('b', 'c', 0, 1))
    data = np.arange(5 * 8 * 8 * 3).reshape(5, 3, 8, 8)

    vecval = conv2d_space0.np_format_as(data, vector_space)
    rval1 = vector_space.np_format_as(vecval, conv2d_space1)
    rval2 = conv2d_space0.np_format_as(data, conv2d_space1)
    assert np.allclose(rval1, rval2)

    nval = data.transpose(1, 0, 3, 2)
    assert np.allclose(nval, rval1)
Example #32
    def set_input_space(self, space):
        """ Note: this resets parameters! """

        self.input_space = space

        if isinstance(space, VectorSpace):
            self.requires_reformat = False
            self.input_dim = space.dim
        else:
            self.requires_reformat = True
            self.input_dim = space.get_total_dimension()
            self.desired_space = VectorSpace(self.input_dim)

        self.output_space = VectorSpace(self.dim)

        rng = self.dbm.rng
        if self.irange is not None:
            assert self.sparse_init is None
            W = rng.uniform(-self.irange,
                                 self.irange,
                                 (self.input_dim, self.dim)) * \
                    (rng.uniform(0.,1., (self.input_dim, self.dim))
                     < self.include_prob)
        else:
            assert self.sparse_init is not None
            W = np.zeros((self.input_dim, self.dim))
            W *= self.sparse_stdev

        W = sharedX(W)
        W.name = self.layer_name + '_W'

        self.W = W

        if self.sampling_b_stdev is not None:
            self.noisy_sampling_b = sharedX(np.zeros((self.dbm.batch_size, self.dim)))
            self.layer_below.noisy_sampling_b = sharedX(np.zeros((self.dbm.batch_size, self.layer_below.nvis)))
        if self.sampling_W_stdev is not None:
            self.noisy_sampling_W = sharedX(np.zeros((self.input_dim, self.dim)), 'noisy_sampling_W')

        updates = OrderedDict()
        updates[self.boltzmann_b] = self.boltzmann_b
        updates[self.W] = self.W
        updates[self.layer_below.boltzmann_bias] = self.layer_below.boltzmann_bias
        self.censor_updates(updates)
        f = function([], updates=updates)
        f()
Example #33
    def __init__(self, nvis, nhid, nclasses):
        super(MLP, self).__init__()

        self.nvis, self.nhid, self.nclasses = nvis, nhid, nclasses

        self.W = sharedX(numpy.random.normal(scale=0.01,
                                             size=(self.nvis, self.nhid)),
                         name='W')
        self.b = sharedX(numpy.zeros(self.nhid), name='b')
        self.V = sharedX(numpy.random.normal(scale=0.01,
                                             size=(self.nhid, self.nclasses)),
                         name='V')
        self.c = sharedX(numpy.zeros(self.nclasses), name='c')
        self._params = [self.W, self.b, self.V, self.c]

        self.input_space = VectorSpace(dim=self.nvis)
        self.output_space = VectorSpace(dim=self.nclasses)
Example #34
    def __init__(self, dim, layer_name, operation, nonlinearity=None, **kwargs):
        super(GlueLayer, self).__init__(**kwargs)
        self.dim = dim
        self.layer_name = layer_name
        self.nonlinearity = nonlinearity
        self.operation = operation
        self.output_space = VectorSpace(self.dim)
        self._params = []
Example #35
    def __init__(self,
                 n_classes,
                 layer_name,
                 irange=None,
                 sparse_init=None,
                 W_lr_scale=None):

        if isinstance(W_lr_scale, str):
            W_lr_scale = float(W_lr_scale)

        self.__dict__.update(locals())
        del self.self

        assert isinstance(n_classes, int)

        self.output_space = VectorSpace(n_classes)
        self.b = sharedX(np.zeros((n_classes, )), name='softmax_b')
Example #36
def model1():
    #pdb.set_trace()
    # train set X has dim (60,000, 784), y has dim (60,000, 10)
    train_set = MNIST(which_set='train', one_hot=True)
    # test set X has dim (10,000, 784), y has dim (10,000, 10)
    valid_set = MNIST(which_set='test', one_hot=True)
    test_set = MNIST(which_set='test', one_hot=True)

    #import pdb
    #pdb.set_trace()
    #print train_set.X.shape[1]

    # =====<Create the MLP Model>=====

    h2_layer = NoisyRELU(layer_name='h1',
                         sparse_init=15,
                         noise_factor=5,
                         dim=1000,
                         desired_active_rate=0.2,
                         bias_factor=20,
                         max_col_norm=1)
    #h2_layer = RectifiedLinear(layer_name='h2', dim=100, sparse_init=15, max_col_norm=1)
    #print h1_layer.get_params()
    #h2 = RectifiedLinear(layer_name='h2', dim=500, sparse_init=15, max_col_norm=1)
    y_layer = Softmax(layer_name='y', n_classes=10, irange=0., max_col_norm=1)

    mlp = MLP(batch_size=200,
              input_space=VectorSpace(dim=train_set.X.shape[1]),
              layers=[h2_layer, y_layer])

    # =====<Create the SGD algorithm>=====
    sgd = SGD(init_momentum=0.1,
              learning_rate=0.01,
              monitoring_dataset={'valid': valid_set},
              cost=MethodCost('cost_from_X'),
              termination_criterion=MonitorBased(
                  channel_name='valid_y_misclass', prop_decrease=0.001, N=50))
    #sgd.setup(model=mlp, dataset=train_set)

    # =====<Extensions>=====
    ext = [MomentumAdjustor(start=1, saturate=10, final_momentum=0.9)]

    # =====<Create Training Object>=====
    save_path = './mlp_model1.pkl'
    train_obj = Train(dataset=train_set,
                      model=mlp,
                      algorithm=sgd,
                      extensions=ext,
                      save_path=save_path,
                      save_freq=0)
    #train_obj.setup_extensions()

    #import pdb
    #pdb.set_trace()
    train_obj.main_loop()

    # =====<Run the training>=====
Example #37
def test_broadcastable():
    v = VectorSpace(5).make_theano_batch(batch_size=1)
    np.testing.assert_(v.broadcastable[0])
    c = Conv2DSpace((5, 5), channels=3,
                    axes=['c', 0, 1, 'b']).make_theano_batch(batch_size=1)
    np.testing.assert_(c.broadcastable[-1])
    d = Conv2DSpace((5, 5), channels=3,
                    axes=['b', 0, 1, 'c']).make_theano_batch(batch_size=1)
    np.testing.assert_(d.broadcastable[0])
Example #38
    def __init__(self, nvis, nclasses):
        super(LogisticRegression, self).__init__()

        # Number of input nodes
        self.nvis = nvis
        # Number of output nodes
        self.nclasses = nclasses

        W_value = np.random.uniform(size=(self.nvis, self.nclasses))
        self.W = sharedX(W_value, 'W')  # sharedX formats for GPUs

        b_value = np.zeros(self.nclasses)
        self.b = sharedX(b_value, 'b')

        self._params = [self.W, self.b]

        self.input_space = VectorSpace(dim=self.nvis)
        self.output_space = VectorSpace(dim=self.nclasses)
Example #39
    def __init__(self,
                 mlp,
                 input_condition_space,
                 condition_distribution,
                 noise_dim=100,
                 *args,
                 **kwargs):
        super(ConditionalGenerator, self).__init__(mlp, *args, **kwargs)

        self.noise_dim = noise_dim
        self.noise_space = VectorSpace(dim=self.noise_dim)

        self.condition_space = input_condition_space
        self.condition_distribution = condition_distribution

        self.input_space = CompositeSpace(
            [self.noise_space, self.condition_space])
        self.mlp.set_input_space(self.input_space)
Example #40
    def __init__(self, n_classes, layer_name, irange = None,
                 istdev = None,
                 sparse_init = None):

        self.__dict__.update(locals())
        del self.self

        self.output_space = VectorSpace(n_classes)
        self.b = sharedX(np.zeros((n_classes,)), name = 'hingeloss_b')
Example #41
    def __init__(self, nvis, nclasses):
        super(LogisticRegressionLayer, self).__init__()

        assert nvis >= 0, "Number of visible units must be non-negative"
        self.input_space = VectorSpace(nvis)
        self.output_space = VectorSpace(nclasses)
        assert nclasses >= 0, "Number of classes must be non-negative"

        self.nvis = nvis
        self.nclasses = nclasses

        # initialize with 0 the weights W as a matrix of shape (nvis, nclasses)
        self.W = sharedX(numpy.zeros((nvis, nclasses)), name='W', borrow=True)
        # initialize the biases b as a vector of nclasses 0s
        self.b = sharedX(numpy.zeros((nclasses,)), name='b', borrow=True)

        # parameters of the model
        self._params = [self.W, self.b]
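Example #41 only sets the parameters up; the forward pass those parameters feed, under the standard logistic-regression formulation (an assumption here, since the snippet omits fprop), is softmax(x W + b). A plain NumPy sketch:

import numpy as np

def softmax(z):
    e = np.exp(z - z.max(axis=1, keepdims=True))    # subtract max for stability
    return e / e.sum(axis=1, keepdims=True)

nvis, nclasses = 784, 10
W = np.zeros((nvis, nclasses))                      # zero init, as in the layer above
b = np.zeros(nclasses)
x = np.random.RandomState(0).rand(5, nvis)
p = softmax(np.dot(x, W) + b)
assert np.allclose(p.sum(axis=1), 1.0)              # each row is a distribution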
Example #42
    def __init__(self,
                 n_classes,
                 layer_name,
                 C=0.1,
                 irange=None,
                 istdev=None,
                 sparse_init=None,
                 W_lr_scale=None,
                 b_lr_scale=None,
                 max_row_norm=None,
                 no_affine=False,
                 max_col_norm=None,
                 init_bias_target_marginals=None,
                 binary_target_dim=None):

        super(L2SquareHinge, self).__init__()

        if isinstance(W_lr_scale, str):
            W_lr_scale = float(W_lr_scale)

        self.__dict__.update(locals())
        del self.self
        del self.init_bias_target_marginals

        assert isinstance(n_classes, py_integer_types)

        if binary_target_dim is not None:
            assert isinstance(binary_target_dim, py_integer_types)
            self._has_binary_target = True
            self._target_space = IndexSpace(dim=binary_target_dim,
                                            max_labels=n_classes)
        else:
            self._has_binary_target = False

        self.output_space = VectorSpace(n_classes)

        self.b = sharedX(np.zeros((n_classes, )), name='hinge_b')
        if init_bias_target_marginals:
            y = init_bias_target_marginals.y
            if init_bias_target_marginals.y_labels is None:
                marginals = y.mean(axis=0)
            else:
                # compute class frequencies
                if np.max(y.shape) != np.prod(y.shape):
                    raise AssertionError("Use of "
                                         "`init_bias_target_marginals` "
                                         "requires that each example has "
                                         "a single label.")
                marginals = np.bincount(y.flat) / float(y.shape[0])

            assert marginals.ndim == 1
            b = pseudoinverse_softmax_numpy(marginals).astype(self.b.dtype)
            assert b.ndim == 1
            assert b.dtype == self.b.dtype
            self.b.set_value(b)
        else:
            assert init_bias_target_marginals is None
Example #43
def test_finitedataset_source_check():
    """
    Check that the FiniteDatasetIterator returns sensible
    errors when there is a missing source in the dataset.
    """
    dataset = DenseDesignMatrix(X=np.random.rand(20,15).astype(theano.config.floatX),
                                y=np.random.rand(20,5).astype(theano.config.floatX))
    assert_raises(ValueError,
                  dataset.iterator,
                  mode='sequential',
                  batch_size=5,
                  data_specs=(VectorSpace(15),'featuresX'))
    try:
        dataset.iterator(mode='sequential',
                         batch_size=5,
                         data_specs=(VectorSpace(15),'featuresX'))
    except ValueError as e:
        assert 'featuresX' in str(e)
Example #44
    def set_topological_view(self, V, axes=('b', 0, 1, 'c')):
        """
        Sets the dataset to represent V, where V is a batch
        of topological views of examples.

        Parameters
        ----------
        V : ndarray
            An array containing a design matrix representation of training \
            examples.
        axes : WRITEME

        .. todo::

            Why is this parameter named 'V'?
        """
        assert not np.any(np.isnan(V))
        rows = V.shape[axes.index(0)]
        cols = V.shape[axes.index(1)]
        channels = V.shape[axes.index('c')]
        self.view_converter = DefaultViewConverter([rows, cols, channels],
                                                   axes=axes)
        self.X = self.view_converter.topo_view_to_design_mat(V)
        # self.X_topo_space stores a "default" topological space that
        # will be used only when self.iterator is called without a
        # data_specs, and with "topo=True", which is deprecated.
        self.X_topo_space = self.view_converter.topo_space
        assert not np.any(np.isnan(self.X))

        # Update data specs
        X_space = VectorSpace(dim=self.X.shape[1])
        X_source = 'features'
        if self.y is None:
            space = X_space
            source = X_source
        else:
            y_space = VectorSpace(dim=self.y.shape[-1])
            y_source = 'targets'
            space = CompositeSpace((X_space, y_space))
            source = (X_source, y_source)

        self.data_specs = (space, source)
        self.X_space = X_space
        self._iter_data_specs = (X_space, X_source)
Example #45
    def __init__(self,
                 alpha_list=[1.4],
                 beta_list=[0.3],
                 init_state_list=[numpy.array([0, 0])],
                 num_samples=1000,
                 frame_length=1,
                 rng=None):
        # Validate parameters and set member variables
        self.alpha_list = alpha_list
        self.beta_list = beta_list

        if num_samples <= 0:
            raise ValueError("num_samples must be positive.")
        self.num_samples = num_samples
        self.num_examples = len(alpha_list)
        self.frame_length = frame_length

        self.init_state_list = init_state_list

        # Initialize RNG
        if rng is None:
            self.rng = numpy.random.RandomState(self._default_seed)
        else:
            self.rng = numpy.random.RandomState(rng)

        X, y = self._generate_data()
        self.data = (X, y)

        # DataSpecs
        features_space = VectorSpace(dim=2 * self.frame_length)
        features_source = 'features'

        targets_space = VectorSpace(dim=2)
        targets_source = 'targets'

        space = CompositeSpace([features_space, targets_space])
        source = tuple([features_source, targets_source])
        self.data_specs = (space, source)

        # Defaults for iterators
        self._iter_mode = resolve_iterator_class('shuffled_sequential')
        self._iter_data_specs = (CompositeSpace(
            (features_space, targets_space)), (features_source,
                                               targets_source))
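The data_specs pattern used here (and in Examples #11, #26 and #44) always pairs a possibly composite Space with source names of matching structure. A minimal sketch of the pairing:

from pylearn2.space import CompositeSpace, VectorSpace

features_space = VectorSpace(dim=2)
targets_space = VectorSpace(dim=2)

# The space and the source names are parallel structures:
data_specs = (CompositeSpace([features_space, targets_space]),
              ('features', 'targets'))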
Example #46
    def __init__(self, shape, axes=None):
        """
        The arguments describe how the data is laid out in the design matrix.

        Parameters
        ----------

        shape : tuple
          A tuple of 4 ints, describing the shape of each datum.
          This is the size of each axis in <axes>, excluding the 'b' axis.

        axes : tuple
          A tuple of the following elements in any order:
            'b'  batch axis
            's'  stereo axis
             0   image axis 0 (row)
             1   image axis 1 (column)
            'c'  channel axis
        """
        shape = tuple(shape)

        if not all(isinstance(s, int) for s in shape):
            raise TypeError("Shape must be a tuple/list of ints")

        if len(shape) != 4:
            raise ValueError("Shape array needs to be of length 4, got %s." %
                             shape)

        datum_axes = list(axes)
        datum_axes.remove('b')
        if shape[datum_axes.index('s')] != 2:
            raise ValueError("Expected 's' axis to have size 2, got %d.\n"
                             "  axes:       %s\n"
                             "  shape:      %s" %
                             (shape[datum_axes.index('s')],
                              axes,
                              shape))
        self.shape = shape
        self.set_axes(axes)

        def make_conv2d_space(shape, axes):
            shape_axes = list(axes)
            shape_axes.remove('b')
            image_shape = tuple(shape[shape_axes.index(axis)]
                                for axis in (0, 1))
            conv2d_axes = list(axes)
            conv2d_axes.remove('s')
            return Conv2DSpace(shape=image_shape,
                               num_channels=shape[shape_axes.index('c')],
                               axes=conv2d_axes,
                               dtype=None)

        conv2d_space = make_conv2d_space(shape, axes)
        self.topo_space = CompositeSpace((conv2d_space, conv2d_space))
        self.storage_space = VectorSpace(dim=numpy.prod(shape))
Example #47
    def __init__(self,
                 layer_name,
                 num_gates,
                 irange=0.05,
                 routing_protocol='nearest'):

        self.__dict__.update(locals())
        del self.self

        self.output_space = VectorSpace(self.num_gates)
Example #48
    def __init__(self, mlp, input_condition_space, condition_distribution, noise_dim=100, *args, **kwargs):
        super(ConditionalGenerator, self).__init__(mlp, *args, **kwargs)

        self.noise_dim = noise_dim
        self.noise_space = VectorSpace(dim=self.noise_dim)

        self.condition_space = input_condition_space
        self.condition_distribution = condition_distribution

        self.input_space = CompositeSpace([self.noise_space, self.condition_space])
        self.mlp.set_input_space(self.input_space)
Example #49
    def get_weights_topo(self):
        """
        Returns a topological view of the weights, the first half
        corresponds to wxf and the second half to wyf.

        Returns
        -------
        weights : ndarray
            Same as the return value of `get_weights` but formatted as a 4D
            tensor with the axes being (hidden/factor units, rows, columns,
            channels). The number of channels is either 1 or 3
            (because they will be visualized as grayscale or RGB color).
            At the moment the function only supports numbers of factors
            whose square root is an integer.
        """
        if not isinstance(self.input_space.components[0], Conv2DSpace) or not isinstance(
            self.input_space.components[1], Conv2DSpace
        ):
            raise NotImplementedError()
        wxf = self.wxf.get_value(borrow=False).T
        wyf = self.wyf.get_value(borrow=False).T
        convx = self.input_space.components[0]
        convy = self.input_space.components[1]
        vecx = VectorSpace(self.nvisx)
        vecy = VectorSpace(self.nvisy)
        wxf_view = vecx.np_format_as(
            wxf, Conv2DSpace(convx.shape, num_channels=convx.num_channels, axes=("b", 0, 1, "c"))
        )
        wyf_view = vecy.np_format_as(
            wyf, Conv2DSpace(convy.shape, num_channels=convy.num_channels, axes=("b", 0, 1, "c"))
        )
        h = int(numpy.ceil(numpy.sqrt(self.nfac)))
        new_weights = numpy.zeros(
            (wxf_view.shape[0] * 2, wxf_view.shape[1], wxf_view.shape[2], wxf_view.shape[3]), dtype=wxf_view.dtype
        )
        t = 0
        while t < (self.nfac // h):
            filter_pair = numpy.concatenate((wxf_view[h * t : h * (t + 1), ...], wyf_view[h * t : h * (t + 1), ...]), 0)
            new_weights[h * 2 * t : h * 2 * (t + 1), ...] = filter_pair
            t += 1
        return new_weights
Example #50
    def __init__(self, n_classes, layer_name, irange = None,
                 sparse_init = None, W_lr_scale = None):

        if isinstance(W_lr_scale, str):
            W_lr_scale = float(W_lr_scale)

        self.__dict__.update(locals())
        del self.self

        assert isinstance(n_classes, int)

        self.output_space = VectorSpace(n_classes)
        self.b = sharedX( np.zeros((n_classes,)), name = 'softmax_b')
Example #51
    def set_input_space(self, space):
        self.input_space = space
        if not isinstance(space, Space):
            raise TypeError("Expected Space, got "+
                    str(space)+" of type "+str(type(space)))

        self.input_dim = space.get_total_dimension()
        self.needs_reformat = not isinstance(space, VectorSpace)

        if self.no_affine:
            desired_dim = self.n_classes
            assert self.input_dim == desired_dim
        else:
            desired_dim = self.input_dim
        self.desired_space = VectorSpace(desired_dim)

        if not self.needs_reformat:
            assert self.desired_space == self.input_space

        rng = self.mlp.rng

        if self.no_affine:
            self._params = []
        else:
            if self.irange is not None:
                assert self.istdev is None
                assert self.sparse_init is None
                W_cluster = rng.uniform(-self.irange,self.irange, (self.input_dim, self.n_clusters))
                W_class = rng.uniform(-self.irange,self.irange, (self.n_clusters, self.input_dim, self.n_classes))
            elif self.istdev is not None:
                assert self.sparse_init is None
                W_cluster = rng.randn(self.input_dim, self.n_clusters) * self.istdev
                W_class = rng.randn(self.n_clusters, self.input_dim, self.n_classes) * self.istdev
            else:
                raise NotImplementedError()

            # set the extra dummy weights to 0
            for key in self.clusters_scope.keys():
                #print key
                #should probably be reverse
                W_class[int(key), :, :self.clusters_scope[key]] = 0.

            self.W_class = sharedX(W_class,  'softmax_W_class' )
            self.W_cluster = sharedX(W_cluster,  'softmax_W_cluster' )

            self._params = [self.b_class, self.W_class, self.b_cluster, self.W_cluster]
Example #52
class VectorSpaceConverter(mlp.Layer):
    def __init__(self, layer_name):
        self.layer_name = layer_name
        self._params = []

    def set_input_space(self, space):
        self.input_space = space
        self.output_space = VectorSpace(space.get_total_dimension())

    def fprop(self, state_below):
        return self.input_space.format_as(state_below, self.output_space)

    def inv_prop(self, state_above):
        return self.output_space.format_as(state_above, self.input_space)

    def get_weight_decay(self, coeff):
        return 0.0

    def get_l1_weight_decay(self, coeff):
        return 0.0
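This converter carries no parameters; it simply calls format_as in both directions, which makes it useful glue between a topological input space and vector-only layers. A hypothetical placement (assuming pylearn2's MLP and Softmax; shapes are illustrative):

from pylearn2.models.mlp import MLP, Softmax
from pylearn2.space import Conv2DSpace

mlp = MLP(input_space=Conv2DSpace(shape=(8, 8), num_channels=3),
          layers=[VectorSpaceConverter(layer_name='flatten'),
                  Softmax(n_classes=10, layer_name='y', irange=0.05)])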
Example #53
    def __init__(self, shape, axes=None):
        shape = tuple(shape)

        if not all(isinstance(s, int) for s in shape):
            raise TypeError("Shape must be a tuple/list of ints")

        if len(shape) != 4:
            raise ValueError("Shape array needs to be of length 4, got %s." %
                             shape)

        datum_axes = list(axes)
        datum_axes.remove('b')
        if shape[datum_axes.index('s')] != 2:
            raise ValueError("Expected 's' axis to have size 2, got %d.\n"
                             "  axes:       %s\n"
                             "  shape:      %s" %
                             (shape[datum_axes.index('s')],
                              axes,
                              shape))
        self.shape = shape
        self.set_axes(axes)

        def make_conv2d_space(shape, axes):
            shape_axes = list(axes)
            shape_axes.remove('b')
            image_shape = tuple(shape[shape_axes.index(axis)]
                                for axis in (0, 1))
            conv2d_axes = list(axes)
            conv2d_axes.remove('s')
            return Conv2DSpace(shape=image_shape,
                               num_channels=shape[shape_axes.index('c')],
                               axes=conv2d_axes)

        conv2d_space = make_conv2d_space(shape, axes)
        self.topo_space = CompositeSpace((conv2d_space, conv2d_space))
        self.storage_space = VectorSpace(dim=numpy.prod(shape))
Example #54
    def __init__(self, nvis = None, nhid = None,
            vis_space = None,
            hid_space = None,
            transformer = None,
            irange=0.5, rng=None, init_bias_vis = None,
            init_bias_vis_marginals = None, init_bias_hid=0.0,
            base_lr = 1e-3, anneal_start = None, nchains = 100, sml_gibbs_steps = 1,
            random_patches_src = None,
            monitor_reconstruction = False):

        """
        Construct an RBM object.

        Parameters
        ----------
        nvis : int
            Number of visible units in the model.
            (Specifying this implies that the model acts on a vector,
            i.e. it sets vis_space = pylearn2.space.VectorSpace(nvis) )
        nhid : int
            Number of hidden units in the model.
            (Specifying this implies that the model acts on a vector)
        vis_space:
            A pylearn2.space.Space object describing what kind of vector
            space the RBM acts on. Don't specify if you used nvis / hid
        hid_space:
            A pylearn2.space.Space object describing what kind of vector
            space the RBM's hidden units live in. Don't specify if you used
            nvis / nhid
        init_bias_vis_marginals: either None, or a Dataset to use to initialize
            the visible biases to the inverse sigmoid of the data marginals
        irange : float, optional
            The size of the initial interval around 0 for weights.
        rng : RandomState object or seed
            NumPy RandomState object to use when initializing parameters
            of the model, or (integer) seed to use to create one.
        init_bias_vis : array_like, optional
            Initial value of the visible biases, broadcasted as necessary.
        init_bias_hid : array_like, optional
            initial value of the hidden biases, broadcasted as necessary.
        monitor_reconstruction : if True, will request a monitoring channel to monitor
            reconstruction error
        random_patches_src: Either None, or a Dataset from which to draw random patches
            in order to initialize the weights. Patches will be multiplied by irange

        Parameters for default SML learning rule:

            base_lr : the base learning rate
            anneal_start : number of steps after which to start annealing on a 1/t schedule
            nchains: number of negative chains
            sml_gibbs_steps: number of gibbs steps to take per update

        """

        Model.__init__(self)
        Block.__init__(self)

        if init_bias_vis_marginals is not None:
            assert init_bias_vis is None
            X = init_bias_vis_marginals.X
            assert X.min() >= 0.0
            assert X.max() <= 1.0

            marginals = X.mean(axis=0)

            #rescale the marginals a bit to avoid NaNs
            init_bias_vis = inverse_sigmoid_numpy(.01 + .98 * marginals)


        if init_bias_vis is None:
            init_bias_vis = 0.0

        if rng is None:
            # TODO: global rng configuration stuff.
            rng = numpy.random.RandomState(1001)
        self.rng = rng

        if vis_space is None:
            #if we don't specify things in terms of spaces and a transformer,
            #assume dense matrix multiplication and work off of nvis, nhid
            assert hid_space is None
            assert transformer is None or isinstance(transformer,MatrixMul)
            assert nvis is not None
            assert nhid is not None

            if transformer is None:
                if random_patches_src is None:
                    W = rng.uniform(-irange, irange, (nvis, nhid))
                else:
                    if hasattr(random_patches_src, '__array__'):
                        W = irange * random_patches_src.T
                        assert W.shape == (nvis, nhid)
                    else:
                        #assert type(irange) == type(0.01)
                        #assert irange == 0.01
                        W = irange * random_patches_src.get_batch_design(nhid).T

                self.transformer = MatrixMul(  sharedX(
                        W,
                        name='W',
                        borrow=True
                    )
                )
            else:
                self.transformer = transformer

            self.vis_space = VectorSpace(nvis)
            self.hid_space = VectorSpace(nhid)
        else:
            assert hid_space is not None
            assert transformer is not None
            assert nvis is None
            assert nhid is None

            self.vis_space = vis_space
            self.hid_space = hid_space
            self.transformer = transformer


        try:
            b_vis = self.vis_space.get_origin()
            b_vis += init_bias_vis
        except ValueError:
            raise ValueError("bad shape or value for init_bias_vis")
        self.bias_vis = sharedX(b_vis, name='bias_vis', borrow=True)

        try:
            b_hid = self.hid_space.get_origin()
            b_hid += init_bias_hid
        except ValueError:
            raise ValueError('bad shape or value for init_bias_hid')
        self.bias_hid = sharedX(b_hid, name='bias_hid', borrow=True)

        self.random_patches_src = random_patches_src
        self.register_names_to_del(['random_patches_src'])


        self.__dict__.update(nhid=nhid, nvis=nvis)
        self._params = safe_union(self.transformer.get_params(), [self.bias_vis, self.bias_hid])

        self.base_lr = base_lr
        self.anneal_start = anneal_start
        self.nchains = nchains
        self.sml_gibbs_steps = sml_gibbs_steps
Example #55
class RBM(Block, Model):
    """
    A base interface for RBMs, implementing the binary-binary case.

    """
    def __init__(self, nvis = None, nhid = None,
            vis_space = None,
            hid_space = None,
            transformer = None,
            irange=0.5, rng=None, init_bias_vis = None,
            init_bias_vis_marginals = None, init_bias_hid=0.0,
            base_lr = 1e-3, anneal_start = None, nchains = 100, sml_gibbs_steps = 1,
            random_patches_src = None,
            monitor_reconstruction = False):

        """
        Construct an RBM object.

        Parameters
        ----------
        nvis : int
            Number of visible units in the model.
            (Specifying this implies that the model acts on a vector,
            i.e. it sets vis_space = pylearn2.space.VectorSpace(nvis) )
        nhid : int
            Number of hidden units in the model.
            (Specifying this implies that the model acts on a vector)
        vis_space:
            A pylearn2.space.Space object describing what kind of vector
            space the RBM acts on. Don't specify if you used nvis / nhid.
        hid_space:
            A pylearn2.space.Space object describing what kind of vector
            space the RBM's hidden units live in. Don't specify if you used
            nvis / nhid
        init_bias_vis_marginals: either None, or a Dataset to use to initialize
            the visible biases to the inverse sigmoid of the data marginals
        irange : float, optional
            The size of the initial interval around 0 for weights.
        rng : RandomState object or seed
            NumPy RandomState object to use when initializing parameters
            of the model, or (integer) seed to use to create one.
        init_bias_vis : array_like, optional
            Initial value of the visible biases, broadcasted as necessary.
        init_bias_hid : array_like, optional
            Initial value of the hidden biases, broadcasted as necessary.
        monitor_reconstruction : bool, optional
            If True, will request a monitoring channel to monitor
            reconstruction error.
        random_patches_src : Dataset, optional
            Either None, or a Dataset from which to draw random patches in
            order to initialize the weights. Patches will be multiplied by
            irange.

        Parameters for the default SML learning rule:

            base_lr : the base learning rate
            anneal_start : number of steps after which to start annealing
                on a 1/t schedule
            nchains : number of negative chains
            sml_gibbs_steps : number of Gibbs steps to take per update

        """

        Model.__init__(self)
        Block.__init__(self)

        if init_bias_vis_marginals is not None:
            assert init_bias_vis is None
            X = init_bias_vis_marginals.X
            assert X.min() >= 0.0
            assert X.max() <= 1.0

            marginals = X.mean(axis=0)

            #rescale the marginals a bit to avoid NaNs
            init_bias_vis = inverse_sigmoid_numpy(.01 + .98 * marginals)


        if init_bias_vis is None:
            init_bias_vis = 0.0

        if rng is None:
            # TODO: global rng configuration stuff.
            rng = numpy.random.RandomState(1001)
        self.rng = rng

        if vis_space is None:
            #if we don't specify things in terms of spaces and a transformer,
            #assume dense matrix multiplication and work off of nvis, nhid
            assert hid_space is None
            assert transformer is None or isinstance(transformer,MatrixMul)
            assert nvis is not None
            assert nhid is not None

            if transformer is None:
                if random_patches_src is None:
                    W = rng.uniform(-irange, irange, (nvis, nhid))
                else:
                    if hasattr(random_patches_src, '__array__'):
                        W = irange * random_patches_src.T
                        assert W.shape == (nvis, nhid)
                    else:
                        #assert type(irange) == type(0.01)
                        #assert irange == 0.01
                        W = irange * random_patches_src.get_batch_design(nhid).T

                self.transformer = MatrixMul(
                    sharedX(W, name='W', borrow=True)
                )
            else:
                self.transformer = transformer

            self.vis_space = VectorSpace(nvis)
            self.hid_space = VectorSpace(nhid)
        else:
            assert hid_space is not None
            assert transformer is not None
            assert nvis is None
            assert nhid is None

            self.vis_space = vis_space
            self.hid_space = hid_space
            self.transformer = transformer


        try:
            b_vis = self.vis_space.get_origin()
            b_vis += init_bias_vis
        except ValueError:
            raise ValueError("bad shape or value for init_bias_vis")
        self.bias_vis = sharedX(b_vis, name='bias_vis', borrow=True)

        try:
            b_hid = self.hid_space.get_origin()
            b_hid += init_bias_hid
        except ValueError:
            raise ValueError('bad shape or value for init_bias_hid')
        self.bias_hid = sharedX(b_hid, name='bias_hid', borrow=True)

        self.random_patches_src = random_patches_src
        self.register_names_to_del(['random_patches_src'])


        self.__dict__.update(nhid=nhid, nvis=nvis)
        self._params = safe_union(self.transformer.get_params(), [self.bias_vis, self.bias_hid])

        self.base_lr = base_lr
        self.anneal_start = anneal_start
        self.nchains = nchains
        self.sml_gibbs_steps = sml_gibbs_steps

    def get_input_dim(self):
        if not isinstance(self.vis_space, VectorSpace):
            raise TypeError("Can't describe "+str(type(self.vis_space))+" as a dimensionality number.")
        return self.vis_space.dim

    def get_output_dim(self):
        if not isinstance(self.hid_space, VectorSpace):
            raise TypeError("Can't describe "+str(type(self.hid_space))+" as a dimensionality number.")
        return self.hid_space.dim

    def get_input_space(self):
        return self.vis_space

    def get_output_space(self):
        return self.hid_space

    def get_params(self):
        return [param for param in self._params]

    def get_weights(self, borrow=False):

        weights ,= self.transformer.get_params()

        return weights.get_value(borrow=borrow)

    def get_weights_topo(self):
        return self.transformer.get_weights_topo()

    def get_weights_format(self):
        return ['v', 'h']


    def get_monitoring_channels(self, data):
        V = data
        theano_rng = RandomStreams(42)

        #TODO: re-enable this in the case where self.transformer
        #is a matrix multiply
        #norms = theano_norms(self.weights)

        H = self.mean_h_given_v(V)

        h = H.mean(axis=0)

        return {'bias_hid_min': T.min(self.bias_hid),
                'bias_hid_mean': T.mean(self.bias_hid),
                'bias_hid_max': T.max(self.bias_hid),
                'bias_vis_min': T.min(self.bias_vis),
                'bias_vis_mean': T.mean(self.bias_vis),
                'bias_vis_max': T.max(self.bias_vis),
                'h_min': T.min(h),
                'h_mean': T.mean(h),
                'h_max': T.max(h),
                #'W_min': T.min(self.weights),
                #'W_max': T.max(self.weights),
                #'W_norms_min': T.min(norms),
                #'W_norms_max': T.max(norms),
                #'W_norms_mean': T.mean(norms),
                'reconstruction_error': self.reconstruction_error(V, theano_rng)}

    def get_monitoring_data_specs(self):
        """
        Get the data_specs describing the data for get_monitoring_channel.

        This implementation returns specification corresponding to unlabeled
        inputs.
        """
        return (self.get_input_space(), self.get_input_source())

    def ml_gradients(self, pos_v, neg_v):
        """
        Get the contrastive gradients given positive and negative phase
        visible units.

        Parameters
        ----------
        pos_v : tensor_like
            Theano symbolic representing a minibatch on the visible units,
            with the first dimension indexing training examples and the second
            indexing data dimensions (usually actual training data).
        neg_v : tensor_like
            Theano symbolic representing a minibatch on the visible units,
            with the first dimension indexing training examples and the second
            indexing data dimensions (usually reconstructions of the data or
            sampler particles from a persistent Markov chain).

        Returns
        -------
        grads : list
            List of Theano symbolic variables representing gradients with
            respect to model parameters, in the same order as returned by
            `params()`.

        Notes
        -----
        `pos_v` and `neg_v` need not have the same first dimension, i.e.
        minibatch size.
        """

        # taking the mean over each term independently allows for different
        # mini-batch sizes in the positive and negative phase.
        ml_cost = (self.free_energy_given_v(pos_v).mean() -
                   self.free_energy_given_v(neg_v).mean())

        grads = tensor.grad(ml_cost, self.get_params(),
                            consider_constant=[pos_v, neg_v])

        return grads

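    # For reference (a hedged summary, not from the original source): the
    # objective differentiated above is the stochastic maximum-likelihood
    # surrogate
    #     L = mean F(v_pos) - mean F(v_neg),
    # whose gradient yields the usual positive-phase minus negative-phase
    # statistics of energy-based model training.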

    def train_batch(self, dataset, batch_size):
        """ A default learning rule based on SML """
        self.learn_mini_batch(dataset.get_batch_design(batch_size))
        return True

    def learn_mini_batch(self, X):
        """ A default learning rule based on SML """

        if not hasattr(self, 'learn_func'):
            self.redo_theano()

        rval = self.learn_func(X)

        return rval

    def redo_theano(self):
        """ Compiles the theano function for the default learning rule """

        init_names = dir(self)

        minibatch = tensor.matrix()

        optimizer = _SGDOptimizer(self, self.base_lr, self.anneal_start)

        sampler = BlockGibbsSampler(
            self, 0.5 + np.zeros((self.nchains, self.get_input_dim())),
            self.rng, steps=self.sml_gibbs_steps)

        updates = training_updates(visible_batch=minibatch, model=self,
                                   sampler=sampler, optimizer=optimizer)

        self.learn_func = theano.function([minibatch], updates=updates)

        final_names = dir(self)

        self.register_names_to_del([name for name in final_names if name not in init_names])

    def gibbs_step_for_v(self, v, rng):
        """
        Do a round of block Gibbs sampling given visible configuration

        Parameters
        ----------
        v : tensor_like
            Theano symbolic representing the visible unit states for a batch
            of training examples (or negative phase particles), with the
            first dimension indexing training examples and the second
            indexing data dimensions.
        rng : RandomStreams object
            Random number generator to use for sampling the hidden and visible
            units.

        Returns
        -------
        v_sample : tensor_like
            Theano symbolic representing the new visible unit state after one
            round of Gibbs sampling.
        locals : dict
            Contains the following auxiliary state as keys (all symbolics
            except shape tuples):
             * `h_mean`: the returned value from `mean_h_given_v`
             * `h_mean_shape`: shape tuple indicating the size of `h_mean` and
               `h_sample`
             * `h_sample`: the stochastically sampled hidden units
             * `v_mean_shape`: shape tuple indicating the shape of `v_mean` and
               `v_sample`
             * `v_mean`: the returned value from `mean_v_given_h`
             * `v_sample`: the stochastically sampled visible units
        """
        h_mean = self.mean_h_given_v(v)
        assert h_mean.type.dtype == v.type.dtype
        # For binary hidden units
        # TODO: factor further to extend to other kinds of hidden units
        #       (e.g. spike-and-slab)
        h_sample = rng.binomial(size=h_mean.shape, n=1, p=h_mean,
                                dtype=h_mean.type.dtype)
        assert h_sample.type.dtype == v.type.dtype
        # v_mean is always based on h_sample, not h_mean, because we don't
        # want h transmitting more than one bit of information per unit.
        v_mean = self.mean_v_given_h(h_sample)
        assert v_mean.type.dtype == v.type.dtype
        v_sample = self.sample_visibles([v_mean], v_mean.shape, rng)
        assert v_sample.type.dtype == v.type.dtype
        return v_sample, locals()
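
    # For reference (hedged summary of the conditionals used above): in the
    # binary-binary case, p(h_j = 1 | v) = sigmoid(bias_hid_j + (v W)_j) and
    # p(v_i = 1 | h) = sigmoid(bias_vis_i + (h W^T)_i), so one Gibbs round is:
    # sample h ~ p(h | v), then sample v ~ p(v | h).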

    def sample_visibles(self, params, shape, rng):
        """
        Stochastically sample the visible units given hidden unit
        configurations for a set of training examples.

        Parameters
        ----------
        params : list
            List of the necessary parameters to sample :math:`p(v|h)`. In the
            case of a binary-binary RBM this is a single-element list
            containing the symbolic representing :math:`p(v|h)`, as returned
            by `mean_v_given_h`.
        shape : tuple
            Shape of the batch of visible samples to draw.
        rng : RandomStreams object
            Random number generator to use for sampling.

        Returns
        -------
        vprime : tensor_like
            Theano symbolic representing stochastic samples from :math:`p(v|h)`
        """
        v_mean = params[0]
        # Comparing a uniform draw to the mean implements Bernoulli sampling:
        # P(u < p) = p for u ~ Uniform(0, 1).
        return as_floatX(rng.uniform(size=shape) < v_mean)

    def input_to_h_from_v(self, v):
        """
        Compute the affine function (linear map plus bias) that serves as
        input to the hidden layer in an RBM.

        Parameters
        ----------
        v  : tensor_like or list of tensor_likes
            Theano symbolic (or list thereof) representing the one or several
            minibatches on the visible units, with the first dimension indexing
            training examples and the second indexing data dimensions.

        Returns
        -------
        a : tensor_like or list of tensor_likes
            Theano symbolic (or list thereof) representing the input to each
            hidden unit for each training example.
        """

        if isinstance(v, tensor.Variable):
            return self.bias_hid + self.transformer.lmul(v)
        else:
            return [self.input_to_h_from_v(vis) for vis in v]

    def input_to_v_from_h(self, h):
        """
        Compute the affine function (linear map plus bias) that serves as
        input to the visible layer in an RBM.

        Parameters
        ----------
        h  : tensor_like or list of tensor_likes
            Theano symbolic (or list thereof) representing the one or several
            minibatches on the hidden units, with the first dimension indexing
            training examples and the second indexing data dimensions.

        Returns
        -------
        a : tensor_like or list of tensor_likes
            Theano symbolic (or list thereof) representing the input to each
            visible unit for each row of h.
        """
        if isinstance(h, tensor.Variable):
            return self.bias_vis + self.transformer.lmul_T(h)
        else:
            return [self.input_to_v_from_h(hid) for hid in h]

    def upward_pass(self, v):
        """
        wrapper around mean_h_given_v method.  Called when RBM is accessed
        by mlp.HiddenLayer.
        """
        return self.mean_h_given_v(v)

    def mean_h_given_v(self, v):
        """
        Compute the mean activation of the hidden units given visible unit
        configurations for a set of training examples.

        Parameters
        ----------
        v  : tensor_like or list of tensor_likes
            Theano symbolic (or list thereof) representing the visible unit
            states for a batch (or several) of training examples, with the
            first dimension indexing training examples and the second
            indexing data dimensions.

        Returns
        -------
        h : tensor_like or list of tensor_likes
            Theano symbolic (or list thereof) representing the mean
            (deterministic) hidden unit activations given the visible units.
        """
        if isinstance(v, tensor.Variable):
            return nnet.sigmoid(self.input_to_h_from_v(v))
        else:
            return [self.mean_h_given_v(vis) for vis in v]

    def mean_v_given_h(self, h):
        """
        Compute the mean activation of the visibles given hidden unit
        configurations for a set of training examples.

        Parameters
        ----------
        h  : tensor_like or list of tensor_likes
            Theano symbolic (or list thereof) representing the hidden unit
            states for a batch (or several) of training examples, with the
            first dimension indexing training examples and the second indexing
            hidden units.

        Returns
        -------
        vprime : tensor_like or list of tensor_likes
            Theano symbolic (or list thereof) representing the mean
            (deterministic) reconstruction of the visible units given the
            hidden units.
        """
        if isinstance(h, tensor.Variable):
            return nnet.sigmoid(self.input_to_v_from_h(h))
        else:
            return [self.mean_v_given_h(hid) for hid in h]

    def free_energy_given_v(self, v):
        """
        Calculate the free energy of a visible unit configuration by
        marginalizing over the hidden units.

        Parameters
        ----------
        v : tensor_like
            Theano symbolic representing the visible unit states for a batch
            of training examples, with the first dimension indexing training
            examples and the second indexing data dimensions.

        Returns
        -------
        f : tensor_like
            1-dimensional tensor (vector) representing the free energy
            associated with each row of v.
        """
        sigmoid_arg = self.input_to_h_from_v(v)
        return (-tensor.dot(v, self.bias_vis) -
                 nnet.softplus(sigmoid_arg).sum(axis=1))
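
    # For reference: the expression above is the standard binary-binary RBM
    # free energy, obtained by analytically summing out the hidden units:
    #     F(v) = -v . bias_vis - sum_j softplus(bias_hid_j + (v W)_j)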

    def free_energy(self, V):
        return self.free_energy_given_v(V)


    def free_energy_given_h(self, h):
        """
        Calculate the free energy of a hidden unit configuration by
        marginalizing over the visible units.

        Parameters
        ----------
        h : tensor_like
            Theano symbolic representing the hidden unit states, with the
            first dimension indexing training examples and the second
            indexing data dimensions.

        Returns
        -------
        f : tensor_like
            1-dimensional tensor (vector) representing the free energy
            associated with each row of h.
        """
        sigmoid_arg = self.input_to_v_from_h(h)
        return (-tensor.dot(h, self.bias_hid) -
                nnet.softplus(sigmoid_arg).sum(axis=1))

    def __call__(self, v):
        """
        Forward propagate (symbolic) input through this module, obtaining
        a representation to pass on to layers above.

        This just aliases the `mean_h_given_v()` function for syntactic
        sugar/convenience.
        """
        return self.mean_h_given_v(v)

    def reconstruction_error(self, v, rng):
        """
        Compute the mean-squared error (mean over examples, sum over units)
        across a minibatch after one round of Gibbs sampling starting from
        the training data.

        Parameters
        ----------
        v : tensor_like
            Theano symbolic representing the visible unit states for a batch
            of training examples, with the first dimension indexing training
            examples and the second indexing data dimensions.
        rng : RandomStreams object
            Random number generator to use for sampling the hidden and visible
            units.

        Returns
        -------
        mse : tensor_like
            0-dimensional tensor (essentially a scalar) indicating the mean
            reconstruction error across the minibatch.

        Notes
        -----
        The reconstruction used to assess error samples only the hidden
        units; the visible units are reconstructed with their conditional
        mean rather than sampled, to reduce noise in the estimate.
        """
        sample, _locals = self.gibbs_step_for_v(v, rng)
        return ((_locals['v_mean'] - v) ** 2).sum(axis=1).mean()
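
# A minimal usage sketch (not part of the original source): build the RBM
# defined above and apply one default-SML update to a fake binary batch.
# Assumes the module-level helpers used by redo_theano (np, _SGDOptimizer,
# BlockGibbsSampler, training_updates) are importable; all sizes below are
# illustrative.
if __name__ == '__main__':
    import numpy as np
    import theano

    rbm = RBM(nvis=784, nhid=256, irange=0.05, base_lr=1e-3,
              nchains=100, sml_gibbs_steps=1)
    X = (np.random.rand(20, 784) > 0.5).astype(theano.config.floatX)
    rbm.learn_mini_batch(X)  # compiles the theano learn_func on first call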
Example #56
class StereoViewConverter(object):
    """
    Converts stereo image data between two formats:

    #. A dense design matrix, one stereo pair per row (`VectorSpace`)
    #. An image pair (`CompositeSpace` of two `Conv2DSpace`)

    The arguments describe how the data is laid out in the design matrix.

    Parameters
    ----------
    shape: tuple
        A tuple of 4 ints, describing the shape of each datum: the size of
        each axis in `axes`, excluding the `b` axis.
    axes : tuple
        Tuple of the following elements in any order:

        * 'b' : batch axis
        * 's' : stereo axis
        *  0  : image axis 0 (row)
        *  1  : image axis 1 (column)
        * 'c' : channel axis
    """
    def __init__(self, shape, axes=None):
        shape = tuple(shape)

        if not all(isinstance(s, int) for s in shape):
            raise TypeError("Shape must be a tuple/list of ints")

        if len(shape) != 4:
            raise ValueError("Shape array needs to be of length 4, got %s." %
                             shape)

        if axes is None:
            raise ValueError("axes must be specified")

        datum_axes = list(axes)
        datum_axes.remove('b')
        if shape[datum_axes.index('s')] != 2:
            raise ValueError("Expected 's' axis to have size 2, got %d.\n"
                             "  axes:       %s\n"
                             "  shape:      %s" %
                             (shape[datum_axes.index('s')],
                              axes,
                              shape))
        self.shape = shape
        self.set_axes(axes)

        def make_conv2d_space(shape, axes):
            shape_axes = list(axes)
            shape_axes.remove('b')
            image_shape = tuple(shape[shape_axes.index(axis)]
                                for axis in (0, 1))
            conv2d_axes = list(axes)
            conv2d_axes.remove('s')
            return Conv2DSpace(shape=image_shape,
                               num_channels=shape[shape_axes.index('c')],
                               axes=conv2d_axes)

        conv2d_space = make_conv2d_space(shape, axes)
        self.topo_space = CompositeSpace((conv2d_space, conv2d_space))
        self.storage_space = VectorSpace(dim=numpy.prod(shape))

    def get_formatted_batch(self, batch, space):
        """
        .. todo::

            WRITEME
        """
        return self.storage_space.np_format_as(batch, space)

    def design_mat_to_topo_view(self, design_mat):
        """
        Called by DenseDesignMatrix.get_formatted_view(), get_batch_topo()
        """
        return self.storage_space.np_format_as(design_mat, self.topo_space)

    def design_mat_to_weights_view(self, design_mat):
        """
        Called by DenseDesignMatrix.get_weights_view()
        """
        return self.design_mat_to_topo_view(design_mat)

    def topo_view_to_design_mat(self, topo_batch):
        """
        Used by `DenseDesignMatrix.set_topological_view()` and
        `DenseDesignMatrix.get_design_mat()`.
        """
        return self.topo_space.np_format_as(topo_batch, self.storage_space)

    def view_shape(self):
        """
        .. todo::

            WRITEME
        """
        return self.shape

    def weights_view_shape(self):
        """
        .. todo::

            WRITEME
        """
        return self.view_shape()

    def set_axes(self, axes):
        """
        .. todo::

            WRITEME
        """
        axes = tuple(axes)

        if len(axes) != 5:
            raise ValueError("Axes must have 5 elements; got %s" % str(axes))

        for required_axis in ('b', 's', 0, 1, 'c'):
            if required_axis not in axes:
                raise ValueError("Axes must contain 'b', 's', 0, 1, and 'c'. "
                                 "Got %s." % str(axes))

        if axes.index('b') != 0:
            raise ValueError("The 'b' axis must come first (axes = %s)." %
                             str(axes))

        def get_batchless_axes(axes):
            axes = list(axes)
            axes.remove('b')
            return tuple(axes)

        if hasattr(self, 'axes'):
            # Reorders the shape vector to match the new axis ordering.
            assert hasattr(self, 'shape')
            old_axes = get_batchless_axes(self.axes)
            new_axes = get_batchless_axes(axes)
            new_shape = tuple(self.shape[old_axes.index(a)] for a in new_axes)
            self.shape = new_shape

        self.axes = axes
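
# A hedged usage sketch (not part of the original source): round-trip a fake
# batch of stereo data through the converter. The shape and axes below are
# illustrative assumptions.
if __name__ == '__main__':
    import numpy

    converter = StereoViewConverter(shape=(2, 32, 32, 3),
                                    axes=('b', 's', 0, 1, 'c'))
    design = numpy.zeros((5, 2 * 32 * 32 * 3))  # 5 stereo pairs, flattened
    left, right = converter.design_mat_to_topo_view(design)
    assert left.shape == (5, 32, 32, 3)  # one ('b', 0, 1, 'c') batch per eye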
Example #57
class Softmax(Layer):

    def __init__(self, n_classes, layer_name, irange=None, istdev=None,
                 sparse_init=None, W_lr_scale=None, b_lr_scale=None,
                 max_row_norm=None):
        """
        """

        if isinstance(W_lr_scale, str):
            W_lr_scale = float(W_lr_scale)

        self.__dict__.update(locals())
        del self.self

        assert isinstance(n_classes, int)

        self.output_space = VectorSpace(n_classes)
        self.b = sharedX(np.zeros((n_classes,)), name='softmax_b')

    def get_lr_scalers(self):

        rval = OrderedDict()

        if self.W_lr_scale is not None:
            assert isinstance(self.W_lr_scale, float)
            rval[self.W] = self.W_lr_scale

        if not hasattr(self, 'b_lr_scale'):
            self.b_lr_scale = None

        if self.b_lr_scale is not None:
            assert isinstance(self.b_lr_scale, float)
            rval[self.b] = self.b_lr_scale

        return rval

    def get_monitoring_channels_from_state(self, state, target=None):

        mx = state.max(axis=1)

        rval = OrderedDict([('mean_max_class', mx.mean()),
                            ('max_max_class', mx.max()),
                            ('min_max_class', mx.min())])

        if target is not None:
            y_hat = T.argmax(state, axis=1)
            y = T.argmax(target, axis=1)
            misclass = T.neq(y, y_hat).mean()
            misclass = T.cast(misclass, config.floatX)
            rval['misclass'] = misclass

        return rval

    def set_input_space(self, space):
        self.input_space = space

        if not isinstance(space, Space):
            raise TypeError("Expected Space, got "+
                    str(space)+" of type "+str(type(space)))

        self.input_dim = space.get_total_dimension()
        self.needs_reformat = not isinstance(space, VectorSpace)

        self.desired_space = VectorSpace(self.input_dim)

        if not self.needs_reformat:
            assert self.desired_space == self.input_space

        rng = self.mlp.rng

        if self.irange is not None:
            assert self.istdev is None
            assert self.sparse_init is None
            W = rng.uniform(-self.irange, self.irange,
                            (self.input_dim, self.n_classes))
        elif self.istdev is not None:
            assert self.sparse_init is None
            W = rng.randn(self.input_dim, self.n_classes) * self.istdev
        else:
            assert self.sparse_init is not None
            W = np.zeros((self.input_dim, self.n_classes))
            for i in xrange(self.n_classes):
                for j in xrange(self.sparse_init):
                    idx = rng.randint(0, self.input_dim)
                    while W[idx, i] != 0.:
                        idx = rng.randint(0, self.input_dim)
                    W[idx, i] = rng.randn()

        self.W = sharedX(W, 'softmax_W')

        self._params = [self.b, self.W]

    def get_weights_topo(self):
        if not isinstance(self.input_space, Conv2DSpace):
            raise NotImplementedError()
        desired = self.W.get_value().T
        ipt = self.desired_space.format_as(desired, self.input_space)
        rval = Conv2DSpace.convert_numpy(ipt, self.input_space.axes, ('b', 0, 1, 'c'))
        return rval

    def get_weights(self):
        if not isinstance(self.input_space, VectorSpace):
            raise NotImplementedError()

        return self.W.get_value()

    def set_weights(self, weights):
        self.W.set_value(weights)

    def set_biases(self, biases):
        self.b.set_value(biases)

    def get_biases(self):
        return self.b.get_value()

    def get_weights_format(self):
        return ('v', 'h')

    def fprop(self, state_below):

        self.input_space.validate(state_below)

        if self.needs_reformat:
            state_below = self.input_space.format_as(state_below, self.desired_space)

        for value in get_debug_values(state_below):
            if value.shape[0] != self.mlp.batch_size:
                raise ValueError("state_below should have batch size " +
                                 str(self.mlp.batch_size) + " but has " +
                                 str(value.shape[0]))

        self.desired_space.validate(state_below)

        assert self.W.ndim == 2
        assert state_below.ndim == 2

        b = self.b

        Z = T.dot(state_below, self.W) + b

        rval = T.nnet.softmax(Z)

        for value in get_debug_values(rval):
            assert value.shape[0] == self.mlp.batch_size

        return rval

    def cost(self, Y, Y_hat):
        """
        Y must be one-hot binary. Y_hat is a softmax estimate.
        of Y. Returns negative log probability of Y under the Y_hat
        distribution.
        """

        assert hasattr(Y_hat, 'owner')
        owner = Y_hat.owner
        assert owner is not None
        op = owner.op
        if isinstance(op, Print):
            assert len(owner.inputs) == 1
            Y_hat, = owner.inputs
            owner = Y_hat.owner
            op = owner.op
        assert isinstance(op, T.nnet.Softmax)
        z, = owner.inputs
        assert z.ndim == 2

        z = z - z.max(axis=1).dimshuffle(0, 'x')
        log_prob = z - T.log(T.exp(z).sum(axis=1).dimshuffle(0, 'x'))
        # we use sum and not mean because this is really one variable per row
        log_prob_of = (Y * log_prob).sum(axis=1)
        assert log_prob_of.ndim == 1

        rval = log_prob_of.mean()

        return -rval

    def get_weight_decay(self, coeff):
        if isinstance(coeff, str):
            coeff = float(coeff)
        assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
        return coeff * T.sqr(self.W).sum()

    def censor_updates(self, updates):
        if self.max_row_norm is not None:
            W = self.W
            if W in updates:
                updated_W = updates[W]
                row_norms = T.sqrt(T.sum(T.sqr(updated_W), axis=1))
                desired_norms = T.clip(row_norms, 0, self.max_row_norm)
                updates[W] = updated_W * (desired_norms / (1e-7 + row_norms)).dimshuffle(0, 'x')
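
# A small numpy illustration (not from the original source) of the
# max-subtraction trick used in Softmax.cost above: shifting each row of
# logits by its maximum leaves log-softmax unchanged while keeping exp()
# from overflowing.
if __name__ == '__main__':
    import numpy as np

    z = np.array([[1000., 1001., 1002.]])
    z = z - z.max(axis=1, keepdims=True)
    log_prob = z - np.log(np.exp(z).sum(axis=1, keepdims=True))
    assert np.all(np.isfinite(log_prob))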
Example #58
def test_np_format_as_vector2vector():
    vector_space_initial = VectorSpace(dim=8*8*3, sparse=False)
    vector_space_final = VectorSpace(dim=8*8*3, sparse=False)
    data = np.arange(5*8*8*3).reshape(5, 8*8*3)
    rval = vector_space_initial.np_format_as(data, vector_space_final)
    assert np.all(rval == data)
Example #59
class ConditionalGenerator(Generator):
    def __init__(self, mlp, input_condition_space, condition_distribution, noise_dim=100, *args, **kwargs):
        super(ConditionalGenerator, self).__init__(mlp, *args, **kwargs)

        self.noise_dim = noise_dim
        self.noise_space = VectorSpace(dim=self.noise_dim)

        self.condition_space = input_condition_space
        self.condition_distribution = condition_distribution

        self.input_space = CompositeSpace([self.noise_space, self.condition_space])
        self.mlp.set_input_space(self.input_space)

    def sample_and_noise(
        self, conditional_data, default_input_include_prob=1.0, default_input_scale=1.0, all_g_layers=False
    ):
        """
        Retrieve a sample (and the noise used to generate the sample)
        conditioned on some input data.

        Parameters
        ----------
        conditional_data: member of self.condition_space
            A minibatch of conditional data to feedforward.

        default_input_include_prob: float
            Probability of including each input unit when dropout is
            applied in the forward pass (passed to `mlp.dropout_fprop`).

        default_input_scale: float
            Scale factor applied to included inputs under dropout
            (passed to `mlp.dropout_fprop`).

        all_g_layers: boolean
            If true, return all generator layers in `other_layers` slot
            of this method's return value. (Otherwise returns `None` in
            this slot.)

        Returns
        -------
        net_output: 4-tuple
            Tuple of the form `(sample, noise, conditional_data,
            other_layers)`.
        """

        if isinstance(conditional_data, int):
            conditional_data = self.condition_distribution.sample(conditional_data)

        num_samples = conditional_data.shape[0]

        noise = self.get_noise((num_samples, self.noise_dim))
        # TODO necessary?
        formatted_noise = self.noise_space.format_as(noise, self.noise_space)

        # Build inputs: concatenate noise with conditional data
        inputs = (formatted_noise, conditional_data)

        # Feedforward
        # if all_g_layers:
        #     rval = self.mlp.dropout_fprop(inputs, default_input_include_prob=default_input_include_prob,
        #                                   default_input_scale=default_input_scale, return_all=all_g_layers)
        #     other_layers, rval = rval[:-1], rval[-1]
        # else:
        rval = self.mlp.dropout_fprop(
            inputs, default_input_include_prob=default_input_include_prob, default_input_scale=default_input_scale
        )
        # other_layers = None

        return rval, formatted_noise, conditional_data, None  # , other_layers

    def sample(self, conditional_data, **kwargs):
        sample, _, _, _ = self.sample_and_noise(conditional_data, **kwargs)
        return sample

    def get_monitoring_channels(self, data):
        if data is None:
            m = 100
            conditional_data = self.condition_distribution.sample(m)
        else:
            _, conditional_data = data
            m = conditional_data.shape[0]

        noise = self.get_noise((m, self.noise_dim))
        rval = OrderedDict()

        sampled_data = (noise, conditional_data)
        try:
            rval.update(self.mlp.get_monitoring_channels((sampled_data, None)))
        except Exception:
            warnings.warn("something went wrong with generator.mlp's monitoring channels")

        if self.monitor_ll:
            rval["ll"] = T.cast(self.ll(data, self.ll_n_samples, self.ll_sigma), theano.config.floatX).mean()
            rval["nll"] = -rval["ll"]
        return rval

    def ll(self, data, n_samples, sigma):
        real_data, conditional_data = data
        sampled_data = self.sample(conditional_data)

        output_space = self.mlp.get_output_space()
        if "Conv2D" in str(output_space):
            samples = output_space.convert(sampled_data, output_space.axes, ("b", 0, 1, "c"))
            samples = samples.flatten(2)
            data = output_space.convert(real_data, output_space.axes, ("b", 0, 1, "c"))
            data = data.flatten(2)
        parzen = theano_parzen(data, samples, sigma)
        return parzen
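
# A hedged numpy sketch of what a Parzen-window estimator such as
# `theano_parzen` typically computes (an assumption about its behaviour,
# not its actual implementation): each data point is scored under an
# isotropic Gaussian kernel density centred on the generated samples.
import numpy as np

def parzen_log_likelihood(data, samples, sigma):
    # data: (n, d) points to score; samples: (m, d) kernel centres
    diffs = (data[:, None, :] - samples[None, :, :]) / sigma   # (n, m, d)
    sq = -0.5 * (diffs ** 2).sum(axis=2)                       # (n, m)
    # log-mean-exp over the m kernels, computed stably
    mx = sq.max(axis=1, keepdims=True)
    log_mean = (mx + np.log(np.exp(sq - mx).mean(axis=1,
                                                 keepdims=True))).ravel()
    # subtract the shared Gaussian normalising constant
    d = data.shape[1]
    return log_mean - d * np.log(sigma * np.sqrt(2.0 * np.pi))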