Example #1
 def __init__(self,
              compressor,
              discriminator,
              monitor_compressor=False,
              monitor_discriminator=False):
     Model.__init__(self)
     self.__dict__.update(locals())
     del self.self
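
A side note on the idiom shared by many of these constructors: self.__dict__.update(locals()) copies every constructor argument onto the instance in one line, and del self.self then removes the bogus self attribute (and reference cycle) that locals() drags along. A minimal, self-contained sketch of the pattern; the Holder class and its arguments are illustrative stand-ins, not pylearn2 code:

    class Holder(object):
        """Illustrative stand-in for a pylearn2-style Model subclass."""

        def __init__(self, compressor=None, discriminator=None,
                     monitor_compressor=False, monitor_discriminator=False):
            # locals() is {'self': ..., 'compressor': ..., ...}; updating
            # __dict__ turns each argument into an instance attribute.
            self.__dict__.update(locals())
            # Without this, the instance would reference itself as
            # self.self, creating a reference cycle.
            del self.self

    h = Holder(compressor="c", monitor_compressor=True)
    assert h.compressor == "c"
    assert h.monitor_compressor is True
    assert not hasattr(h, "self")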
Example #2
 def __init__(self, generator, discriminator, inferer=None,
              inference_monitoring_batch_size=128,
              monitor_generator=True,
              monitor_discriminator=True,
              monitor_inference=True,
             shrink_d=0.):
     Model.__init__(self)
     self.__dict__.update(locals())
     del self.self
Example #3
 def __init__(self,
              mlp,
              noise="gaussian",
              monitor_ll=False,
              ll_n_samples=100,
              ll_sigma=0.2):
     Model.__init__(self)
     self.__dict__.update(locals())
     del self.self
     self.theano_rng = MRG_RandomStreams(2014 * 5 + 27)
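
The hard-coded seed (2014 * 5 + 27 = 10097) makes the model's Theano sampling reproducible across runs. A minimal sketch of how such a stream is typically consumed, assuming Theano is installed; the sample_noise function is illustrative:

    import theano
    from theano.sandbox.rng_mrg import MRG_RandomStreams

    # Fixed seed, as in the example above: repeated runs draw the
    # same noise sequence.
    theano_rng = MRG_RandomStreams(2014 * 5 + 27)

    # Symbolic Gaussian noise with std matching ll_sigma above.
    noise = theano_rng.normal(size=(4,), avg=0.0, std=0.2)
    sample_noise = theano.function([], noise)
    print(sample_noise())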
Example #4
 def __init__(self, corruptor, nvis, nhid, act_enc, act_dec, 
              tied_weights=True, irange=0.05, 
              rng=9001, weights_nonnegative=False, extensions=None):
     super(ICMC, self).__init__(corruptor, nvis, nhid, act_enc, act_dec, 
                                tied_weights, irange, rng)
     Model.__init__(self, extensions)
     if weights_nonnegative:
         self._params[2].set_value((self._params[2]+irange/2.0).eval())
         if not tied_weights:
             self._params[3].set_value((self._params[3]+irange/2.0).eval())
     self.weights_nonnegative = weights_nonnegative        
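
The weights_nonnegative branch shifts the freshly initialized weight matrices by irange / 2. Since the parent class draws weights uniformly from (-irange, irange), the shift recenters them around +irange/2 (roughly the interval (-irange/2, 3*irange/2)); it biases the weights toward positive values rather than strictly clipping them to be nonnegative. A small numpy illustration of that shift:

    import numpy as np

    irange = 0.05
    rng = np.random.RandomState(9001)

    # Base initialization: uniform in (-irange, irange).
    W = rng.uniform(-irange, irange, size=(4, 3))

    # The weights_nonnegative shift from the example above.
    W_shifted = W + irange / 2.0

    print(W.mean(), W_shifted.mean())  # mean moves up by about irange/2
    print(W_shifted.min())             # can still be slightly negative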
Example #5
 def __init__(self,
              generator,
              discriminator,
              inferer=None,
              inference_monitoring_batch_size=128,
              monitor_generator=True,
              monitor_discriminator=True,
              monitor_inference=True,
              shrink_d=0.):
     Model.__init__(self)
     self.__dict__.update(locals())
     del self.self
Example #6
 def __init__(self, compressor, discriminator, 
             monitor_compressor=False, 
             monitor_discriminator=False):
     Model.__init__(self)
     self.__dict__.update(locals())
     del self.self
Example #7
File: rbm.py  Project: mruan/pylearn2
    def __init__(self, nvis=None, nhid=None,
            vis_space=None,
            hid_space=None,
            transformer=None,
            irange=0.5, rng=None, init_bias_vis=None,
            init_bias_vis_marginals=None, init_bias_hid=0.0,
            base_lr=1e-3, anneal_start=None, nchains=100, sml_gibbs_steps=1,
            random_patches_src=None,
            monitor_reconstruction=False):

        """
        Construct an RBM object.

        Parameters
        ----------
        nvis : int
            Number of visible units in the model.
            (Specifying this implies that the model acts on a vector,
            i.e. it sets vis_space = pylearn2.space.VectorSpace(nvis) )
        nhid : int
            Number of hidden units in the model.
            (Specifying this implies that the model acts on a vector)
        vis_space : pylearn2.space.Space, optional
            A pylearn2.space.Space object describing what kind of vector
            space the RBM acts on. Don't specify if you used nvis / nhid.
        hid_space : pylearn2.space.Space, optional
            A pylearn2.space.Space object describing what kind of vector
            space the RBM's hidden units live in. Don't specify if you used
            nvis / nhid.
        init_bias_vis_marginals : Dataset, optional
            Either None, or a Dataset used to initialize the visible biases
            to the inverse sigmoid of the data marginals.
        irange : float, optional
            The size of the initial interval around 0 for weights.
        rng : RandomState object or seed
            NumPy RandomState object to use when initializing parameters
            of the model, or (integer) seed to use to create one.
        init_bias_vis : array_like, optional
            Initial value of the visible biases, broadcasted as necessary.
        init_bias_hid : array_like, optional
            initial value of the hidden biases, broadcasted as necessary.
        monitor_reconstruction : bool, optional
            If True, will request a monitoring channel to monitor
            reconstruction error.
        random_patches_src : Dataset, optional
            Either None, or a Dataset from which to draw random patches
            in order to initialize the weights. Patches will be multiplied
            by irange.

        Parameters for default SML learning rule:

            base_lr : the base learning rate
            anneal_start : number of steps after which to start annealing on a 1/t schedule
            nchains: number of negative chains
            sml_gibbs_steps: number of Gibbs steps to take per update

        """

        Model.__init__(self)
        Block.__init__(self)

        if init_bias_vis_marginals is not None:
            assert init_bias_vis is None
            X = init_bias_vis_marginals.X
            assert X.min() >= 0.0
            assert X.max() <= 1.0

            marginals = X.mean(axis=0)

            #rescale the marginals a bit to avoid NaNs
            init_bias_vis = inverse_sigmoid_numpy(.01 + .98 * marginals)


        if init_bias_vis is None:
            init_bias_vis = 0.0

        if rng is None:
            # TODO: global rng configuration stuff.
            rng = numpy.random.RandomState(1001)
        self.rng = rng

        if vis_space is None:
            #if we don't specify things in terms of spaces and a transformer,
            #assume dense matrix multiplication and work off of nvis, nhid
            assert hid_space is None
            assert transformer is None or isinstance(transformer,MatrixMul)
            assert nvis is not None
            assert nhid is not None

            if transformer is None:
                if random_patches_src is None:
                    W = rng.uniform(-irange, irange, (nvis, nhid))
                else:
                    if hasattr(random_patches_src, '__array__'):
                        W = irange * random_patches_src.T
                        assert W.shape == (nvis, nhid)
                    else:
                        #assert type(irange) == type(0.01)
                        #assert irange == 0.01
                        W = irange * random_patches_src.get_batch_design(nhid).T

                self.transformer = MatrixMul(sharedX(W, name='W', borrow=True))
            else:
                self.transformer = transformer

            self.vis_space = VectorSpace(nvis)
            self.hid_space = VectorSpace(nhid)
        else:
            assert hid_space is not None
            assert transformer is not None
            assert nvis is None
            assert nhid is None

            self.vis_space = vis_space
            self.hid_space = hid_space
            self.transformer = transformer


        try:
            b_vis = self.vis_space.get_origin()
            b_vis += init_bias_vis
        except ValueError:
            raise ValueError("bad shape or value for init_bias_vis")
        self.bias_vis = sharedX(b_vis, name='bias_vis', borrow=True)

        try:
            b_hid = self.hid_space.get_origin()
            b_hid += init_bias_hid
        except ValueError:
            raise ValueError('bad shape or value for init_bias_hid')
        self.bias_hid = sharedX(b_hid, name='bias_hid', borrow=True)

        self.random_patches_src = random_patches_src
        self.register_names_to_del(['random_patches_src'])


        self.__dict__.update(nhid=nhid, nvis=nvis)
        self._params = safe_union(self.transformer.get_params(), [self.bias_vis, self.bias_hid])

        self.base_lr = base_lr
        self.anneal_start = anneal_start
        self.nchains = nchains
        self.sml_gibbs_steps = sml_gibbs_steps
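
Stripped of the Theano shared-variable and MatrixMul plumbing, the weight initialization above has two paths: uniform noise in (-irange, irange), or patches drawn from a dataset and scaled by irange. A numpy-only sketch of both paths; the patches array is a stand-in for random_patches_src.get_batch_design(nhid):

    import numpy as np

    nvis, nhid, irange = 6, 4, 0.5
    rng = np.random.RandomState(1001)

    # Path 1: no patch source -- uniform initialization around zero.
    W_uniform = rng.uniform(-irange, irange, (nvis, nhid))

    # Path 2: initialize from random data patches, scaled by irange.
    # A random array stands in for get_batch_design(nhid), which
    # returns one design-matrix row per hidden unit.
    patches = rng.rand(nhid, nvis)
    W_patches = irange * patches.T

    assert W_uniform.shape == W_patches.shape == (nvis, nhid)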
Example #8
 def __init__(self, layer_name):
     Model.__init__(self)
     self.__dict__.update(locals())
     del self.self
     self._params = []
Example #9
 def __init__(self, mlp, noise="gaussian", monitor_ll=False, ll_n_samples=100, ll_sigma=0.2):
     Model.__init__(self)
     self.__dict__.update(locals())
     del self.self
     self.theano_rng = MRG_RandomStreams(2014 * 5 + 27)
Example #10
 def __init__(self, mlp):
     Model.__init__(self)
     self.__dict__.update(locals())
     del self.self
     self.theano_rng = MRG_RandomStreams(2015 * 4 * 20)
Example #11
    def __init__(self,
                 nmap,
                 input_space=None,
                 nvisx=None,
                 nvisy=None,
                 input_source=("featuresX", "featuresY"),
                 act_enc=None,
                 act_dec=None,
                 irange=1e-3,
                 rng=9001):
        Block.__init__(self)
        Model.__init__(self)
        assert nmap > 0, "Number of mapping units must be positive"

        if (nvisx is not None and nvisy is not None) or input_space is not None:
            if nvisx is not None and nvisy is not None:
                assert nvisx > 0, "Number of visx units must be positive"
                assert nvisy > 0, "Number of visy units must be positive"
                input_space = CompositeSpace(
                    [VectorSpace(nvisx),
                     VectorSpace(nvisy)])
                self.nvisx = nvisx
                self.nvisy = nvisy
            elif isinstance(input_space.components[0], Conv2DSpace):
                rx, cx = input_space.components[0].shape
                chx = input_space.components[0].num_channels
                ry, cy = input_space.components[1].shape
                chy = input_space.components[1].num_channels
                self.nvisx = rx * cx * chx
                self.nvisy = ry * cy * chy
            else:
                raise NotImplementedError(
                    str(type(self)) + " does not support that input_space.")
        # Check whether the input_space and input_source structures match
        try:
            DataSpecsMapping((input_space, input_source))
        except ValueError:
            raise ValueError("The structures of `input_space`, %s, and "
                             "`input_source`, %s do not match. If you "
                             "specified a CompositeSpace as an input, "
                             "be sure to specify the data sources as well." %
                             (input_space, input_source))

        self.input_space = input_space
        self.input_source = input_source
        self.nmap = nmap
        self.output_space = VectorSpace(self.nmap)
        self._initialize_visbiasX(self.nvisx)  # self.visbiasX
        self._initialize_visbiasY(self.nvisy)  # self.visbiasY
        self._initialize_mapbias()  # self.mapbias
        self.irange = irange
        self.rng = make_np_rng(rng, which_method="randn")
        seed = int(self.rng.randint(2**30))
        self.s_rng = make_theano_rng(seed, which_method="uniform")

        def _resolve_callable(conf, conf_attr):
            if conf[conf_attr] is None or conf[conf_attr] == "linear":
                return None
            # If it's a callable, use it directly.
            if hasattr(conf[conf_attr], '__call__'):
                return conf[conf_attr]
            elif (conf[conf_attr] in globals()
                  and hasattr(globals()[conf[conf_attr]], '__call__')):
                return globals()[conf[conf_attr]]
            elif hasattr(tensor.nnet, conf[conf_attr]):
                return getattr(tensor.nnet, conf[conf_attr])
            elif hasattr(tensor, conf[conf_attr]):
                return getattr(tensor, conf[conf_attr])
            else:
                raise ValueError("Couldn't interpret %s value: '%s'" %
                                 (conf_attr, conf[conf_attr]))

        self.act_enc = _resolve_callable(locals(), 'act_enc')
        self.act_dec = _resolve_callable(locals(), 'act_dec')
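
The nested _resolve_callable helper maps an activation spec to a callable: None or "linear" means identity (return None), an actual callable is used as-is, and a string is looked up first in globals() and then as an attribute of the tensor modules. A self-contained sketch of the same resolution order, with numpy standing in for tensor/tensor.nnet:

    import numpy as np

    def my_activation(x):
        return x * 2  # stand-in for a user-defined function in globals()

    def resolve_callable(spec):
        """Resolve an activation spec the way _resolve_callable does."""
        if spec is None or spec == "linear":
            return None                    # linear activation: no-op
        if callable(spec):
            return spec                    # already a function: use it
        if spec in globals() and callable(globals()[spec]):
            return globals()[spec]         # name defined by the user
        if hasattr(np, spec):
            return getattr(np, spec)       # module attribute lookup
        raise ValueError("Couldn't interpret value: %r" % (spec,))

    assert resolve_callable("linear") is None
    assert resolve_callable(abs) is abs
    assert resolve_callable("my_activation")(3) == 6
    assert resolve_callable("tanh") is np.tanh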
Example #12
    def __init__(self, kernel, data, W,
                 lambda_vals=.0, H=None,
                 termination_criterion=None, kernel_matrix=None):
        """
            Convex non-negative matrix factorization.
            This model computes the CNMF factorization of a dataset.

            Parameters
            ----------
            kernel: Object that is going to compute the kernel between vectors.
                The object must follow the interface in kernel_two_kay_MF.kernels.
            data: Numpy matrix.
            W: Numpy matrix.
            lambda_vals: Regularization to avoid division by zero.
            H: Numpy matrix.
            termination_criterion: instance of \
                pylearn2.termination_criteria.TerminationCriterion, optional
            kernel_matrix: Numpy matrix. Represents dot product in the feature space of the data.
                If this matrix is not provided, it is going to be computed.
        """

        Model.__init__(self)

        self._kernel = kernel

        self._data = data
        if not isfinite(self._data):
            raise Exception("NaN or Inf in data")

        if kernel_matrix is not None:
            assert kernel_matrix.shape[0] == self._data.shape[0]
            self._kernel_matrix = kernel_matrix
            if not isfinite(self._kernel_matrix):
                raise Exception("NaN or Inf in kernel_matrix")
        else:
            self._compute_kernel_matrix()

        self.W = W
        if not isfinite(self.W):
            raise Exception("NaN or Inf in W")

        assert self.W.shape[1] == self._data.shape[0]

        self._data_size, self._num_features = self._data.shape
        self._num_latent_topics, _ = self.W.shape

        self.W = sharedX(self.W, name="W", borrow=True)

        if H is not None:
            if H.shape[1] != self._num_latent_topics or H.shape[0] != self._data_size:
                self.H = sharedX(
                    numpy.random.rand(
                        self._data_size,
                        self._num_latent_topics).astype(
                        self.W.dtype),
                    name="H",
                    borrow=True)
                self.init_H()
            else:
                if not isfinite(H):
                    raise Exception("NaN or Inf in H")
                else:
                    self.H = sharedX(H, name="H", borrow=True)
        else:
            self.H = sharedX(
                numpy.random.rand(self._data_size, self._num_latent_topics).astype(self.W.dtype),
                name="H", borrow=True)
            self.init_H()

        self._params = [self.W, self.H]

        self.input_space = VectorSpace(self._num_features)
        self.output_space = VectorSpace(self._num_latent_topics)

        self.lambda_vals = lambda_vals
        self._compute_update_rules()
        self._compute_helper_functions()

        Monitor.get_monitor(self)
        self.monitor._sanity_check()

        self.termination_criterion = termination_criterion
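
The shapes in this constructor are worth spelling out: with data of shape (n, d) and k latent topics, W is (k, n), H is (n, k), and the kernel matrix holds all pairwise dot products of data points in feature space. A numpy sketch of those pieces for a plain linear kernel; the reconstruction line is one plausible reading of the factorization, not code from the model:

    import numpy as np

    rng = np.random.RandomState(0)
    n, d, k = 10, 5, 3            # data points, features, latent topics

    data = rng.rand(n, d)         # the `data` argument
    W = rng.rand(k, n)            # W.shape[1] == data.shape[0], as asserted

    # Linear-kernel version of kernel_matrix: pairwise dot products.
    kernel_matrix = data @ data.T
    assert kernel_matrix.shape == (n, n)

    # H as initialized when none is supplied: random, (data_size, k).
    H = rng.rand(n, k)

    # Plausible reconstruction: topics are combinations of data points
    # (W @ data), and H mixes topics back into data space.
    reconstruction = H @ (W @ data)
    assert reconstruction.shape == data.shape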
Example #13
    def __init__(
        self,
        nmap,
        input_space=None,
        nvisx=None,
        nvisy=None,
        input_source=("featuresX", "featuresY"),
        act_enc=None,
        act_dec=None,
        irange=1e-3,
        rng=9001,
    ):
        Block.__init__(self)
        Model.__init__(self)
        assert nmap > 0, "Number of mapping units must be positive"

        if (nvisx is not None and nvisy is not None) or input_space is not None:
            if nvisx is not None and nvisy is not None:
                assert nvisx > 0, "Number of visx units must be positive"
                assert nvisy > 0, "Number of visy units must be positive"
                input_space = CompositeSpace([VectorSpace(nvisx), VectorSpace(nvisy)])
                self.nvisx = nvisx
                self.nvisy = nvisy
            elif isinstance(input_space.components[0], Conv2DSpace):
                rx, cx = input_space.components[0].shape
                chx = input_space.components[0].num_channels
                ry, cy = input_space.components[1].shape
                chy = input_space.components[1].num_channels
                self.nvisx = rx * cx * chx
                self.nvisy = ry * cy * chy
            else:
                raise NotImplementedError(str(type(self)) + " does not support that input_space.")
        # Check whether the input_space and input_source structures match
        try:
            DataSpecsMapping((input_space, input_source))
        except ValueError:
            raise ValueError(
                "The structures of `input_space`, %s, and "
                "`input_source`, %s do not match. If you "
                "specified a CompositeSpace as an input, "
                "be sure to specify the data sources as well." % (input_space, input_source)
            )

        self.input_space = input_space
        self.input_source = input_source
        self.nmap = nmap
        self.output_space = VectorSpace(self.nmap)
        self._initialize_visbiasX(self.nvisx)  # self.visbiasX
        self._initialize_visbiasY(self.nvisy)  # self.visbiasY
        self._initialize_mapbias()  # self.mapbias
        self.irange = irange
        self.rng = make_np_rng(rng, which_method="randn")
        seed = int(self.rng.randint(2 ** 30))
        self.s_rng = make_theano_rng(seed, which_method="uniform")

        def _resolve_callable(conf, conf_attr):
            if conf[conf_attr] is None or conf[conf_attr] == "linear":
                return None
            # If it's a callable, use it directly.
            if hasattr(conf[conf_attr], "__call__"):
                return conf[conf_attr]
            elif conf[conf_attr] in globals() and hasattr(globals()[conf[conf_attr]], "__call__"):
                return globals()[conf[conf_attr]]
            elif hasattr(tensor.nnet, conf[conf_attr]):
                return getattr(tensor.nnet, conf[conf_attr])
            elif hasattr(tensor, conf[conf_attr]):
                return getattr(tensor, conf[conf_attr])
            else:
                raise ValueError("Couldn't interpret %s value: '%s'" % (conf_attr, conf[conf_attr]))

        self.act_enc = _resolve_callable(locals(), "act_enc")
        self.act_dec = _resolve_callable(locals(), "act_dec")
Example #14
File: rbm.py  Project: davyfeng/pylearn
    def __init__(self, nvis, nhid, irange=0.5, rng=None, init_bias_vis=0.0,
            init_bias_hid=0.0, base_lr=1e-3, anneal_start=None, nchains=100,
            sml_gibbs_steps=1, random_patches_src=None,
            monitor_reconstruction=False):

        """
        Construct an RBM object.

        Parameters
        ----------
        nvis : int
            Number of visible units in the model.
        nhid : int
            Number of hidden units in the model.
        irange : float, optional
            The size of the initial interval around 0 for weights.
        rng : RandomState object or seed
            NumPy RandomState object to use when initializing parameters
            of the model, or (integer) seed to use to create one.
        init_bias_vis : array_like, optional
            Initial value of the visible biases, broadcasted as necessary.
        init_bias_hid : array_like, optional
            initial value of the hidden biases, broadcasted as necessary.
        monitor_reconstruction : bool, optional
            If True, will request a monitoring channel to monitor
            reconstruction error.
        random_patches_src : Dataset, optional
            Either None, or a Dataset from which to draw random patches
            in order to initialize the weights. Patches will be multiplied
            by irange.

        Parameters for default SML learning rule:

            base_lr : the base learning rate
            anneal_start : number of steps after which to start annealing on a 1/t schedule
            nchains: number of negative chains
            sml_gibbs_steps: number of Gibbs steps to take per update

        """

        Model.__init__(self)
        Block.__init__(self)


        if rng is None:
            # TODO: global rng configuration stuff.
            rng = numpy.random.RandomState(1001)
        self.rng = rng

        try:
            b_vis = numpy.zeros(nvis)
            b_vis += init_bias_vis
        except ValueError:
            raise ValueError("bad shape or value for init_bias_vis")
        self.bias_vis = sharedX(b_vis, name='bias_vis', borrow=True)

        try:
            b_hid = numpy.zeros(nhid)
            b_hid += init_bias_hid
        except ValueError:
            raise ValueError('bad shape or value for init_bias_hid')
        self.bias_hid = sharedX(b_hid, name='bias_hid', borrow=True)

        self.random_patches_src = random_patches_src
        self.register_names_to_del(['random_patches_src'])

        if random_patches_src is None:
            W = rng.uniform(-irange, irange, (nvis, nhid))
        else:
            if hasattr(random_patches_src, '__array__'):
                W = irange * random_patches_src.T
                assert W.shape == (nvis, nhid)
            else:
                #assert type(irange) == type(0.01)
                #assert irange == 0.01
                W = irange * random_patches_src.get_batch_design(nhid).T

        self.weights = sharedX(
            W,
            name='W',
            borrow=True
        )

        self.__dict__.update(nhid=nhid, nvis=nvis)
        self._params = [self.bias_vis, self.bias_hid, self.weights]

        self.base_lr = base_lr
        self.anneal_start = anneal_start
        self.nchains = nchains
        self.sml_gibbs_steps = sml_gibbs_steps
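
The bias setup above leans on numpy broadcasting: init_bias_vis and init_bias_hid may be scalars or full-length arrays, and the in-place += onto a zero vector either broadcasts the value or raises ValueError, which the constructor converts into a friendlier message. A minimal sketch of that pattern; init_bias is an illustrative helper name:

    import numpy as np

    def init_bias(n, init_value, name):
        """Broadcast a scalar or array onto a length-n zero vector."""
        try:
            b = np.zeros(n)
            b += init_value   # broadcasts scalars, shape-checks arrays
        except ValueError:
            raise ValueError("bad shape or value for %s" % name)
        return b

    print(init_bias(4, 0.5, "init_bias_vis"))           # -> [0.5 0.5 0.5 0.5]
    print(init_bias(4, [1, 2, 3, 4], "init_bias_hid"))  # kept as given
    # init_bias(4, [1, 2, 3], "init_bias_vis") raises the friendly error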
Example #15
 def __init__(self, layer_name):
     Model.__init__(self)
     self.__dict__.update(locals())
     del self.self
     self._params = []
Example #16
 def __init__(self, mlp):
     Model.__init__(self)
     self.__dict__.update(locals())
     del self.self
     self.theano_rng = MRG_RandomStreams(2015 * 4 * 20)
Example #17
    def __init__(self, nvis=None, nhid=None,
            vis_space=None,
            hid_space=None,
            transformer=None,
            irange=0.5, rng=None, init_bias_vis=None,
            init_bias_vis_marginals=None, init_bias_hid=0.0,
            base_lr=1e-3, anneal_start=None, nchains=100, sml_gibbs_steps=1,
            random_patches_src=None,
            monitor_reconstruction=False):

        """
        Construct an RBM object.

        Parameters
        ----------
        nvis : int
            Number of visible units in the model.
            (Specifying this implies that the model acts on a vector,
            i.e. it sets vis_space = pylearn2.space.VectorSpace(nvis) )
        nhid : int
            Number of hidden units in the model.
            (Specifying this implies that the model acts on a vector)
        vis_space : pylearn2.space.Space, optional
            A pylearn2.space.Space object describing what kind of vector
            space the RBM acts on. Don't specify if you used nvis / nhid.
        hid_space : pylearn2.space.Space, optional
            A pylearn2.space.Space object describing what kind of vector
            space the RBM's hidden units live in. Don't specify if you used
            nvis / nhid.
        init_bias_vis_marginals : Dataset, optional
            Either None, or a Dataset used to initialize the visible biases
            to the inverse sigmoid of the data marginals.
        irange : float, optional
            The size of the initial interval around 0 for weights.
        rng : RandomState object or seed
            NumPy RandomState object to use when initializing parameters
            of the model, or (integer) seed to use to create one.
        init_bias_vis : array_like, optional
            Initial value of the visible biases, broadcasted as necessary.
        init_bias_hid : array_like, optional
            initial value of the hidden biases, broadcasted as necessary.
        monitor_reconstruction : bool, optional
            If True, will request a monitoring channel to monitor
            reconstruction error.
        random_patches_src : Dataset, optional
            Either None, or a Dataset from which to draw random patches
            in order to initialize the weights. Patches will be multiplied
            by irange.

        Parameters for default SML learning rule:

            base_lr : the base learning rate
            anneal_start : number of steps after which to start annealing on a 1/t schedule
            nchains: number of negative chains
            sml_gibbs_steps: number of Gibbs steps to take per update

        """

        Model.__init__(self)
        Block.__init__(self)

        if init_bias_vis_marginals is not None:
            assert init_bias_vis is None
            X = init_bias_vis_marginals.X
            assert X.min() >= 0.0
            assert X.max() <= 1.0

            marginals = X.mean(axis=0)

            #rescale the marginals a bit to avoid NaNs
            init_bias_vis = inverse_sigmoid_numpy(.01 + .98 * marginals)


        if init_bias_vis is None:
            init_bias_vis = 0.0

        if rng is None:
            # TODO: global rng configuration stuff.
            rng = numpy.random.RandomState(1001)
        self.rng = rng

        if vis_space is None:
            #if we don't specify things in terms of spaces and a transformer,
            #assume dense matrix multiplication and work off of nvis, nhid
            assert hid_space is None
            assert transformer is None or isinstance(transformer,MatrixMul)
            assert nvis is not None
            assert nhid is not None

            if transformer is None:
                if random_patches_src is None:
                    W = rng.uniform(-irange, irange, (nvis, nhid))
                else:
                    if hasattr(random_patches_src, '__array__'):
                        W = irange * random_patches_src.T
                        assert W.shape == (nvis, nhid)
                    else:
                        #assert type(irange) == type(0.01)
                        #assert irange == 0.01
                        W = irange * random_patches_src.get_batch_design(nhid).T

                self.transformer = MatrixMul(sharedX(W, name='W', borrow=True))
            else:
                self.transformer = transformer

            self.vis_space = VectorSpace(nvis)
            self.hid_space = VectorSpace(nhid)
        else:
            assert hid_space is not None
            assert transformer is not None
            assert nvis is None
            assert nhid is None

            self.vis_space = vis_space
            self.hid_space = hid_space
            self.transformer = transformer


        try:
            b_vis = self.vis_space.get_origin()
            b_vis += init_bias_vis
        except ValueError:
            raise ValueError("bad shape or value for init_bias_vis")
        self.bias_vis = sharedX(b_vis, name='bias_vis', borrow=True)

        try:
            b_hid = self.hid_space.get_origin()
            b_hid += init_bias_hid
        except ValueError:
            raise ValueError('bad shape or value for init_bias_hid')
        self.bias_hid = sharedX(b_hid, name='bias_hid', borrow=True)

        self.random_patches_src = random_patches_src
        self.register_names_to_del(['random_patches_src'])


        self.__dict__.update(nhid=nhid, nvis=nvis)
        self._params = safe_union(self.transformer.get_params(), [self.bias_vis, self.bias_hid])

        self.base_lr = base_lr
        self.anneal_start = anneal_start
        self.nchains = nchains
        self.sml_gibbs_steps = sml_gibbs_steps
Example #18
    def __init__(self, nvis, nhid, act_enc, act_dec,
                 tied_weights=False, irange=1e-3, rng=9001):
        """
        WRITEME
        """
        super(Autoencoder, self).__init__()
        Model.__init__(self)
        assert nvis >= 0, "Number of visible units must be non-negative"
        assert nhid > 0, "Number of hidden units must be positive"

        self.input_space = VectorSpace(nvis)
        self.output_space = VectorSpace(nhid)

        # Save a few parameters needed for resizing
        self.nhid = nhid
        self.irange = irange
        self.tied_weights = tied_weights
        self.rng = make_np_rng(rng, which_method="randn")
        self._initialize_hidbias()
        if nvis > 0:
            self._initialize_visbias(nvis)
            self._initialize_weights(nvis)
        else:
            self.visbias = None
            self.weights = None

        seed = int(self.rng.randint(2 ** 30))

        # why a theano rng? should we remove it?
        self.s_rng = make_theano_rng(seed, which_method="uniform")

        if tied_weights and self.weights is not None:
            self.w_prime = self.weights.T
        else:
            self._initialize_w_prime(nvis)

        def _resolve_callable(conf, conf_attr):
            """
            .. todo::

                WRITEME
            """
            if conf[conf_attr] is None or conf[conf_attr] == "linear":
                return None
            # If it's a callable, use it directly.
            if hasattr(conf[conf_attr], '__call__'):
                return conf[conf_attr]
            elif (conf[conf_attr] in globals()
                  and hasattr(globals()[conf[conf_attr]], '__call__')):
                return globals()[conf[conf_attr]]
            elif hasattr(tensor.nnet, conf[conf_attr]):
                return getattr(tensor.nnet, conf[conf_attr])
            elif hasattr(tensor, conf[conf_attr]):
                return getattr(tensor, conf[conf_attr])
            else:
                raise ValueError("Couldn't interpret %s value: '%s'" %
                                    (conf_attr, conf[conf_attr]))

        self.act_enc = _resolve_callable(locals(), 'act_enc')
        self.act_dec = _resolve_callable(locals(), 'act_dec')
        self._params = [
            self.visbias,
            self.hidbias,
            self.weights,
        ]
        if not self.tied_weights:
            self._params.append(self.w_prime)
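
With tied_weights=True the decoder matrix is not a separate parameter: w_prime is the transpose of weights, so training the encoder weights simultaneously updates the decoder, and _params deliberately omits w_prime. A numpy illustration of the sharing (a Theano shared variable's .T behaves analogously, as a symbolic view of the same storage):

    import numpy as np

    rng = np.random.RandomState(9001)
    nvis, nhid = 5, 3

    weights = rng.randn(nvis, nhid)  # encoder weights, shape (nvis, nhid)
    w_prime = weights.T              # tied decoder: a transposed view

    # Mutating the encoder weights is visible through the tied decoder;
    # there is only one underlying parameter array.
    weights[0, 0] = 42.0
    assert w_prime[0, 0] == 42.0

    # With tied_weights=False, w_prime would instead be an independent
    # (nhid, nvis) matrix, initialized and trained as its own parameter.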