Code Example #1
File: audio_dataset.py  Project: mohataher/dnn-mgr
    def __init__(self, config, proc_type='standardize', **kwargs):
        '''
        config: dictionary with partition configuration information
        
        proc_type: type of preprocessing (either standardize or pca_whiten)
        
        if proc_type='standardize', no extra arguments are required

        if proc_type='pca_whiten', the following keyword arguments are required:
            ncomponents = x, where x is an integer
            epsilon = y, where y is a float (the regularization parameter)
        '''

        recognized_types = ['standardize', 'pca_whiten']
        assert proc_type in recognized_types

        # load partition information
        self.mean    = config['mean']
        self.mean    = self.mean.reshape((np.prod(self.mean.shape),))
        self.istd    = np.reciprocal(np.sqrt(config['var']))
        self.istd    = self.istd.reshape((np.prod(self.istd.shape),))
        self.tframes = config['tframes']
        nvis = len(self.mean)

        if proc_type == 'standardize':
            dim = nvis
            mask = (self.istd < 20) # in order to ignore near-zero variance inputs
            self.biases = np.array(-self.mean * self.istd * mask, dtype=np.float32) 
            self.weights = np.array(np.diag(self.istd * mask), dtype=np.float32)  # NOTE: gives a memory error for convnets (np.diag is not treated as a sparse matrix)
            
        if proc_type == 'pca_whiten':
            raise NotImplementedError(
                '''PCA whitening not yet implemented as a layer.
                Use audio_dataset2d.AudioDataset2d to perform whitening from the dataset iterator''')

            # dim      = kwargs['ncomponents']
            # S        = config['S'][:dim]   # eigenvalues
            # U        = config['U'][:,:dim] # eigenvectors            
            # self.pca = np.diag(1./(np.sqrt(S) + epsilon)).dot(U.T)
            
            # self.biases   = np.array(-self.mean.dot(self.pca.transpose()), dtype=np.float32)
            # self.weights  = np.array(self.pca.transpose(), dtype=np.float32)

        # Autoencoder with linear units
        pre_layer = Autoencoder(nvis=nvis, nhid=dim, act_enc=None, act_dec=None, irange=0)
        
        # Set weights for pre-processing
        params    = pre_layer.get_param_values()
        params[1] = self.biases
        params[2] = self.weights
        pre_layer.set_param_values(params)

        super(PreprocLayer, self).__init__(layer_name='pre', layer_content=pre_layer, freeze_params=True)        
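
Usage note (not from the project): a minimal sketch of how the constructor above might be invoked. It builds the config dictionary with the keys the code reads ('mean', 'var', 'tframes') and selects the 'standardize' path; the import path and the feature dimension are assumptions.

import numpy as np
from audio_dataset import PreprocLayer  # assumed import path; the class is defined in audio_dataset.py

nvis = 513  # hypothetical feature dimension (e.g. one spectral frame)
config = {
    'mean':    np.zeros(nvis, dtype=np.float32),  # per-feature mean of the training partition
    'var':     np.ones(nvis, dtype=np.float32),   # per-feature variance of the training partition
    'tframes': 1,                                 # number of time frames per example
}
pre_layer = PreprocLayer(config, proc_type='standardize')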
Code Example #2
File: test_autoencoder.py  Project: JackyRen/pylearn2
def test_autoencoder_logistic_linear_tied():
    data = np.random.randn(10, 5).astype(config.floatX)
    ae = Autoencoder(5, 7, act_enc="sigmoid", act_dec="linear", tied_weights=True)
    w = ae.weights.get_value()
    ae.hidbias.set_value(np.random.randn(7).astype(config.floatX))
    hb = ae.hidbias.get_value()
    ae.visbias.set_value(np.random.randn(5).astype(config.floatX))
    vb = ae.visbias.get_value()
    d = tensor.matrix()
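    # expected reconstruction with tied weights: sigmoid(data.dot(w) + hb).dot(w.T) + vb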
    result = np.dot(1.0 / (1 + np.exp(-hb - np.dot(data, w))), w.T) + vb
    ff = theano.function([d], ae.reconstruct(d))
    assert _allclose(ff(data), result)
Code Example #3
def test_autoencoder_logistic_linear_tied():
    data = np.random.randn(10, 5).astype(config.floatX)
    ae = Autoencoder(5, 7, act_enc='sigmoid', act_dec='linear',
                     tied_weights=True)
    w = ae.weights.get_value()
    ae.hidbias.set_value(np.random.randn(7).astype(config.floatX))
    hb = ae.hidbias.get_value()
    ae.visbias.set_value(np.random.randn(5).astype(config.floatX))
    vb = ae.visbias.get_value()
    d = tensor.matrix()
    result = np.dot(1. / (1 + np.exp(-hb - np.dot(data, w))), w.T) + vb
    ff = theano.function([d], ae.reconstruct(d))
    assert _allclose(ff(data), result)
Code Example #4
def test_autoencoder_tanh_cos_untied():
    data = np.random.randn(10, 5).astype(config.floatX)
    ae = Autoencoder(5, 7, act_enc='tanh', act_dec='cos', tied_weights=False)
    w = ae.weights.get_value()
    w_prime = ae.w_prime.get_value()
    ae.hidbias.set_value(np.random.randn(7).astype(config.floatX))
    hb = ae.hidbias.get_value()
    ae.visbias.set_value(np.random.randn(5).astype(config.floatX))
    vb = ae.visbias.get_value()
    d = tensor.matrix()
    result = np.cos(np.dot(np.tanh(hb + np.dot(data, w)), w_prime) + vb)
    ff = theano.function([d], ae.reconstruct(d))
    assert _allclose(ff(data), result)
Code Example #5
File: test_autoencoder.py  Project: JackyRen/pylearn2
def test_autoencoder_tanh_cos_untied():
    data = np.random.randn(10, 5).astype(config.floatX)
    ae = Autoencoder(5, 7, act_enc="tanh", act_dec="cos", tied_weights=False)
    w = ae.weights.get_value()
    w_prime = ae.w_prime.get_value()
    ae.hidbias.set_value(np.random.randn(7).astype(config.floatX))
    hb = ae.hidbias.get_value()
    ae.visbias.set_value(np.random.randn(5).astype(config.floatX))
    vb = ae.visbias.get_value()
    d = tensor.matrix()
    result = np.cos(np.dot(np.tanh(hb + np.dot(data, w)), w_prime) + vb)
    ff = theano.function([d], ae.reconstruct(d))
    assert _allclose(ff(data), result)
Code Example #6
def test_stackedblocks_with_params():
    """
    Test StackedBlocks when all layers have trainable params
    """

    aes = [
        Autoencoder(100, 50, 'tanh', 'tanh'),
        Autoencoder(50, 10, 'tanh', 'tanh')
    ]
    sb = StackedBlocks(aes)
    _params = set([p for l in sb._layers for p in l._params])

    assert sb._params == _params
Code Example #7
File: gsn.py  Project: KennethPierce/pylearnk
    def _make_aes(layer_sizes, activation_funcs, tied=True):
        """
        Creates the Autoencoder objects needed by the GSN.

        Parameters
        ----------
        layer_sizes : list of int
            Sizes of the successive layers, from visible to topmost hidden.
        activation_funcs : list
            One activation per layer (same length as layer_sizes); the first
            entry is the visible-layer activation.
        tied : bool
            Whether the Autoencoders are built with tied weights.
        """
        aes = []
        assert len(activation_funcs) == len(layer_sizes)

        for i in xrange(len(layer_sizes) - 1):
            # activation for visible layer is aes[0].act_dec
            act_enc = activation_funcs[i + 1]
            act_dec = act_enc if i != 0 else activation_funcs[0]
            aes.append(
                Autoencoder(layer_sizes[i],
                            layer_sizes[i + 1],
                            act_enc,
                            act_dec,
                            tied_weights=tied))

        return aes
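
A hedged illustration of how the helper above pairs layer sizes with activations (the sizes and activation names are made up; in the project the function lives on the GSN class):

# len(activation_funcs) must equal len(layer_sizes); activation_funcs[0] is the
# visible-layer activation and is used only as act_dec of the first autoencoder.
aes = _make_aes([784, 100, 50], ['sigmoid', 'tanh', 'tanh'], tied=True)
# aes[0] == Autoencoder(784, 100, act_enc='tanh', act_dec='sigmoid', tied_weights=True)
# aes[1] == Autoencoder(100, 50,  act_enc='tanh', act_dec='tanh',    tied_weights=True)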
Code Example #8
def test_untied_ae():
    """
    Tests that UntiedAutoencoder calls the Model superclass constructor
    """
    ae = Autoencoder(5, 7, act_enc='tanh', act_dec='cos', tied_weights=True)
    model = UntiedAutoencoder(ae)
    model._ensure_extensions()
Code Example #9
def test_dcae():
    """
    Tests that DeepComposedAutoencoder calls the Model superclass constructor
    """
    ae = Autoencoder(5, 7, act_enc='tanh', act_dec='cos',
                     tied_weights=True)
    model = DeepComposedAutoencoder([ae])
    model._ensure_extensions()
Code Example #10
    def get_autoencoder(self, structure, act_function='sigmoid'):
        n_input, n_output = structure
        config = {
            'nvis': n_input,
            'nhid': n_output,
            'act_enc': act_function,
            'act_dec': act_function,
            'irange': 0.05,
        }
        return Autoencoder(**config)
Code Example #11
def test_dcae():
    """
    Tests that DeepComposedAutoencoder works correctly
    """
    ae = Autoencoder(5, 7, act_enc='tanh', act_dec='cos', tied_weights=True)
    model = DeepComposedAutoencoder([ae])
    model._ensure_extensions()

    data = np.random.randn(10, 5).astype(config.floatX)
    model.perform(data)
Code Example #12
File: training.py  Project: albertomontesg/deeplearn
def get_autoencoder(structure):
    n_input, n_output = structure
    config = {
        'nvis': n_input,
        'nhid': n_output,
        'act_enc': 'sigmoid',
        'act_dec': None,
        "irange": 0.05,
    }
    return Autoencoder(**config)
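
A small usage sketch for the factory above (the layer sizes are illustrative, not from the project):

# 784 visible units, 500 hidden units; sigmoid encoder, act_dec=None gives a
# linear (identity) decoder; weights drawn uniformly from [-0.05, 0.05]
ae = get_autoencoder((784, 500))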
Code Example #13
def get_autoencoder(structure):
    n_input, n_output = structure
    config = {
        'nhid': n_output,
        'nvis': n_input,
        'tied_weights': True,
        'act_enc': 'sigmoid',
        'act_dec': 'sigmoid',
        'irange': 0.001,
    }
    return Autoencoder(**config)
Code Example #14
def test_sdae():
    """
    Tests that StackedDenoisingAutoencoder works correctly
    """
    data = np.random.randn(10, 5).astype(config.floatX) * 100
    ae = Autoencoder(5, 7, act_enc='tanh', act_dec='cos', tied_weights=False)
    corruptor = BinomialCorruptor(corruption_level=0.5)
    model = StackedDenoisingAutoencoder([ae], corruptor)
    model._ensure_extensions()

    w = ae.weights.get_value()
    w_prime = ae.w_prime.get_value()
    ae.hidbias.set_value(np.random.randn(7).astype(config.floatX))
    hb = ae.hidbias.get_value()
    ae.visbias.set_value(np.random.randn(5).astype(config.floatX))
    vb = ae.visbias.get_value()
    d = tensor.matrix()
    result = np.cos(np.dot(np.tanh(hb + np.dot(data, w)), w_prime) + vb)
    ff = theano.function([d], model.reconstruct(d))
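    # the binomial corruptor (corruption_level=0.5) perturbs the input before encoding,
    # so the corrupted reconstruction should not match the clean-path result above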
    assert not _allclose(ff(data), result)
Code Example #15
def test_autoencoder_properly_initialized():
    ae = Autoencoder(1, 1, 'sigmoid', 'linear')
    assert hasattr(ae, 'fn'), "Autoencoder didn't call Block.__init__"
    assert hasattr(ae, 'extensions'), "Autoencoder didn't call Model.__init__"