Example #1
	def __init__(self, rng, input, n_in, n_hidden, n_out):
		# The MLP is a tanh hidden layer followed by a logistic-regression
		# output layer; the hidden layer's output feeds the output layer
		self.hiddenLayer = HiddenLayer(
			rng=rng,
			input=input,
			n_in=n_in,
			n_out=n_hidden,
			activation=T.tanh
		)

		self.logRegressionLayer = logisticRegression(
			input=self.hiddenLayer.output,
			n_in=n_hidden,
			n_out=n_out
		)

		# Symbolic L1 and squared-L2 regularization terms over both weight matrices
		self.L1 = (
			abs(self.hiddenLayer.W).sum()
			+ abs(self.logRegressionLayer.W).sum()
		)

		self.L2_sqr = (
			(self.hiddenLayer.W ** 2).sum()
			+ (self.logRegressionLayer.W ** 2).sum()
		)

		# The cost and error symbols depend only on the output layer,
		# so they are delegated to the logistic-regression layer
		self.negative_log_likelihood = (
			self.logRegressionLayer.negative_log_likelihood
		)

		self.errors = self.logRegressionLayer.errors

		# Parameters of both layers, concatenated into one list
		self.params = self.hiddenLayer.params + self.logRegressionLayer.params
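For context, a minimal usage sketch of how these symbolic attributes are typically combined into a regularized training cost. The class name MLP, the regularization weights, and the variables x and y are assumptions for illustration, not taken from the snippet:

	import numpy
	import theano.tensor as T

	rng = numpy.random.RandomState(1234)
	x = T.matrix('x')    # symbolic minibatch of inputs
	y = T.ivector('y')   # symbolic integer class labels

	# hypothetical instantiation; MLP is an assumed name for the class above
	classifier = MLP(rng=rng, input=x, n_in=28 * 28, n_hidden=500, n_out=10)

	# regularized cost: data term plus weighted L1/L2 penalties
	cost = (classifier.negative_log_likelihood(y)
	        + 0.00 * classifier.L1         # L1 weight (assumed value)
	        + 0.0001 * classifier.L2_sqr)  # L2 weight (assumed value)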
Example #2
File: mlptest.py Project: nanaju/MLstudy
	def __init__(self, rng, input, n_in, n_hidden, n_out):
		# The multilayer perceptron consists of a hidden layer and an
		# output layer given by logistic regression
		# Note that the hidden layer's output becomes the input of the
		# logistic regression
		self.hiddenLayer = HiddenLayer(rng=rng, input=input, n_in=n_in, n_out=n_hidden, activation=T.tanh)
		self.logRegressionLayer = logisticRegression(input=self.hiddenLayer.output, n_in=n_hidden, n_out=n_out)

		# Symbols that compute the L1/L2 regularization terms
		self.L1 = abs(self.hiddenLayer.W).sum() + abs(self.logRegressionLayer.W).sum()
		self.L2_sqr = (self.hiddenLayer.W ** 2).sum() + (self.logRegressionLayer.W ** 2).sum()

		# Symbol that computes the MLP's cost function
		# It depends only on the output layer, so it is the same as the
		# logistic regression implementation
		self.negative_log_likelihood = self.logRegressionLayer.negative_log_likelihood

		# Symbol that computes the errors
		self.errors = self.logRegressionLayer.errors

		# Parameters of the multilayer perceptron
		self.params = self.hiddenLayer.params + self.logRegressionLayer.params  # author's note: I don't understand this Pythonic idiom of joining the .params lists (see the note below)
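On the author's note above: the `+` here is ordinary Python list concatenation. Each layer exposes its shared variables as a list (typically [W, b]), so the result is one flat parameter list. A tiny demo with hypothetical names:

	hidden_params = [hidden_W, hidden_b]    # hypothetical shared variables
	logreg_params = [logreg_W, logreg_b]
	params = hidden_params + logreg_params  # [hidden_W, hidden_b, logreg_W, logreg_b]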
	def __init__(self, rng, input, n_in, hidden_layers_sizes, n_out):
		# Generalization to an arbitrary number of sigmoid hidden layers:
		# each layer reads the previous layer's output (the raw input for
		# the first layer)
		self.params = []
		self.sigmoid_layers = []
		self.n_layers = len(hidden_layers_sizes)
		for i in xrange(self.n_layers):
			if i == 0:
				input_size = n_in
				layer_input = input
			else:
				input_size = hidden_layers_sizes[i - 1]
				layer_input = self.sigmoid_layers[-1].output
			sigmoid_layer = HiddenLayer(rng=rng,
						    input=layer_input,
						    n_in=input_size,
						    n_out=hidden_layers_sizes[i],
						    activation=T.nnet.sigmoid)
			self.sigmoid_layers.append(sigmoid_layer)
			self.params.extend(sigmoid_layer.params)

		# The output layer reads the last hidden layer's output
		self.logRegressionLayer = logisticRegression(
			input=self.sigmoid_layers[-1].output,
			n_in=hidden_layers_sizes[-1],
			n_out=n_out
		)

		# L1 and squared-L2 regularization terms, accumulated over every layer
		self.L1 = abs(self.logRegressionLayer.W).sum()
		for layer in self.sigmoid_layers:
			self.L1 += abs(layer.W).sum()

		self.L2_sqr = (self.logRegressionLayer.W ** 2).sum()
		for layer in self.sigmoid_layers:
			self.L2_sqr += (layer.W ** 2).sum()

		self.negative_log_likelihood = (
			self.logRegressionLayer.negative_log_likelihood
		)

		self.errors = self.logRegressionLayer.errors
		self.params.extend(self.logRegressionLayer.params)
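Likewise, a minimal sketch of training this stacked variant with plain gradient descent, using the params list collected above. The class name StackedMLP, the layer sizes, and learning_rate are assumptions for illustration:

	import numpy
	import theano
	import theano.tensor as T

	rng = numpy.random.RandomState(1234)
	x = T.matrix('x')
	y = T.ivector('y')
	learning_rate = 0.01  # assumed value

	# hypothetical instantiation with two hidden layers
	classifier = StackedMLP(rng=rng, input=x, n_in=28 * 28,
	                        hidden_layers_sizes=[500, 500], n_out=10)
	cost = classifier.negative_log_likelihood(y)

	# one SGD update per shared parameter in classifier.params
	gparams = [T.grad(cost, param) for param in classifier.params]
	updates = [(param, param - learning_rate * gparam)
	           for param, gparam in zip(classifier.params, gparams)]
	train_model = theano.function(inputs=[x, y], outputs=cost, updates=updates)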