Example #1
	def __init__(self, label_struct, n_features, filter_h=1, presample_size=1, batch_size=512, nkerns=[10,10,10,30], dropout_rate=0):
		self.label_struct = label_struct
		self.batch_size = batch_size
		self.nkerns = nkerns
		self.dropout_rate = dropout_rate
		rng = numpy.random.RandomState(39316)

		print('...building the model')

		x = T.tensor3('x')
		xs = T.matrix('xs')
		y = T.imatrix('y')

		# Reshape the batch of signals, shape (batch_size, n_features['alg'], 50), to a 4D tensor
		layer0_input = x.dimshuffle(0, 'x', 1, 2)

		# Construct the first convolutional pooling layer:
		# filtering reduces the signal size to (num_sensor, (50-10)/2+1) = (1, 21)
		# 4D output tensor is thus of shape (batch_size, nkerns[0], num_sensor/filter_height, 21)
		if presample_size > 1:
			layer0_input = downsample.max_pool_2d(input=layer0_input, ds=(1,presample_size),)
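		# (note: downsample.max_pool_2d was deprecated in later Theano releases
		#  in favor of theano.tensor.signal.pool.pool_2d)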
		if filter_h == 0:
			filter_h = n_features['alg']

		layer0 = ConvPoolLayer(
			rng,
			input=layer0_input,
			filter_shape=(nkerns[0], 1, filter_h, 10),
			signal_shape=(batch_size, 1, n_features['alg'], 50),
			stride=(filter_h, 2),
		)
		layer1_input = T.switch(layer0.output < 0, 0, layer0.output)
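		# (T.switch(z < 0, 0, z) implements a ReLU; the same pattern recurs for
		#  every hidden activation below)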
		h = n_features['alg'] // filter_h

		# Construct the second convolutional pooling layer:
		# filtering reduces the signal size to (h, (21-5)/2+1) = (h, 9)
		# maxpooling reduces this further to (h, 9/3) = (h, 3)
		# 4D output tensor is thus of shape (batch_size, nkerns[1], h, 3)
		layer1 = ConvPoolLayer(
			rng,
			input=layer1_input,
			filter_shape=(nkerns[1], nkerns[0], 1, 5),
			signal_shape=(batch_size, nkerns[0], h, 21),
			stride=2,
			poolsize=3,
		)
		layer2_input = T.switch(layer1.output < 0, 0, layer1.output)

		# Construct the third convolutional pooling layer:
		# filtering reduces the signal size to (h, (3-3)+1) = (h, 1)
		# 4D output tensor is thus of shape (batch_size, nkerns[2], h, 1)
		layer2 = ConvPoolLayer(
			rng,
			input=layer2_input,
			filter_shape=(nkerns[2], nkerns[1], 1, 3),
			signal_shape=(batch_size, nkerns[1], h, 3),
		)

		# This will generate a matrix of shape (batch_size, nkerns[2] * h)
		convlayer_output = T.switch(layer2.output < 0, 0, layer2.output).flatten(2)

		layer3_input = T.concatenate([convlayer_output, xs], axis=1)
		n_in3 = nkerns[2] * h + n_features['xs']

		# construct a fully-connected sigmoidal layer
		# dropout the input
		dropout_layer3_input = _dropout_from_layer(rng, layer3_input, p=dropout_rate)
		dropout_layer3 = DropoutHiddenLayer(
			rng=rng,
			input=dropout_layer3_input,
			n_in=n_in3,
			n_out=nkerns[3],
			activation=T.tanh,
			dropout_rate=dropout_rate,
			use_bias=True
		)
		# reuse the parameters from the dropout layer here, on a different path
		# through the graph; scaling W by (1 - dropout_rate) matches the expected
		# activations of the dropout-trained network at test time
		layer3 = HiddenLayer(
			rng,
			input=layer3_input,
			W=dropout_layer3.W*(1-dropout_rate),
			b=dropout_layer3.b,
			n_in=n_in3,
			n_out=nkerns[3],
			activation=T.tanh
		)

		dropout_layer4_input = T.switch(dropout_layer3.output < 0, 0, dropout_layer3.output)
		layer4_input = T.switch(layer3.output < 0, 0, layer3.output)

		n_in4 = nkerns[3]
		# classify the values of the fully-connected sigmoidal layer
		dropout_layer4 = GroupedLogisticRegression(
			input=dropout_layer4_input,
			n_in=n_in4,
			n_outs=label_struct
		)
		layer4 = GroupedLogisticRegression(
			input=layer4_input,
			W=dropout_layer4.W*(1-dropout_rate),
			b=dropout_layer4.b,
			n_in=n_in4,
			n_outs=label_struct
		)

		self.cost = layer4.negative_log_likelihood(y)
		self.dropout_cost = dropout_layer4.negative_log_likelihood(y)
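		# (dropout_cost is the training objective; cost comes from the rescaled,
		#  dropout-free path and is meant for validation/testing)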
		# create a list of all model parameters to be fit by gradient descent;
		# the non-dropout layers reuse (scaled) copies of these same weights
		self.params = dropout_layer4.params + dropout_layer3.params + layer2.params + layer1.params + layer0.params
		self.layers = [layer0, layer1, layer2, dropout_layer3, layer3, dropout_layer4, layer4]

		self.x = x
		self.y = y
		self.xs = xs
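
The helper _dropout_from_layer used in both examples is not shown in this listing. A minimal sketch of what such a helper typically looks like in Theano code of this era (an assumption; the actual implementation in this codebase may differ):

import theano
from theano.tensor.shared_randomstreams import RandomStreams

def _dropout_from_layer(rng, layer, p):
	# zero each unit with probability p; survivors are left unscaled, which is
	# why the shared test-time weights above are multiplied by (1 - p)
	srng = RandomStreams(rng.randint(999999))
	mask = srng.binomial(n=1, p=1 - p, size=layer.shape,
	                     dtype=theano.config.floatX)
	return layer * mask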
Example #2
	def __init__(self, label_struct, n_features, filter_h=1, presample_size=1, batch_size=512, nkerns=[10,10,10,30], dropout_rate=0):
		self.label_struct = label_struct
		self.batch_size = batch_size
		self.nkerns = nkerns
		self.dropout_rate = dropout_rate
		rng = numpy.random.RandomState(39316)

		print('...building the model')

		a = T.tensor3('a')
		l = T.tensor3('l')
		g = T.tensor3('g')
		m = T.matrix('m')
		y = T.imatrix('y')

		# Reshape each batch of signals, shape (batch_size, n_features[sensor], 50), to a 4D tensor
		acc_input = a.dimshuffle(0, 'x', 1, 2)
		lin_input = l.dimshuffle(0, 'x', 1, 2)
		gyro_input = g.dimshuffle(0, 'x', 1, 2)

		acc_wing = ConvWing(rng, acc_input, n_features['acc'], filter_h, presample_size, nkerns, batch_size)
		lin_wing = ConvWing(rng, lin_input, n_features['lin'], filter_h, presample_size, nkerns, batch_size)
		gyro_wing = ConvWing(rng, gyro_input, n_features['gyro'], filter_h, presample_size, nkerns, batch_size)

		layer3_input = T.concatenate([acc_wing.output, lin_wing.output, gyro_wing.output, m], axis=1)
		# n_in3 = nkerns[2] * 3 + n_features['mag']
		n_in3 = acc_wing.outdim + lin_wing.outdim + gyro_wing.outdim + n_features['mag']
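		# each wing's outdim is presumably its flattened conv width,
		# i.e. nkerns[2] * (n_features[sensor] // filter_h), mirroring Example #1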

		# construct a fully-connected sigmoidal layer
		# dropout the input
		dropout_layer3_input = _dropout_from_layer(rng, layer3_input, p=dropout_rate)
		dropout_layer3 = DropoutHiddenLayer(
			rng=rng,
			input=dropout_layer3_input,
			n_in=n_in3,
			n_out=nkerns[3],
			activation=T.tanh,
			dropout_rate=dropout_rate,
			use_bias=True
		)
		# reuse the parameters from the dropout layer here, on a different path through the graph
		layer3 = HiddenLayer(
			rng,
			input=layer3_input,
			W=dropout_layer3.W*(1-dropout_rate),
			b=dropout_layer3.b,
			n_in=n_in3,
			n_out=nkerns[3],
			activation=T.tanh
		)

		dropout_layer4_input = T.switch(dropout_layer3.output < 0, 0, dropout_layer3.output)
		layer4_input = T.switch(layer3.output < 0, 0, layer3.output)

		n_in4 = nkerns[3]
		# classify the values of the fully-connected sigmoidal layer
		#layer4 = LogisticRegression(input=layer4_input, n_in=nkerns[3], n_out=numpy.sum(label_struct))
		#layer4 = GroupedLogisticRegression(input=layer4_input, n_in=nkerns[3], n_outs=label_struct)
		dropout_layer4 = GroupedLogisticRegression(
			input=dropout_layer4_input,
			n_in=n_in4,
			n_outs=label_struct
		)
		layer4 = GroupedLogisticRegression(
			input=layer4_input,
			W=dropout_layer4.W*(1-dropout_rate),
			b=dropout_layer4.b,
			n_in=n_in4,
			n_outs=label_struct
		)

		self.cost = layer4.negative_log_likelihood(y)
		self.dropout_cost = dropout_layer4.negative_log_likelihood(y)
		# create a list of all model parameters to be fit by gradient descent
		self.params = dropout_layer4.params + dropout_layer3.params + acc_wing.params + lin_wing.params + gyro_wing.params
		self.layers = [acc_wing, lin_wing, gyro_wing, dropout_layer3, layer3, dropout_layer4, layer4]

		self.a = a
		self.l = l
		self.g = g
		self.y = y
		self.m = m
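
ConvWing is likewise not defined here. Judging from Example #1, it presumably packages the three ConvPoolLayer stages for a single sensor stream; a rough skeleton under that assumption (names and shapes mirror Example #1 and ConvPoolLayer is assumed to come from the same module; this is not a verified implementation):

import theano.tensor as T
from theano.tensor.signal import downsample

class ConvWing(object):
	def __init__(self, rng, input, n_feat, filter_h, presample_size, nkerns, batch_size):
		# optional pre-pooling and full-height filter, exactly as in Example #1
		if presample_size > 1:
			input = downsample.max_pool_2d(input=input, ds=(1, presample_size))
		if filter_h == 0:
			filter_h = n_feat
		layer0 = ConvPoolLayer(rng, input=input,
			filter_shape=(nkerns[0], 1, filter_h, 10),
			signal_shape=(batch_size, 1, n_feat, 50),
			stride=(filter_h, 2))
		h = n_feat // filter_h
		layer1 = ConvPoolLayer(rng, input=T.switch(layer0.output < 0, 0, layer0.output),
			filter_shape=(nkerns[1], nkerns[0], 1, 5),
			signal_shape=(batch_size, nkerns[0], h, 21),
			stride=2, poolsize=3)
		layer2 = ConvPoolLayer(rng, input=T.switch(layer1.output < 0, 0, layer1.output),
			filter_shape=(nkerns[2], nkerns[1], 1, 3),
			signal_shape=(batch_size, nkerns[1], h, 3))
		# flattened ReLU output, plus the bookkeeping the examples rely on
		self.output = T.switch(layer2.output < 0, 0, layer2.output).flatten(2)
		self.outdim = nkerns[2] * h
		self.params = layer0.params + layer1.params + layer2.params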