# Example 1
    def conv2d_residual_block(self, model_layer, name):
        """Append a residual block (conv-BN-act-[dropout]-conv-BN, skip sum, act).

        Parameters
        ----------
        model_layer : dict
            Block options: 'filter_size' (int or (h, w) pair), optional
            'function' (activation name, default 'relu'), optional 'W'
            (weight initializer, default GlorotUniform), optional
            'dropout_block' (dropout rate applied after the first activation).
        name : str
            Prefix for the keys of all layers added to self.network; the
            block's output is stored under name + '_resid' and becomes
            self.last_layer.
        """
        last_layer = self.last_layer
        filter_size = model_layer['filter_size']
        # Default activation is ReLU when none is specified.
        activation = model_layer.get('function', 'relu')

        # The residual branch keeps the input's channel count so the
        # element-wise skip sum is shape-compatible.
        shape = self.network[last_layer].get_output_shape()
        num_filters = shape[-1].value  # TF1 Dimension -> int

        # Allow a scalar filter size as shorthand for a square filter.
        if not isinstance(filter_size, (list, tuple)):
            filter_size = (filter_size, filter_size)

        # One initializer object shared by both convolutions, as before.
        # (Conditional, not dict.get, so no initializer is built when
        # the caller supplies W.)
        W = model_layer['W'] if 'W' in model_layer else init.GlorotUniform(**self.seed)

        # First conv -> batch norm -> activation.
        self.network[name + '_1resid'] = layers.Conv2DLayer(
            self.network[last_layer],
            num_filters=num_filters,
            filter_size=filter_size,
            W=W,
            padding='SAME')
        self.network[name + '_1resid_norm'] = layers.BatchNormLayer(
            self.network[name + '_1resid'], self.placeholders['is_training'])
        self.network[name + '_1resid_active'] = layers.ActivationLayer(
            self.network[name + '_1resid_norm'], function=activation)

        # Optional dropout between the two convolutions, driven by a
        # fresh keep_prob placeholder (rate is converted to keep prob).
        if 'dropout_block' in model_layer:
            placeholder_name = 'keep_prob_' + str(self.num_dropout)
            self.placeholders[placeholder_name] = tf.placeholder(
                tf.float32, name=placeholder_name)
            self.feed_dict[placeholder_name] = 1 - model_layer['dropout_block']
            self.num_dropout += 1
            self.network[name + '_dropout1'] = layers.DropoutLayer(
                self.network[name + '_1resid_active'],
                keep_prob=self.placeholders[placeholder_name])
            lastname = name + '_dropout1'
        else:
            lastname = name + '_1resid_active'

        # Second conv -> batch norm, then add the skip connection and
        # apply the final activation.
        self.network[name + '_2resid'] = layers.Conv2DLayer(
            self.network[lastname],
            num_filters=num_filters,
            filter_size=filter_size,
            W=W,
            padding='SAME')
        self.network[name + '_2resid_norm'] = layers.BatchNormLayer(
            self.network[name + '_2resid'], self.placeholders['is_training'])
        self.network[name + '_resid_sum'] = layers.ElementwiseSumLayer(
            [self.network[last_layer], self.network[name + '_2resid_norm']])
        self.network[name + '_resid'] = layers.ActivationLayer(
            self.network[name + '_resid_sum'], function=activation)
        self.last_layer = name + '_resid'
# Example 2
	def single_layer(self, model_layer, name):
		"""Build one layer from its config dict and register it in self.network.

		Parameters
		----------
		model_layer : dict
			Layer specification; 'layer' selects the type and the remaining
			keys supply type-specific options (e.g. 'num_filters',
			'filter_size', 'padding', 'strides', 'W').
		name : str
			Key under which the new layer is stored in self.network; also
			becomes self.last_layer.
		"""
		layer_type = model_layer['layer']

		# input layer: create a placeholder and wrap it in an InputLayer
		if layer_type == 'input':
			with tf.name_scope('input') as scope:
				inputs = utils.placeholder(shape=model_layer['input_shape'], name=name)
				self.network[name] = layers.InputLayer(inputs)
				self.placeholders[name] = inputs
				self.feed_dict[name] = []

		# dense (fully connected) layer
		elif layer_type == 'dense':
			with tf.name_scope('dense') as scope:
				# NOTE(review): writing the default W back into model_layer
				# mutates the caller's dict so the same initializer is reused
				# on repeated calls — kept for compatibility; confirm intended.
				if 'W' not in model_layer:
					model_layer['W'] = init.GlorotUniform(**self.seed)
				self.network[name] = layers.DenseLayer(
					self.network[self.last_layer],
					num_units=model_layer['num_units'],
					W=model_layer['W'],
					b=None)

		# 2D convolution
		elif layer_type == 'conv2d':
			with tf.name_scope('conv2d') as scope:
				W = model_layer['W'] if 'W' in model_layer else init.GlorotUniform(**self.seed)
				padding = model_layer.get('padding', 'VALID')
				strides = model_layer.get('strides', (1, 1))
				self.network[name] = layers.Conv2DLayer(
					self.network[self.last_layer],
					num_filters=model_layer['num_filters'],
					filter_size=model_layer['filter_size'],
					W=W,
					padding=padding,
					strides=strides)

		# 1D convolution (optionally with reversed filters)
		elif layer_type == 'conv1d':
			with tf.name_scope('conv1d') as scope:
				W = model_layer['W'] if 'W' in model_layer else init.GlorotUniform(**self.seed)
				padding = model_layer.get('padding', 'VALID')
				strides = model_layer.get('strides', 1)
				reverse = model_layer.get('reverse', False)
				self.network[name] = layers.Conv1DLayer(
					self.network[self.last_layer],
					num_filters=model_layer['num_filters'],
					filter_size=model_layer['filter_size'],
					W=W,
					padding=padding,
					strides=strides,
					reverse=reverse)

		# transposed 2D convolution (default padding 'SAME'; no name_scope,
		# matching the original behavior)
		elif layer_type == 'conv2d_transpose':
			W = model_layer['W'] if 'W' in model_layer else init.GlorotUniform(**self.seed)
			padding = model_layer.get('padding', 'SAME')
			strides = model_layer.get('strides', (1, 1))
			self.network[name] = layers.TransposeConv2DLayer(
				self.network[self.last_layer],
				num_filters=model_layer['num_filters'],
				filter_size=model_layer['filter_size'],
				W=W,
				padding=padding,
				strides=strides)

		# transposed 1D convolution
		elif layer_type == 'conv1d_transpose':
			W = model_layer['W'] if 'W' in model_layer else init.GlorotUniform(**self.seed)
			padding = model_layer.get('padding', 'SAME')
			strides = model_layer.get('strides', 1)
			self.network[name] = layers.TransposeConv1DLayer(
				self.network[self.last_layer],
				num_filters=model_layer['num_filters'],
				filter_size=model_layer['filter_size'],
				W=W,
				padding=padding,
				strides=strides)

		# concatenate with another layer, looked up by name in self.network
		elif layer_type == 'concat':
			self.network[name] = layers.ConcatLayer(
				[self.network[self.last_layer], self.network[model_layer['concat']]])

		# element-wise sum
		elif layer_type == 'sum':
			# NOTE(review): unlike 'concat', the summand is used directly
			# rather than looked up in self.network — confirm that
			# model_layer['sum'] is itself a layer object, not a name.
			self.network[name] = layers.ElemwiseSumLayer(
				[self.network[self.last_layer], model_layer['sum']])

		# reshape to the given shape
		elif layer_type == 'reshape':
			self.network[name] = layers.ReshapeLayer(
				self.network[self.last_layer], model_layer['reshape'])

		# reductions over axis 1
		elif layer_type == 'reduce_max':
			self.network[name] = layers.MaxLayer(self.network[self.last_layer], axis=1)

		elif layer_type == 'reduce_mean':
			self.network[name] = layers.MeanLayer(self.network[self.last_layer], axis=1)

		# 2D softmax
		elif layer_type == 'softmax2D':
			self.network[name] = layers.Softmax2DLayer(self.network[self.last_layer])

		# Unknown layer types fall through silently (original behavior);
		# name still becomes the last layer either way.
		self.last_layer = name