Example #1
	def dense_residual_block(self, model_layer, name):
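		"""Build a residual block: dense -> batchnorm -> activation (-> optional
		dropout) -> dense -> batchnorm, summed with the block input, followed by
		a final activation."""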
		with tf.name_scope('dense_residual_block') as scope:
			last_layer = self.last_layer

			if 'function' in model_layer:
				activation = model_layer['function']
			else:
				activation = 'relu'

			# original residual unit
			shape = self.network[last_layer].get_output_shape()
			num_units = shape[-1].value

			self.network[name+'_1resid'] = layers.DenseLayer(self.network[last_layer], num_units=num_units, b=None, **self.seed)
			self.network[name+'_1resid_norm'] = layers.BatchNormLayer(self.network[name+'_1resid'], self.placeholders['is_training'])
			self.network[name+'_1resid_active'] = layers.ActivationLayer(self.network[name+'_1resid_norm'], function=activation)

			if 'dropout_block' in model_layer:
				placeholder_name = 'keep_prob_'+str(self.num_dropout)
				self.placeholders[placeholder_name] = tf.placeholder(tf.float32, name=placeholder_name)
				self.feed_dict[placeholder_name] = 1-model_layer['dropout_block']
				self.num_dropout += 1
				self.network[name+'_dropout1'] = layers.DropoutLayer(self.network[name+'_1resid_active'], keep_prob=self.placeholders[placeholder_name])
				lastname = name+'_dropout1'
			else:
				lastname = name+'_1resid_active'

			self.network[name+'_2resid'] = layers.DenseLayer(self.network[lastname], num_units=num_units, b=None, **self.seed)
			self.network[name+'_2resid_norm'] = layers.BatchNormLayer(self.network[name+'_2resid'], self.placeholders['is_training'])
			self.network[name+'_resid_sum'] = layers.ElementwiseSumLayer([self.network[last_layer], self.network[name+'_2resid_norm']])
			self.network[name+'_resid'] = layers.ActivationLayer(self.network[name+'_resid_sum'], function=activation)
			self.last_layer = name+'_resid'
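
A minimal usage sketch for the block above (hypothetical: `builder` stands in for whatever class hosts these methods, and the spec keys are inferred from the method body rather than from a documented API):

	# 'function' falls back to 'relu'; 'dropout_block' inserts dropout between
	# the two dense sub-layers and feeds keep_prob = 1 - rate at train time
	model_layer = {'layer': 'dense_residual', 'function': 'relu', 'dropout_block': 0.1}
	builder.dense_residual_block(model_layer, name='resid1')
	# builder.last_layer now points at 'resid1_resid'
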
Example #2
	def single_layer(self, model_layer, name):
		""" build a single layer"""

		# input layer
		if model_layer['layer'] == 'input':

			with tf.name_scope('input') as scope:
				inputs = utils.placeholder(shape=model_layer['input_shape'], name=name)
				self.network[name] = layers.InputLayer(inputs)
				self.placeholders[name] = inputs
				self.feed_dict[name] = []

		# dense layer
		elif model_layer['layer'] == 'dense':

			with tf.name_scope('dense') as scope:
				if 'W' not in model_layer.keys():
					model_layer['W'] = init.GlorotUniform(**self.seed)
				self.network[name] = layers.DenseLayer(self.network[self.last_layer], num_units=model_layer['num_units'],
													 W=model_layer['W'],
													 b=None)

		# convolution layer
		elif model_layer['layer'] == 'conv2d':

			with tf.name_scope('conv2d') as scope:
				if 'W' not in model_layer.keys():
					W = init.GlorotUniform(**self.seed)
				else:
					W = model_layer['W']
				if 'padding' not in model_layer.keys():
					padding = 'VALID'
				else:
					padding = model_layer['padding']
				if 'strides' not in model_layer.keys():
					strides = (1, 1)
				else:
					strides = model_layer['strides']

				self.network[name] = layers.Conv2DLayer(self.network[self.last_layer], num_filters=model_layer['num_filters'],
													  filter_size=model_layer['filter_size'],
													  W=W,
													  padding=padding,
													  strides=strides)

		elif model_layer['layer'] == 'conv1d':
			with tf.name_scope('conv1d') as scope:
				if 'W' not in model_layer.keys():
					W = init.GlorotUniform(**self.seed)
				else:
					W = model_layer['W']
				if 'padding' not in model_layer.keys():
					padding = 'VALID'
				else:
					padding = model_layer['padding']
				if 'strides' not in model_layer.keys():
					strides = 1
				else:
					strides = model_layer['strides']
				reverse = False
				if 'reverse' in model_layer:
					reverse = model_layer['reverse']

				self.network[name] = layers.Conv1DLayer(self.network[self.last_layer], num_filters=model_layer['num_filters'],
													  filter_size=model_layer['filter_size'],
													  W=W,
													  padding=padding,
													  strides=strides,
													  reverse=reverse)

		# transposed convolution layer
		elif model_layer['layer'] == 'conv2d_transpose':

			with tf.name_scope('conv2d_transpose') as scope:
				if 'W' not in model_layer.keys():
					W = init.GlorotUniform(**self.seed)
				else:
					W = model_layer['W']
				if 'padding' not in model_layer.keys():
					padding = 'SAME'
				else:
					padding = model_layer['padding']
				if 'strides' not in model_layer.keys():
					strides = (1, 1)
				else:
					strides = model_layer['strides']

				self.network[name] = layers.TransposeConv2DLayer(self.network[self.last_layer], num_filters=model_layer['num_filters'],
													  filter_size=model_layer['filter_size'],
													  W=W,
													  padding=padding,
													  strides=strides)

		# transposed convolution layer
		elif model_layer['layer'] == 'conv1d_transpose':

			with tf.name_scope('conv1d_transpose') as scope:
				if 'W' not in model_layer.keys():
					W = init.GlorotUniform(**self.seed)
				else:
					W = model_layer['W']
				if 'padding' not in model_layer.keys():
					padding = 'SAME'
				else:
					padding = model_layer['padding']
				if 'strides' not in model_layer.keys():
					strides = 1
				else:
					strides = model_layer['strides']

				self.network[name] = layers.TransposeConv1DLayer(self.network[self.last_layer], num_filters=model_layer['num_filters'],
													  filter_size=model_layer['filter_size'],
													  W=W,
													  padding=padding,
													  strides=strides)

		# concat layer
		elif model_layer['layer'] == 'concat':
			self.network[name] = layers.ConcatLayer([self.network[self.last_layer], self.network[model_layer['concat']]])

		# element-wise sum layer
		elif model_layer['layer'] == 'sum':
			self.network[name] = layers.ElementwiseSumLayer([self.network[self.last_layer], self.network[model_layer['sum']]])

		# reshape layer
		elif model_layer['layer'] == 'reshape':
			self.network[name] = layers.ReshapeLayer(self.network[self.last_layer], model_layer['reshape'])

		elif model_layer['layer'] == 'reduce_max':
			self.network[name] = layers.MaxLayer(self.network[self.last_layer], axis=1)

		elif model_layer['layer'] == 'reduce_mean':
			self.network[name] = layers.MeanLayer(self.network[self.last_layer], axis=1)

		elif model_layer['layer'] == 'softmax2D':
			self.network[name] = layers.Softmax2DLayer(self.network[self.last_layer])

		self.last_layer = name
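
A sketch of the spec dicts this dispatcher accepts (hypothetical values; the keys and defaults are taken from the branches above, and `builder` is again a stand-in):

	builder.single_layer({'layer': 'input', 'input_shape': [None, 200, 4]}, name='inputs')
	# omitted keys take the defaults shown above, e.g. conv1d uses
	# W=GlorotUniform, padding='VALID', strides=1, reverse=False
	builder.single_layer({'layer': 'conv1d', 'num_filters': 32, 'filter_size': 19}, name='conv1')
	builder.single_layer({'layer': 'dense', 'num_units': 128}, name='dense1')
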
Example #3
	def build_layers(self, model_layers, supervised=True):

		self.network = OrderedDict()
		name_gen = NameGenerator()
		self.num_dropout = 0
		self.num_inputs = 0
		self.last_layer = ''

		# loop to build each layer of network
		for model_layer in model_layers:
			layer = model_layer['layer']

			# name of layer
			if 'name' in model_layer:
				name = model_layer['name']
			else:
				name = name_gen.generate_name(layer)

			# set scope for each layer
			with tf.name_scope(name) as scope:
				if layer == "input":

					# add input layer
					self.single_layer(model_layer, name)

				elif layer == 'embedding':
					vocab_size = model_layer['vocab_size']
					embedding_size = model_layer['embedding_size']
					if 'max_norm' in model_layer:
						max_norm = model_layer['max_norm']
					else:
						max_norm = None
					self.network[name] = layers.EmbeddingLayer(self.network[self.last_layer], vocab_size, embedding_size, max_norm)
					self.last_layer = name

				elif layer in ('variational', 'variational_normal'):
					if 'name' in model_layer:
						name = model_layer['name']
					else:
						name = 'Z'

					self.network[name+'_mu'] = layers.DenseLayer(self.network[self.last_layer], num_units=model_layer['num_units'], b=init.GlorotUniform(), **self.seed)
					self.network[name+'_logvar'] = layers.DenseLayer(self.network[self.last_layer], num_units=model_layer['num_units'], b=init.GlorotUniform(), **self.seed)
					self.network[name+'_sample'] = layers.VariationalSampleLayer(self.network[name+'_mu'], self.network[name+'_logvar'])
					self.last_layer = name+'_sample'

				elif layer == 'variational_softmax':
					if 'hard' in model_layer:
						hard = model_layer['hard']
					else:
						hard = False
					num_categories, num_classes = model_layer['shape']

					if 'temperature' in model_layer:
						temperature = model_layer['temperature']
					else:
						temperature = 5.0
					self.feed_dict['temperature'] = temperature
					self.placeholders['temperature'] = tf.placeholder(dtype=tf.float32, name="temperature")
					if 'name' in model_layer:
						name = model_layer['name']
					else:
						name = 'Z'

					if 'output' in model_layer:
						output = model_layer['output']
					else:
						output = 'softmax'

					self.network[name+'_logits'] = layers.DenseLayer(self.network[self.last_layer], num_units=num_categories*num_classes, b=init.GlorotUniform())

					self.network[name+'_logits_reshape'] = layers.ReshapeLayer(self.network[name+'_logits'], shape=[-1, num_categories, num_classes])

					# pass the temperature placeholder so it can be annealed at run time
					self.network[name+'_sample'] = layers.CategoricalSampleLayer(self.network[name+'_logits_reshape'], temperature=self.placeholders['temperature'], hard=hard)

					self.network[name+'_softmax'] = layers.Softmax2DLayer(self.network[name+'_logits_reshape'])

					if output == 'hard':
						self.network[name] = layers.ReshapeLayer(self.network[name+'_sample'], shape=[-1, num_categories*num_classes])
					else:
						self.network[name] = layers.ReshapeLayer(self.network[name+'_softmax'], shape=[-1, num_categories*num_classes])

					self.last_layer = name

				else:
					if layer == 'conv1d_residual':
						self.conv1d_residual_block(model_layer, name)

					elif layer == 'conv2d_residual':
						self.conv2d_residual_block(model_layer, name)

					elif layer == 'dense_residual':
						self.dense_residual_block(model_layer, name)

					else:
						# add core layer
						self.single_layer(model_layer, name)

					# add Batch normalization layer
					if 'norm' in model_layer:
						if 'batch' in model_layer['norm']:
							with tf.name_scope("norm") as scope:
								new_layer = name + '_batch'
								self.network[new_layer] = layers.BatchNormLayer(self.network[self.last_layer], self.placeholders['is_training'])
								self.last_layer = new_layer

					else:
						if model_layer['layer'] in ('dense', 'conv1d', 'conv2d'):
							if 'b' in model_layer:
								if model_layer['b'] is not None:
									with tf.name_scope("bias") as scope:
										b = init.Constant(model_layer['b'])
										new_layer = name+'_bias'
										self.network[new_layer] = layers.BiasLayer(self.network[self.last_layer], b=b)
										self.last_layer = new_layer

							else:  # no bias given and no batch norm: add a default bias
								with tf.name_scope("bias") as scope:
									b = init.GlorotUniform()
									new_layer = name+'_bias'
									self.network[new_layer] = layers.BiasLayer(self.network[self.last_layer], b=b)
									self.last_layer = new_layer

				# add activation layer
				if 'activation' in model_layer:
					new_layer = name+'_active'
					self.network[new_layer] = layers.ActivationLayer(self.network[self.last_layer], function=model_layer['activation'], name=scope)
					self.last_layer = new_layer

				# add max-pooling layer
				if 'max_pool' in model_layer:
					new_layer = name+'_maxpool'
					if 'max_pool_strides' in model_layer:
						strides = model_layer['max_pool_strides']
					else:
						strides = model_layer['max_pool']

					if isinstance(model_layer['max_pool'], (tuple, list)):
						self.network[new_layer] = layers.MaxPool2DLayer(self.network[self.last_layer], pool_size=model_layer['max_pool'], strides=strides, name=name+'_maxpool')
					else:
						self.network[new_layer] = layers.MaxPool1DLayer(self.network[self.last_layer], pool_size=model_layer['max_pool'], strides=strides, name=name+'_maxpool')
					self.last_layer = new_layer

				# add mean-pooling layer
				elif 'mean_pool' in model_layer:
					new_layer = name+'_meanpool'
					if 'mean_pool_strides' in model_layer:
						strides = model_layer['mean_pool_strides']
					else:
						strides = model_layer['mean_pool']

					if isinstance(model_layer['mean_pool'], (tuple, list)):
						self.network[new_layer] = layers.MeanPool2DLayer(self.network[self.last_layer], pool_size=model_layer['mean_pool'], strides=strides, name=name+'_meanpool')
					else:
						self.network[new_layer] = layers.MeanPool1DLayer(self.network[self.last_layer], pool_size=model_layer['mean_pool'], strides=strides, name=name+'_meanpool')
					self.last_layer = new_layer

				# add global-pooling layer
				elif 'global_pool' in model_layer:
					new_layer = name+'_globalpool'
					self.network[new_layer] = layers.GlobalPoolLayer(self.network[self.last_layer], func=model_layer['global_pool'], name=name+'_globalpool')
					self.last_layer = new_layer

				# add dropout layer
				if 'dropout' in model_layer:
					new_layer = name+'_dropout'
					placeholder_name = 'keep_prob_'+str(self.num_dropout)
					self.placeholders[placeholder_name] = tf.placeholder(tf.float32, name=placeholder_name)
					self.feed_dict[placeholder_name] = 1-model_layer['dropout']
					self.num_dropout += 1
					self.network[new_layer] = layers.DropoutLayer(self.network[self.last_layer], keep_prob=self.placeholders[placeholder_name], name=name+'_dropout')
					self.last_layer = new_layer

				if 'reshape' in model_layer and layer != 'reshape':
					new_layer = name+'_reshape'
					self.network[new_layer] = layers.ReshapeLayer(self.network[self.last_layer], model_layer['reshape'])
					self.last_layer = new_layer

		if supervised:
			self.network['output'] = self.network.pop(self.last_layer)
			shape = self.network['output'].get_output_shape()
			targets = utils.placeholder(shape=shape, name='output')
			self.placeholders['targets'] = targets
			self.feed_dict['targets'] = []
		else:
			self.network['X'] = self.network.pop(self.last_layer)
			self.placeholders['targets'] = self.placeholders['inputs']
			self.feed_dict['targets'] = []

		return self.network, self.placeholders, self.feed_dict
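
Putting it together, a supervised model could be specified and built as below (a sketch only: the spec keys mirror the branches above and `builder` again stands in for the hosting class):

	model_layers = [
		{'layer': 'input', 'input_shape': [None, 200, 4], 'name': 'inputs'},
		{'layer': 'conv1d', 'num_filters': 32, 'filter_size': 19,
		 'norm': 'batch', 'activation': 'relu', 'max_pool': 4, 'dropout': 0.2},
		{'layer': 'dense', 'num_units': 128, 'activation': 'relu', 'dropout': 0.5},
		{'layer': 'dense', 'num_units': 1, 'activation': 'sigmoid'},
	]
	network, placeholders, feed_dict = builder.build_layers(model_layers, supervised=True)
	# the final layer is renamed 'output', and a 'targets' placeholder
	# matching its output shape is created
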
Example #4
    def build_layers(self, model_layers, supervised=True, use_scope=False):

        self.network = OrderedDict()
        name_gen = NameGenerator()
        self.num_dropout = 0
        self.num_inputs = 0
        self.last_layer = ''

        # loop to build each layer of network
        for model_layer in model_layers:
            layer = model_layer['layer']

            # name of layer
            if 'name' in model_layer:
                name = model_layer['name']
            else:
                name = name_gen.generate_name(layer)

            if layer == "input":

                # add input layer
                self.single_layer(model_layer, name)

            else:
                if layer == 'conv1d_residual':
                    self.conv1d_residual_block(model_layer, name)

                elif layer == 'conv2d_residual':
                    self.conv2d_residual_block(model_layer, name)

                elif layer == 'dense_residual':
                    self.dense_residual_block(model_layer, name)

                elif layer == 'variational':
                    self.network['encode_mu'] = layers.DenseLayer(
                        self.network[self.last_layer],
                        num_units=model_layer['num_units'],
                        **self.seed)
                    self.network['encode_logsigma'] = layers.DenseLayer(
                        self.network[self.last_layer],
                        num_units=model_layer['num_units'],
                        **self.seed)
                    self.network['Z'] = layers.VariationalSampleLayer(
                        self.network['encode_mu'],
                        self.network['encode_logsigma'])
                    self.last_layer = 'Z'

                else:
                    # add core layer
                    self.single_layer(model_layer, name)

            # add Batch normalization layer
            if 'norm' in model_layer:
                if 'batch' in model_layer['norm']:
                    new_layer = name + '_batch'
                    self.network[new_layer] = layers.BatchNormLayer(
                        self.network[self.last_layer],
                        self.placeholders['is_training'])
                    self.last_layer = new_layer

            else:
                if model_layer['layer'] in ('dense', 'conv1d', 'conv2d'):
                    if 'b' in model_layer:
                        if model_layer['b'] is not None:
                            # initialize bias with the provided value
                            b = init.Constant(model_layer['b'])
                            new_layer = name + '_bias'
                            self.network[new_layer] = layers.BiasLayer(
                                self.network[self.last_layer], b=b)
                            self.last_layer = new_layer
                    elif 'norm' not in model_layer:
                        b = init.Constant(0.05)
                        new_layer = name + '_bias'
                        self.network[new_layer] = layers.BiasLayer(
                            self.network[self.last_layer], b=b)
                        self.last_layer = new_layer
                else:
                    if 'b' in model_layer:
                        if model_layer['b'] is not None:
                            # initialize bias with the provided value
                            b = init.Constant(model_layer['b'])
                            new_layer = name + '_bias'
                            self.network[new_layer] = layers.BiasLayer(
                                self.network[self.last_layer], b=b)
                            self.last_layer = new_layer

            # add activation layer
            if 'activation' in model_layer:
                new_layer = name + '_active'
                self.network[new_layer] = layers.ActivationLayer(
                    self.network[self.last_layer],
                    function=model_layer['activation'])
                self.last_layer = new_layer

            # add max-pooling layer (defined here only for 4D feature maps;
            # a scalar pool size pools along the first spatial axis only)
            if 'max_pool' in model_layer:
                new_layer = name + '_maxpool'
                if len(self.network[self.last_layer].output_shape) == 4:
                    if isinstance(model_layer['max_pool'], (tuple, list)):
                        self.network[new_layer] = layers.MaxPool2DLayer(
                            self.network[self.last_layer],
                            pool_size=model_layer['max_pool'])
                    else:
                        self.network[new_layer] = layers.MaxPool2DLayer(
                            self.network[self.last_layer],
                            pool_size=(model_layer['max_pool'], 1))
                    # advance last_layer only when a pooling layer was actually added
                    self.last_layer = new_layer

            # add mean-pooling layer
            elif 'mean_pool' in model_layer:
                new_layer = name + '_meanpool'
                if isinstance(model_layer['mean_pool'], (tuple, list)):
                    self.network[new_layer] = layers.MeanPool2DLayer(
                        self.network[self.last_layer],
                        pool_size=model_layer['mean_pool'],
                        name=name + '_meanpool')
                else:
                    self.network[new_layer] = layers.MeanPool1DLayer(
                        self.network[self.last_layer],
                        pool_size=model_layer['mean_pool'],
                        name=name + '_meanpool')
                self.last_layer = new_layer

            # add global-pooling layer
            elif 'global_pool' in model_layer:
                new_layer = name + '_globalpool'
                self.network[new_layer] = layers.GlobalPoolLayer(
                    self.network[self.last_layer],
                    func=model_layer['global_pool'],
                    name=name + '_globalpool')
                self.last_layer = new_layer

            # add dropout layer
            if 'dropout' in model_layer:
                new_layer = name + '_dropout'
                placeholder_name = 'keep_prob_' + str(self.num_dropout)
                self.placeholders[placeholder_name] = tf.placeholder(
                    tf.float32, name=placeholder_name)
                self.feed_dict[placeholder_name] = 1 - model_layer['dropout']
                self.num_dropout += 1
                self.network[new_layer] = layers.DropoutLayer(
                    self.network[self.last_layer],
                    keep_prob=self.placeholders[placeholder_name])
                self.last_layer = new_layer

        if supervised:
            self.network['output'] = self.network.pop(self.last_layer)
            shape = self.network['output'].get_output_shape()
            targets = utils.placeholder(shape=shape, name='output')
            self.placeholders['targets'] = targets
            self.feed_dict['targets'] = []
        else:
            self.network['X'] = self.network.pop(self.last_layer)
            self.placeholders['targets'] = self.placeholders['inputs'][0]
            self.feed_dict['targets'] = []

            self.feed_dict['KL_weight'] = 1.0
            self.placeholders['KL_weight'] = tf.placeholder(tf.float32)
        return self.network, self.placeholders, self.feed_dict
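
This variant also wires a VAE-style bottleneck through the 'variational' branch and, in the unsupervised case, registers a KL_weight placeholder for annealing. A hypothetical sketch:

    model_layers = [
        {'layer': 'input', 'input_shape': [None, 784], 'name': 'inputs'},
        {'layer': 'dense', 'num_units': 256, 'activation': 'relu'},
        {'layer': 'variational', 'num_units': 32},  # adds encode_mu, encode_logsigma, Z
        {'layer': 'dense', 'num_units': 784, 'activation': 'sigmoid'},
    ]
    network, placeholders, feed_dict = builder.build_layers(model_layers, supervised=False)
    # feed_dict['KL_weight'] defaults to 1.0 and can be annealed during training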