Example #1
    def dense_residual_block(self, model_layer, name):
        with tf.name_scope('dense_residual_block') as scope:
            last_layer = self.last_layer

            if 'function' in model_layer:
                activation = model_layer['function']
            else:
                activation = 'relu'

            # original residual unit
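            # (dense -> batch norm -> activation -> [dropout] -> dense -> batch norm,
            # with the identity skip connection added before the final activation)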
            shape = self.network[last_layer].get_output_shape()
            num_units = shape[-1].value

            self.network[name + '_1resid'] = layers.DenseLayer(
                self.network[last_layer],
                num_units=num_units,
                b=None,
                **self.seed)
            self.network[name + '_1resid_norm'] = layers.BatchNormLayer(
                self.network[name + '_1resid'],
                self.placeholders['is_training'])
            self.network[name + '_1resid_active'] = layers.ActivationLayer(
                self.network[name + '_1resid_norm'], function=activation)

            if 'dropout_block' in model_layer:
                placeholder_name = 'keep_prob_' + str(self.num_dropout)
                self.placeholders[placeholder_name] = tf.placeholder(
                    tf.float32, name=placeholder_name)
                self.feed_dict[
                    placeholder_name] = 1 - model_layer['dropout_block']
                self.num_dropout += 1
                self.network[name + '_dropout1'] = layers.DropoutLayer(
                    self.network[name + '_1resid_active'],
                    keep_prob=self.placeholders[placeholder_name])
                lastname = name + '_dropout1'
            else:
                lastname = name + '_1resid_active'

            self.network[name + '_2resid'] = layers.DenseLayer(
                self.network[lastname],
                num_units=num_units,
                b=None,
                **self.seed)
            self.network[name + '_2resid_norm'] = layers.BatchNormLayer(
                self.network[name + '_2resid'],
                self.placeholders['is_training'])
            self.network[name + '_resid_sum'] = layers.ElementwiseSumLayer([
                self.network[last_layer], self.network[name + '_2resid_norm']
            ])
            self.network[name + '_resid'] = layers.ActivationLayer(
                self.network[name + '_resid_sum'], function=activation)
            self.last_layer = name + '_resid'
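The only keys this block reads from `model_layer` are 'function' and 'dropout_block', so a minimal config is easy to sketch. The dict below is illustrative, assuming the `build_layers` dispatch shown in Examples #3 and #4; key names are taken directly from the lookups above.

    # hypothetical config for dense_residual_block; values are illustrative
    model_layer = {
        'layer': 'dense_residual',   # routed to dense_residual_block by build_layers
        'name': 'resid1',            # optional; auto-generated when omitted
        'function': 'relu',          # activation; defaults to 'relu' when omitted
        'dropout_block': 0.1,        # optional dropout rate between the two dense layers
    }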
Example #2
	def conv2d_residual_block(self, model_layer, name):

		last_layer = self.last_layer
		filter_size = model_layer['filter_size']
		if 'function' in model_layer:
			activation = model_layer['function']
		else:
			activation = 'relu'

		# original residual unit
		shape = self.network[last_layer].get_output_shape()
		num_filters = shape[-1].value

		if not isinstance(filter_size, (list, tuple)):
			filter_size = (filter_size, filter_size)

		if 'W' in model_layer:
			W = model_layer['W']
		else:
			W = init.HeUniform(**self.seed)

		self.network[name+'_1resid'] = layers.Conv2DLayer(self.network[last_layer], num_filters=num_filters,
											  filter_size=filter_size,
											  W=W,
											  padding='SAME')
		self.network[name+'_1resid_norm'] = layers.BatchNormLayer(self.network[name+'_1resid'], self.placeholders['is_training'])
		self.network[name+'_1resid_active'] = layers.ActivationLayer(self.network[name+'_1resid_norm'], function=activation)


		if 'dropout_block' in model_layer:
			placeholder_name = 'keep_prob_'+str(self.num_dropout)
			self.placeholders[placeholder_name] = tf.placeholder(tf.float32, name=placeholder_name)
			self.feed_dict[placeholder_name] = 1-model_layer['dropout_block']
			self.num_dropout += 1
			self.network[name+'_dropout1'] = layers.DropoutLayer(self.network[name+'_1resid_active'], keep_prob=self.placeholders[placeholder_name])
			lastname = name+'_dropout1'
		else:
			lastname = name+'_1resid_active'

		self.network[name+'_2resid'] = layers.Conv2DLayer(self.network[lastname], num_filters=num_filters,
												  filter_size=filter_size,
												  W=W,
												  padding='SAME')
		self.network[name+'_2resid_norm'] = layers.BatchNormLayer(self.network[name+'_2resid'], self.placeholders['is_training'])
		self.network[name+'_resid_sum'] = layers.ElementwiseSumLayer([self.network[last_layer], self.network[name+'_2resid_norm']])
		self.network[name+'_resid'] = layers.ActivationLayer(self.network[name+'_resid_sum'], function=activation)
		self.last_layer = name+'_resid'
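This variant additionally reads 'filter_size' (a scalar is expanded to a square kernel) and an optional 'W' initializer. A sketch of a plausible config, again with illustrative values:

	# hypothetical config for conv2d_residual_block; values are illustrative
	model_layer = {
		'layer': 'conv2d_residual',
		'filter_size': 3,        # expanded to (3, 3); a tuple is used as-is
		'function': 'relu',      # defaults to 'relu' when omitted
		'dropout_block': 0.2,    # optional dropout between the two conv layers
		# 'W' omitted: weights default to init.HeUniform(**self.seed)
	}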
Example #3
	def build_layers(self, model_layers, supervised=True, use_scope=False):

		self.network = OrderedDict()
		name_gen = NameGenerator()
		self.num_dropout = 0
		self.num_inputs = 0
		self.last_layer = ''

		# loop to build each layer of network
		for model_layer in model_layers:
			layer = model_layer['layer']

			# name of layer
			if 'name' in model_layer:
				name = model_layer['name']
			else:
				name = name_gen.generate_name(layer)

			if layer == "input":

				# add input layer
				self.single_layer(model_layer, name)

			else:
				if layer == 'conv1d_residual':
					self.conv1d_residual_block(model_layer, name)

				elif layer == 'conv2d_residual':
					self.conv2d_residual_block(model_layer, name)

				elif layer == 'dense_residual':
					self.dense_residual_block(model_layer, name)

				elif layer == 'variational':
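					# a Gaussian latent: two dense heads produce mu and log-sigma;
					# VariationalSampleLayer presumably draws z = mu + exp(0.5 * logsigma) * eps
					# (the reparameterization trick), so gradients flow through the sample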
					self.network['encode_mu'] = layers.DenseLayer(self.network[self.last_layer], num_units=model_layer['num_units'], **self.seed)
					self.network['encode_logsigma'] = layers.DenseLayer(self.network[self.last_layer], num_units=model_layer['num_units'], **self.seed)
					self.network['Z'] = layers.VariationalSampleLayer(self.network['encode_mu'], self.network['encode_logsigma'])
					self.last_layer = 'Z'

				else:
					# add core layer
					self.single_layer(model_layer, name)

			# add batch normalization layer
			if 'norm' in model_layer:
				if 'batch' in model_layer['norm']:
					new_layer = name + '_batch'
					self.network[new_layer] = layers.BatchNormLayer(self.network[self.last_layer], self.placeholders['is_training'])
					self.last_layer = new_layer

			else:
				if model_layer['layer'] in ('dense', 'conv1d', 'conv2d'):
					if 'b' in model_layer:
						if model_layer['b'] is not None:
							b = init.Constant(0.05)
							new_layer = name+'_bias'
							self.network[new_layer] = layers.BiasLayer(self.network[self.last_layer], b=b)
							self.last_layer = new_layer
					else:
						# no norm and no explicit 'b': add a default bias to core layers
						b = init.Constant(0.05)
						new_layer = name+'_bias'
						self.network[new_layer] = layers.BiasLayer(self.network[self.last_layer], b=b)
						self.last_layer = new_layer
				else:
					if 'b' in model_layer and model_layer['b'] is not None:
						b = init.Constant(0.05)
						new_layer = name+'_bias'
						self.network[new_layer] = layers.BiasLayer(self.network[self.last_layer], b=b)
						self.last_layer = new_layer

			# add activation layer
			if 'activation' in model_layer:
				new_layer = name+'_active'
				self.network[new_layer] = layers.ActivationLayer(self.network[self.last_layer], function=model_layer['activation'])
				self.last_layer = new_layer

			# add max-pooling layer
			if 'max_pool' in model_layer:
				new_layer = name+'_maxpool'
				# only 4-D (batch, height, width, channels) inputs are pooled here
				if len(self.network[self.last_layer].output_shape) == 4:
					if isinstance(model_layer['max_pool'], (tuple, list)):
						self.network[new_layer] = layers.MaxPool2DLayer(self.network[self.last_layer], pool_size=model_layer['max_pool'])
					else:
						self.network[new_layer] = layers.MaxPool2DLayer(self.network[self.last_layer], pool_size=(model_layer['max_pool'], 1))
					# only advance last_layer when a pooling layer was actually added
					self.last_layer = new_layer

			# add mean-pooling layer
			elif 'mean_pool' in model_layer:
				new_layer = name+'_meanpool'
				if isinstance(model_layer['mean_pool'], (tuple, list)):
					self.network[new_layer] = layers.MeanPool2DLayer(self.network[self.last_layer], pool_size=model_layer['mean_pool'], name=name+'_meanpool')
				else:
					self.network[new_layer] = layers.MeanPool1DLayer(self.network[self.last_layer], pool_size=model_layer['mean_pool'], name=name+'_meanpool')
				self.last_layer = new_layer

			# add global-pooling layer
			elif 'global_pool' in model_layer:
				new_layer = name+'_globalpool'
				self.network[new_layer] = layers.GlobalPoolLayer(self.network[self.last_layer], func=model_layer['global_pool'], name=name+'_globalpool')
				self.last_layer = new_layer

			# add dropout layer
			if 'dropout' in model_layer:
				new_layer = name+'_dropout'
				placeholder_name = 'keep_prob_'+str(self.num_dropout)
				self.placeholders[placeholder_name] = tf.placeholder(tf.float32, name=placeholder_name)
				self.feed_dict[placeholder_name] = 1-model_layer['dropout']
				self.num_dropout += 1
				self.network[new_layer] = layers.DropoutLayer(self.network[self.last_layer], keep_prob=self.placeholders[placeholder_name])
				self.last_layer = new_layer

		if supervised:
			self.network['output'] = self.network.pop(self.last_layer)
			shape = self.network['output'].get_output_shape()
			targets = utils.placeholder(shape=shape, name='output')
			self.placeholders['targets'] = targets
			self.feed_dict['targets'] = []
		else:
			self.network['X'] = self.network.pop(self.last_layer)
			self.placeholders['targets'] = self.placeholders['inputs'][0]
			self.feed_dict['targets'] = []

			self.feed_dict['KL_weight'] = 1.0
			self.placeholders['KL_weight'] = tf.placeholder(tf.float32)
		return self.network, self.placeholders, self.feed_dict
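End to end, the builder consumes a list of such dicts and returns the layer graph plus the placeholders and feed values it created along the way. A hedged usage sketch follows; the builder instance, the keys consumed by `single_layer`, and the input shape are illustrative assumptions, not part of the source.

	# hypothetical usage of build_layers; layer specs are illustrative
	model_layers = [
		{'layer': 'input', 'input_shape': (None, 200, 1, 4)},
		{'layer': 'conv2d', 'num_filters': 32, 'filter_size': (19, 1),
		 'norm': 'batch', 'activation': 'relu', 'max_pool': (4, 1), 'dropout': 0.2},
		{'layer': 'dense_residual', 'dropout_block': 0.1},
		{'layer': 'dense', 'num_units': 1, 'activation': 'sigmoid'},
	]
	network, placeholders, feed_dict = builder.build_layers(model_layers, supervised=True)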
Example #4
    def build_layers(self, model_layers, supervised=True):

        self.network = OrderedDict()
        name_gen = NameGenerator()
        self.num_dropout = 0
        self.num_inputs = 0
        self.last_layer = ''

        # loop to build each layer of network
        for model_layer in model_layers:
            layer = model_layer['layer']

            # name of layer
            if 'name' in model_layer:
                name = model_layer['name']
            else:
                name = name_gen.generate_name(layer)

            # set scope for each layer
            with tf.name_scope(name) as scope:
                if layer == "input":

                    # add input layer
                    self.single_layer(model_layer, name)

                elif layer == 'embedding':
                    vocab_size = model_layer['vocab_size']
                    embedding_size = model_layer['embedding_size']
                    if 'max_norm' in model_layer:
                        max_norm = model_layer['max_norm']
                    else:
                        max_norm = None
                    self.network[name] = layers.EmbeddingLayer(
                        self.network[self.last_layer], vocab_size,
                        embedding_size, max_norm)
                    self.last_layer = name

                elif layer in ('variational', 'variational_normal'):
                    if 'name' in model_layer:
                        name = model_layer['name']
                    else:
                        name = 'Z'

                    self.network[name + '_mu'] = layers.DenseLayer(
                        self.network[self.last_layer],
                        num_units=model_layer['num_units'],
                        b=init.GlorotUniform(),
                        **self.seed)
                    self.network[name + '_logvar'] = layers.DenseLayer(
                        self.network[self.last_layer],
                        num_units=model_layer['num_units'],
                        b=init.GlorotUniform(),
                        **self.seed)
                    self.network[name +
                                 '_sample'] = layers.VariationalSampleLayer(
                                     self.network[name + '_mu'],
                                     self.network[name + '_logvar'])
                    self.last_layer = name + '_sample'

                elif layer == 'variational_softmax':
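                    # a categorical latent: per-category softmax over class logits,
                    # sampled through a temperature-controlled relaxation (presumably
                    # Gumbel-softmax style); 'hard' likely switches to straight-through
                    # one-hot samples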
                    if 'hard' in model_layer:
                        hard = model_layer['hard']
                    else:
                        hard = False
                    num_categories, num_classes = model_layer['shape']

                    if 'temperature' in model_layer:
                        temperature = model_layer['temperature']
                    else:
                        temperature = 5.0
                    self.feed_dict['temperature'] = temperature
                    self.placeholders['temperature'] = tf.placeholder(
                        dtype=tf.float32, name="temperature")
                    if 'name' in model_layer:
                        name = model_layer['name']
                    else:
                        name = 'Z'

                    self.network[name + '_logits'] = layers.DenseLayer(
                        self.network[self.last_layer],
                        num_units=num_categories * num_classes,
                        b=init.GlorotUniform())

                    # reshape logits to (batch, categories, classes), apply a
                    # per-category softmax, and draw a relaxed categorical sample
                    self.network[name + '_logits_reshape'] = layers.ReshapeLayer(
                        self.network[name + '_logits'],
                        shape=[-1, num_categories, num_classes])
                    self.network[name + '_softmax'] = layers.Softmax2DLayer(
                        self.network[name + '_logits_reshape'])
                    self.network[name + '_sample'] = layers.CategoricalSampleLayer(
                        self.network[name + '_logits_reshape'],
                        temperature=temperature,
                        hard=hard)
                    self.network[name] = layers.ReshapeLayer(
                        self.network[name + '_softmax'],
                        shape=[-1, num_categories * num_classes])
                    self.last_layer = name

                else:
                    if layer == 'conv1d_residual':
                        self.conv1d_residual_block(model_layer, name)

                    elif layer == 'conv2d_residual':
                        self.conv2d_residual_block(model_layer, name)

                    elif layer == 'dense_residual':
                        self.dense_residual_block(model_layer, name)

                    else:
                        # add core layer
                        self.single_layer(model_layer, name)

                    # add batch normalization layer
                    if 'norm' in model_layer:
                        if 'batch' in model_layer['norm']:
                            with tf.name_scope("norm") as scope:
                                new_layer = name + '_batch'
                                self.network[new_layer] = layers.BatchNormLayer(
                                    self.network[self.last_layer],
                                    self.placeholders['is_training'])
                                self.last_layer = new_layer

                    else:
                        if model_layer['layer'] in ('dense', 'conv1d', 'conv2d'):
                            if 'b' in model_layer:
                                if model_layer['b'] is not None:
                                    with tf.name_scope("bias") as scope:
                                        b = init.Constant(model_layer['b'])
                                        new_layer = name + '_bias'
                                        self.network[new_layer] = layers.BiasLayer(
                                            self.network[self.last_layer], b=b)
                                        self.last_layer = new_layer

                            else:
                                # no norm and no explicit 'b': add a default bias
                                with tf.name_scope("bias") as scope:
                                    b = init.GlorotUniform()
                                    new_layer = name + '_bias'
                                    self.network[new_layer] = layers.BiasLayer(
                                        self.network[self.last_layer], b=b)
                                    self.last_layer = new_layer

                # add activation layer
                if 'activation' in model_layer:
                    new_layer = name + '_active'
                    self.network[new_layer] = layers.ActivationLayer(
                        self.network[self.last_layer],
                        function=model_layer['activation'],
                        name=scope)
                    self.last_layer = new_layer

                # add max-pooling layer
                if 'max_pool' in model_layer:
                    new_layer = name + '_maxpool'
                    if isinstance(model_layer['max_pool'], (tuple, list)):
                        self.network[new_layer] = layers.MaxPool2DLayer(
                            self.network[self.last_layer],
                            pool_size=model_layer['max_pool'],
                            name=name + '_maxpool')
                    else:
                        self.network[new_layer] = layers.MaxPool1DLayer(
                            self.network[self.last_layer],
                            pool_size=model_layer['max_pool'],
                            name=name + '_maxpool')
                    self.last_layer = new_layer

                # add mean-pooling layer
                elif 'mean_pool' in model_layer:
                    new_layer = name + '_meanpool'
                    if isinstance(model_layer['mean_pool'], (tuple, list)):
                        self.network[new_layer] = layers.MeanPool2DLayer(
                            self.network[self.last_layer],
                            pool_size=model_layer['mean_pool'],
                            name=name + '_meanpool')
                    else:
                        self.network[new_layer] = layers.MeanPool1DLayer(
                            self.network[self.last_layer],
                            pool_size=model_layer['mean_pool'],
                            name=name + '_meanpool')
                    self.last_layer = new_layer

                # add global-pooling layer
                elif 'global_pool' in model_layer:
                    new_layer = name + '_globalpool'
                    self.network[new_layer] = layers.GlobalPoolLayer(
                        self.network[self.last_layer],
                        func=model_layer['global_pool'],
                        name=name + '_globalpool')
                    self.last_layer = new_layer

                # add dropout layer
                if 'dropout' in model_layer:
                    new_layer = name + '_dropout'
                    placeholder_name = 'keep_prob_' + str(self.num_dropout)
                    self.placeholders[placeholder_name] = tf.placeholder(
                        tf.float32, name=placeholder_name)
                    self.feed_dict[
                        placeholder_name] = 1 - model_layer['dropout']
                    self.num_dropout += 1
                    self.network[new_layer] = layers.DropoutLayer(
                        self.network[self.last_layer],
                        keep_prob=self.placeholders[placeholder_name],
                        name=name + '_dropout')
                    self.last_layer = new_layer

                if 'reshape' in model_layer and layer != 'reshape':
                    new_layer = name + '_reshape'
                    self.network[new_layer] = layers.ReshapeLayer(
                        self.network[self.last_layer], model_layer['reshape'])
                    self.last_layer = new_layer

        if supervised:
            self.network['output'] = self.network.pop(self.last_layer)
            shape = self.network['output'].get_output_shape()
            targets = utils.placeholder(shape=shape, name='output')
            self.placeholders['targets'] = targets
            self.feed_dict['targets'] = []
        else:
            self.network['X'] = self.network.pop(self.last_layer)
            self.placeholders['targets'] = self.placeholders['inputs']
            self.feed_dict['targets'] = []

        return self.network, self.placeholders, self.feed_dict
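For the `variational_softmax` branch in particular, the keys read above suggest a config along these lines (all values illustrative):

    # hypothetical config for the variational_softmax latent; values are illustrative
    model_layer = {
        'layer': 'variational_softmax',
        'name': 'Z',          # defaults to 'Z' when omitted
        'shape': (10, 4),     # unpacked as (num_categories, num_classes)
        'temperature': 5.0,   # relaxation temperature; defaults to 5.0
        'hard': False,        # True presumably yields straight-through one-hot samples
    }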