def conv2d_residual_block(network, lastlayer, model_layer):

    name = model_layer['name']
    filter_size = model_layer['filter_size']
    is_training = model_layer['batch_norm']
    activation = model_layer.get('function', 'relu')

    # original residual unit
    shape = network[lastlayer].get_output_shape()
    num_filters = shape[-1].value

    if not isinstance(filter_size, (list, tuple)):
        filter_size = (filter_size, 1)

    network[name + '_1resid'] = layers.Conv2DLayer(network[lastlayer],
                                                   num_filters=num_filters,
                                                   filter_size=filter_size,
                                                   padding='SAME')
    network[name + '_1resid_norm'] = layers.BatchNormLayer(
        network[name + '_1resid'], is_training)
    network[name + '_1resid_active'] = layers.ActivationLayer(
        network[name + '_1resid_norm'], function=activation)

    if 'dropout_block' in model_layer:
        network[name + '_dropout1'] = layers.DropoutLayer(
            network[name + '_1resid_active'],
            keep_prob=model_layer['dropout_block'])
        lastname = name + '_dropout1'
    else:
        lastname = name + '_1resid_active'

    network[name + '_2resid'] = layers.Conv2DLayer(network[lastname],
                                                   num_filters=num_filters,
                                                   filter_size=filter_size,
                                                   padding='SAME')
    network[name + '_2resid_norm'] = layers.BatchNormLayer(
        network[name + '_2resid'], is_training)
    network[name + '_resid_sum'] = layers.ElementwiseSumLayer(
        [network[lastlayer], network[name + '_2resid_norm']])
    network[name + '_resid'] = layers.ActivationLayer(network[name +
                                                              '_resid_sum'],
                                                      function=activation)
    return network
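
# Example (illustrative sketch, not part of the original code): building a
# conv2d residual block on top of an existing network dict. This assumes the
# module's `layers` wrappers (InputLayer, etc.) and the OrderedDict import are
# available; the input shape and layer names below are hypothetical.
#
#   import tensorflow as tf
#   inputs = tf.placeholder(tf.float32, shape=(None, 200, 1, 32))
#   is_training = tf.placeholder(tf.bool, name='is_training')
#   network = OrderedDict()
#   network['input'] = layers.InputLayer(inputs)
#   model_layer = {'name': 'resid1', 'filter_size': 5,
#                  'batch_norm': is_training, 'dropout_block': 0.9}
#   network = conv2d_residual_block(network, 'input', model_layer)
#   # the block's output layer is network['resid1_resid']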
	def conv1d_residual_block(self, model_layer):

		lastlayer = self.lastlayer

		name = model_layer['name']
		filter_size = model_layer['filter_size']
		is_training = model_layer['batch_norm']
		activation = model_layer.get('function', 'relu')

		# original residual unit
		shape = self.network[lastlayer].get_output_shape()
		num_filters = shape[-1].value

		if not isinstance(filter_size, (list, tuple)):
			filter_size = (filter_size, 1)

		self.network[name+'_1resid'] = layers.Conv2DLayer(self.network[lastlayer], num_filters=num_filters, filter_size=filter_size, padding='SAME')
		self.network[name+'_1resid_norm'] = layers.BatchNormLayer(self.network[name+'_1resid'], is_training)
		self.network[name+'_1resid_active'] = layers.ActivationLayer(self.network[name+'_1resid_norm'], function=activation)

		if 'dropout_block' in model_layer:
			# create a keep_prob placeholder so dropout can be rescaled at test time
			placeholder_name = 'keep_prob'+str(self.num_dropout)
			keep_prob = tf.placeholder(tf.float32, name=placeholder_name)
			self.network[name+'_dropout1'] = layers.DropoutLayer(self.network[name+'_1resid_active'], keep_prob=keep_prob)
			self.hidden_feed_dict[keep_prob] = model_layer['dropout_block']
			self.num_dropout += 1
			lastname = name+'_dropout1'
		else:
			lastname = name+'_1resid_active'

		self.network[name+'_2resid'] = layers.Conv2DLayer(self.network[lastname], num_filters=num_filters, filter_size=filter_size, padding='SAME')
		self.network[name+'_2resid_norm'] = layers.BatchNormLayer(self.network[name+'_2resid'], is_training)
		self.network[name+'_resid_sum'] = layers.ElementwiseSumLayer([self.network[lastlayer], self.network[name+'_2resid_norm']])
		self.network[name+'_resid'] = layers.ActivationLayer(self.network[name+'_resid_sum'], function=activation)

		self.lastlayer = name+'_resid'
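
	# Note (illustrative): self.hidden_feed_dict maps each keep_prob placeholder
	# to its training-time value. At test time the same keys can be fed 1.0 to
	# disable dropout, e.g.
	#   test_feed = {placeholder: 1.0 for placeholder in self.hidden_feed_dict}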
def dense_residual_block(network, lastlayer, model_layer):

    name = model_layer['name']
    is_training = model_layer['batch_norm']
    activation = model_layer.get('function', 'relu')

    # original residual unit
    shape = network[lastlayer].get_output_shape()
    num_units = shape[-1].value

    network[name + '_1resid'] = layers.DenseLayer(network[lastlayer],
                                                  num_units=num_units,
                                                  b=None)
    network[name + '_1resid_norm'] = layers.BatchNormLayer(
        network[name + '_1resid'], is_training)
    network[name + '_1resid_active'] = layers.ActivationLayer(
        network[name + '_1resid_norm'], function=activation)

    if 'dropout_block' in model_layer:
        network[name + '_dropout1'] = layers.DropoutLayer(
            network[name + '_1resid_active'],
            keep_prob=model_layer['dropout_block'])
        lastname = name + '_dropout1'
    else:
        lastname = name + '_1resid_active'

    network[name + '_2resid'] = layers.DenseLayer(network[lastname],
                                                  num_units=num_units,
                                                  b=None)
    network[name + '_2resid_norm'] = layers.BatchNormLayer(
        network[name + '_2resid'], is_training)
    network[name + '_resid_sum'] = layers.ElementwiseSumLayer(
        [network[lastlayer], network[name + '_2resid_norm']])
    network[name + '_resid'] = layers.ActivationLayer(network[name +
                                                              '_resid_sum'],
                                                      function=activation)
    return network
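
# Example (illustrative sketch): a dense residual block keeps num_units equal
# to the incoming layer's last dimension so the skip-connection shapes match.
# The layer names below are hypothetical.
#
#   model_layer = {'name': 'dense_resid1', 'batch_norm': is_training,
#                  'dropout_block': 0.9}
#   network = dense_residual_block(network, 'fc1_active', model_layer)
#   # the block's output layer is network['dense_resid1_resid']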
	def dense_residual_block(self, model_layer):

		lastlayer = self.lastlayer

		name = model_layer['name']
		is_training = model_layer['batch_norm']
		activation = model_layer.get('function', 'relu')

		# original residual unit
		shape = self.network[lastlayer].get_output_shape()
		num_units = shape[-1].value

		self.network[name+'_1resid'] = layers.DenseLayer(self.network[lastlayer], num_units=num_units, b=None)
		self.network[name+'_1resid_norm'] = layers.BatchNormLayer(self.network[name+'_1resid'], is_training)
		self.network[name+'_1resid_active'] = layers.ActivationLayer(self.network[name+'_1resid_norm'], function=activation)

		if 'dropout_block' in model_layer:
			# create a keep_prob placeholder so dropout can be rescaled at test time
			placeholder_name = 'keep_prob'+str(self.num_dropout)
			keep_prob = tf.placeholder(tf.float32, name=placeholder_name)
			self.network[name+'_dropout1'] = layers.DropoutLayer(self.network[name+'_1resid_active'], keep_prob=keep_prob)
			self.hidden_feed_dict[keep_prob] = model_layer['dropout_block']
			self.num_dropout += 1
			lastname = name+'_dropout1'
		else:
			lastname = name+'_1resid_active'

		self.network[name+'_2resid'] = layers.DenseLayer(self.network[lastname], num_units=num_units, b=None)
		self.network[name+'_2resid_norm'] = layers.BatchNormLayer(self.network[name+'_2resid'], is_training)
		self.network[name+'_resid_sum'] = layers.ElementwiseSumLayer([self.network[lastlayer], self.network[name+'_2resid_norm']])
		self.network[name+'_resid'] = layers.ActivationLayer(self.network[name+'_resid_sum'], function=activation)
		self.lastlayer = name+'_resid'
def build_layers(model_layers, network=None):

    if network is None:
        network = OrderedDict()

    # loop to build each layer of network
    lastlayer = ''
    for model_layer in model_layers:
        layer = model_layer['layer']
        name = model_layer['name']

        if layer == "input":

            # add input layer
            network[name] = single_layer(model_layer, network)
            lastlayer = name

        else:
            if layer == 'residual-conv1d':

                network = conv1d_residual_block(network, lastlayer,
                                                model_layer)
                lastlayer = name + '_resid'

            elif layer == 'residual-conv2d':

                network = conv2d_residual_block(network, lastlayer,
                                                model_layer)
                lastlayer = name + '_resid'

            elif layer == 'residual-dense':

                network = dense_residual_block(network, lastlayer, model_layer)
                lastlayer = name + '_resid'

            else:
                # add core layer
                newlayer = name
                network[newlayer] = single_layer(model_layer,
                                                 network[lastlayer])
                lastlayer = newlayer

                # add bias layer
                if 'b' in model_layer:
                    newlayer = name + '_bias'
                    network[newlayer] = layers.BiasLayer(network[lastlayer],
                                                         b=model_layer['b'])
                    lastlayer = newlayer

        # add Batch normalization layer
        if 'batch_norm' in model_layer:
            newlayer = name + '_batch'
            network[newlayer] = layers.BatchNormLayer(
                network[lastlayer], model_layer['batch_norm'])
            lastlayer = newlayer

        # add activation layer
        if 'activation' in model_layer:
            newlayer = name + '_active'
            network[newlayer] = layers.ActivationLayer(
                network[lastlayer], function=model_layer['activation'])
            lastlayer = newlayer

        # add max-pooling layer
        if 'pool_size' in model_layer:
            newlayer = name + '_pool'
            if isinstance(model_layer['pool_size'], (tuple, list)):
                network[newlayer] = layers.MaxPool2DLayer(
                    network[lastlayer], pool_size=model_layer['pool_size'])
            else:
                network[newlayer] = layers.MaxPool2DLayer(
                    network[lastlayer],
                    pool_size=(model_layer['pool_size'], 1))
            lastlayer = newlayer

        # add dropout layer
        if 'dropout' in model_layer:
            newlayer = name + '_dropout'
            network[newlayer] = layers.DropoutLayer(
                network[lastlayer], keep_prob=model_layer['dropout'])
            lastlayer = newlayer

    return network, lastlayer
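
# Example (illustrative sketch): a model_layers specification driving
# build_layers. The 'input' and 'conv2d' layer types are assumed to be handled
# by single_layer; all concrete keys and values below are hypothetical.
#
#   model_layers = [
#       {'layer': 'input', 'name': 'input', 'input_shape': (None, 200, 1, 4)},
#       {'layer': 'conv2d', 'name': 'conv1', 'num_filters': 32,
#        'filter_size': (19, 1), 'b': 0.05, 'batch_norm': is_training,
#        'activation': 'relu', 'pool_size': 4, 'dropout': 0.8},
#       {'layer': 'residual-conv2d', 'name': 'resid1', 'filter_size': 5,
#        'batch_norm': is_training},
#   ]
#   network, lastlayer = build_layers(model_layers)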
	def build_layers(self):

		# loop to build each layer of network
		for model_layer in self.model_layers:
			layer = model_layer['layer']
			name = model_layer['name']	

			if layer == "input":

				# add input layer
				self.single_layer(model_layer)

			else:
				if layer == 'residual-conv1d':
					self.conv1d_residual_block(model_layer)

				elif layer == 'residual-conv2d':
					self.conv2d_residual_block(model_layer)

				elif layer == 'residual-dense':
					self.dense_residual_block(model_layer)

				else:
					# add core layer
					self.single_layer(model_layer)
					
					# add bias layer
					if 'b' in model_layer:
						newlayer = name+'_bias'
						self.network[newlayer] = layers.BiasLayer(self.network[self.lastlayer], b=model_layer['b'])
						self.lastlayer = newlayer    


			# add Batch normalization layer
			if 'norm' in model_layer:
				if 'batch' in model_layer['norm']:
					newlayer = name + '_batch'
					self.network[newlayer] = layers.BatchNormLayer(self.network[self.lastlayer], self.is_training)
					self.lastlayer = newlayer

			# add activation layer
			if 'activation' in model_layer:
				newlayer = name+'_active'
				self.network[newlayer] = layers.ActivationLayer(self.network[self.lastlayer], function=model_layer['activation']) 
				self.lastlayer = newlayer

			# add max-pooling layer
			if 'pool_size' in model_layer:  
				newlayer = name+'_pool'
				if isinstance(model_layer['pool_size'], (tuple, list)):
					self.network[newlayer] = layers.MaxPool2DLayer(self.network[self.lastlayer], pool_size=model_layer['pool_size'])
				else:
					self.network[newlayer] = layers.MaxPool2DLayer(self.network[self.lastlayer], pool_size=(model_layer['pool_size'], 1))
				self.lastlayer = newlayer       

			# add dropout layer
			if 'dropout' in model_layer:
				newlayer = name+'_dropout'

				# create a keep_prob placeholder so dropout can be rescaled at test time
				placeholder_name = 'keep_prob'+str(self.num_dropout)
				keep_prob = tf.placeholder(tf.float32, name=placeholder_name)
				self.hidden_feed_dict[keep_prob] = model_layer['dropout']
				self.num_dropout += 1

				self.network[newlayer] = layers.DropoutLayer(self.network[self.lastlayer], keep_prob=keep_prob)
				self.lastlayer = newlayer
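
	# Example (illustrative): merging the dropout keep_prob values into a
	# session feed at training time. The input/target placeholders and
	# train_op below are hypothetical.
	#
	#   feed = {inputs: x_batch, targets: y_batch}
	#   feed.update(self.hidden_feed_dict)   # training keep_prob values
	#   sess.run(train_op, feed_dict=feed)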