def setup_layers(self, params): """Build all the model layers """ # # Input layer # # Input and encoding layers in_shape = [None] if isinstance(params.in_size, (list, tuple)): in_shape.extend(params.in_size) else: in_shape.extend([params.in_size, 1]) layer_stack = [tf.placeholder("float", name="input", shape=in_shape)] in_layer = layer_stack[0] # Add conv layers for idx, num_nodes in enumerate(params.conv_nodes): layer_stack.append( self.make_conv_layer(layer_stack[-1], num_nodes, 'conv_{}'.format(idx), params.conv_params)) # Flatten convolutional layers layer_stack.append(tf.contrib.layers.flatten(layer_stack[-1])) # Add dense layers for idx, num_nodes in enumerate(params.dense_nodes): layer_stack.append( self.make_dense_layer(layer_stack[-1], num_nodes, 'dense_{}'.format(idx), params.dense_params)) # Output layer is broken into two pieces. The pre/post the application # of the activation function. That's because the loss functions # for cross-entropy rely on the pre-activation scores in preact preact_out_layer, out_layer = self.make_dense_output_layer( layer_stack[-1], params.out_size, params.output_params) target_layer = tf.placeholder("float", name="target", shape=[None, params.out_size]) if params.output_params.activation in ['sigmoid']: loss = tops.loss_sigmoid_ce(preact_out_layer, target_layer) elif params.output_params.activation in ['softmax']: loss = tops.loss_softmax_ce(preact_out_layer, target_layer) else: loss = tops.loss_mse(target_layer, out_layer) return in_layer, out_layer, target_layer, loss
def setup_layers(self, params): """Build all the model layers """ # # Input layer # layer_stack = [ tf.placeholder( "float", name="input", shape=[None, params.in_size] ) ] in_layer = layer_stack[0] for idx, num_nodes in enumerate(params.hidden_nodes): layer_stack.append( self.make_dense_layer( layer_stack[-1], num_nodes, 'hidden_{}'.format(idx), params.hidden_params ) ) preact_out_layer, out_layer = self.make_dense_output_layer( layer_stack[-1], params.out_size, params.output_params ) target_layer = tf.placeholder( "float", name="target", shape=[None, params.out_size] ) if params.output_params.activation in ['sigmoid']: loss = tops.loss_sigmoid_ce(preact_out_layer, target_layer) elif params.output_params.activation in ['softmax']: loss = tops.loss_softmax_ce(preact_out_layer, target_layer) else: loss = tops.loss_mse(target_layer, out_layer) return in_layer, out_layer, target_layer, loss
def setup_layers(self, params): """Build all the model layers """ # Input and encoding layers in_shape = [None] if isinstance(params.in_size, (list, tuple)): in_shape.extend(params.in_size) else: in_shape.extend([params.in_size, 1]) encode_layers = [tf.placeholder("float", name="input", shape=in_shape)] for idx, num_nodes in enumerate(params.encode_nodes): encode_layers.append( self.make_conv_layer(encode_layers[-1], num_nodes, 'encode_{}'.format(idx), params.encode_params)) # Bottleneck and decoding layers decode_layers = [ self.make_conv_layer(encode_layers[-1], params.bottleneck_dim, 'bottleneck', params.bottleneck_params) ] for idx, num_nodes in enumerate(params.decode_nodes): decode_layers.append( self.make_deconv_layer(decode_layers[-1], num_nodes, 'decode_{}'.format(idx), params.decode_params)) # Standardizing the input/output layer names in_layer = encode_layers[0] target_layer = tf.placeholder("float", name="target", shape=in_layer.get_shape().as_list()) out_layer = tops.fit_to_shape( self.make_deconv_layer(decode_layers[-1], 1, 'output_layer', params.output_params), target_layer.get_shape().as_list()) loss = tops.loss_mse(target_layer, out_layer) return in_layer, out_layer, target_layer, loss
def setup_layers(self, params): """Build all the model layers """ # # Input and encoding layers # encode_layers = [ tf.placeholder("float", name="input", shape=[None, params.in_size]) ] for idx, num_nodes in enumerate(params.encode_nodes): encode_layers.append( self.make_dense_layer(encode_layers[-1], num_nodes, 'encode_{}'.format(idx), params.encode_params)) # # Bottleneck and decoding layers # decode_layers = [ self.make_dense_layer(encode_layers[-1], params.bottleneck_dim, 'bottleneck', params.bottleneck_params) ] for idx, num_nodes in enumerate(params.decode_nodes): decode_layers.append( self.make_dense_layer(decode_layers[-1], num_nodes, 'decode_{}'.format(idx), params.decode_params)) in_layer = encode_layers[0] target_layer = tf.placeholder("float", name="target", shape=in_layer.get_shape().as_list()) out_layer = tops.fit_to_shape( self.make_dense_layer(decode_layers[-1], params.in_size, 'output_layer', params.output_params), target_layer.get_shape().as_list()) loss = tops.loss_mse(target_layer, out_layer) return in_layer, out_layer, target_layer, loss
def setup_layers(self, params): """Build all the model layers """ in_layer = tf.placeholder("float", name="input", shape=[None, params.in_size]) coeff = tf.Variable(tf.ones([params.in_size, 1]), name="coeff") intercept = tf.Variable(tf.zeros([ 1, ]), name="intercept") out_layer = tf.add(tf.matmul(in_layer, coeff), intercept, name="output") target_layer = tf.placeholder("float", name="target", shape=[None, params.out_size]) loss = tops.loss_mse(out_layer, target_layer) return in_layer, out_layer, target_layer, loss