def create_variables(self):
    print(' initializing model autoencoder variables')
    with tf.device('/device:GPU:%i' % (self.config.layers['preferred_gpu'])):
        layout = self.config.layers['autoencoder']
        first_layer = len(self.encoder_variables)
        last_layer = min(len(layout), self.max_layer)
        for i in range(first_layer, last_layer):
            layer_id = "encoder_%i" % i
            print(' creating encoder variables ' + layer_id)
            # generate variables (if necessary)
            self.encoder_variables.append(
                layers.encoder_variables(layer_id, layout[i]))
def create_3D_encoders_variables(self):
    # NOTE: only the first configured 3D encoder is used here; the per-config
    # loop is left disabled.
    # for encoder_config in self.config.encoders_3D:
    with tf.device('/device:GPU:%i' % (self.config.encoders_3D[0]['preferred_gpu'])):
        # self.create_3D_encoder(encoder_config)
        encoder_id = self.config.encoders_3D[0]['id']
        encoder = dict()
        layout = []
        for i in range(0, len(self.config.layers['encoder_3D'])):
            layout.append(self.config.layers['encoder_3D'][i])
        with tf.variable_scope(encoder_id, reuse=tf.AUTO_REUSE):
            encoder['variables'] = []
            last_layer = min(len(layout), self.max_layer)
            for i in range(0, last_layer):
                layer_id = "encoder_%i" % i
                print(' creating 3D encoder variables ' + layer_id)
                encoder['variables'].append(
                    layers.encoder_variables(layer_id, layout[i]))
            self.encoder_variables = encoder['variables']
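# Hedged sketch (not part of the original code): the commented-out loop in
# create_3D_encoders_variables() suggests a driver that builds every configured
# 3D encoder on its preferred GPU. The method name create_all_3D_encoders and
# its placement here are assumptions; it only uses attributes that already
# appear above (self.config.encoders_3D, 'preferred_gpu', create_3D_encoder).
def create_all_3D_encoders(self):
    for encoder_config in self.config.encoders_3D:
        with tf.device('/device:GPU:%i' % encoder_config['preferred_gpu']):
            self.create_3D_encoder(encoder_config)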
def create_3D_encoder(self, encoder_config):
    encoder = dict()
    encoder_id = encoder_config['id']

    # find the position of this encoder in the layer configuration
    ids = []
    for i in range(0, len(self.config.layer_config)):
        ids.append(self.config.layer_config[i]['id'])
    pos = ids.index(encoder_id)

    # layout: input layer for this encoder followed by the shared 3D encoder layers
    layout = []
    layout.insert(0, self.config.layer_config[pos]['layout'][0])
    for i in range(0, len(self.config.layers['encoder_3D'])):
        layout.append(self.config.layers['encoder_3D'][i])

    print('creating encoder pipeline for ' + encoder_id)
    with tf.variable_scope(encoder_id):
        encoder['id'] = encoder_id
        encoder['channels'] = encoder_config['channels']
        encoder['preferred_gpu'] = encoder_config['preferred_gpu']
        encoder['variables'] = []
        encoder['features_v'] = None
        encoder['features_h'] = None
        encoder['conv_layers_v'] = []
        encoder['conv_layers_h'] = []

        ########################################################################
        # create encoder variables
        last_layer = min(len(layout), self.max_layer)
        for i in range(0, last_layer):
            layer_id = "encoder_%i" % i
            print(' creating 3D encoder variables ' + layer_id)
            encoder['variables'].append(
                layers.encoder_variables(layer_id, layout[i]))

        ########################################################################
        # create 3D encoder layers for stacks
        start = self.config.layer_config[pos]['start']
        end = self.config.layer_config[pos]['end']
        shape = [
            self.stack_shape[0], self.stack_shape[1], self.stack_shape[2],
            self.stack_shape[3], encoder['channels']
        ]
        if encoder['features_v'] is None:
            encoder['features_v'] = self.stack_v[:, :, :, :, start:end]
            # reshape after the channel slice (presumably to pin down the
            # static shape of the cropped stack)
            encoder['features_v'] = tf.reshape(encoder['features_v'], shape)
        if encoder['features_h'] is None:
            encoder['features_h'] = self.stack_h[:, :, :, :, start:end]
            encoder['features_h'] = tf.reshape(encoder['features_h'], shape)

        print(' CREATING 3D encoder layers for %s ' % encoder_id)
        for i in range(0, last_layer):
            layer_id_v = "v_%s_%i" % (encoder_id, i)
            layer_id_h = "h_%s_%i" % (encoder_id, i)
            print(' generating downconvolution layer structure for %s %i' %
                  (encoder_id, i))
            encoder['conv_layers_v'].append(
                layers.layer_conv3d(layer_id_v, encoder['variables'][i],
                                    encoder['features_v'], self.phase,
                                    self.config.training))
            encoder['conv_layers_h'].append(
                layers.layer_conv3d(layer_id_h, encoder['variables'][i],
                                    encoder['features_h'], self.phase,
                                    self.config.training))
            # update layer shapes
            encoder['variables'][i].input_shape = encoder['conv_layers_v'][i].input_shape
            encoder['variables'][i].output_shape = encoder['conv_layers_v'][i].output_shape
            # final encoder layer: vertical/horizontal features
            encoder['features_v'] = encoder['conv_layers_v'][-1].out
            encoder['features_h'] = encoder['conv_layers_h'][-1].out

        ########################################################################
        # create dense layers
        print(' creating dense layers for %s' % encoder_id)
        encoder['feature_shape'] = encoder['features_v'].shape.as_list()
        sh = encoder['feature_shape']
        encoder['encoder_input_size'] = sh[1] * sh[2] * sh[3] * sh[4]

        # setup shared feature space between horizontal/vertical encoder
        encoder['features'] = tf.concat([
            tf.reshape(encoder['features_h'], [-1, encoder['encoder_input_size']]),
            tf.reshape(encoder['features_v'], [-1, encoder['encoder_input_size']])
        ], 1)
        encoder['features_transposed'] = tf.concat([
            tf.reshape(
                tf.transpose(encoder['features_h'], [0, 1, 3, 2, 4]),
                [-1, encoder['encoder_input_size']]),
            tf.reshape(encoder['features_v'], [-1, encoder['encoder_input_size']])
        ], 1)
        encoder['encoder_nodes'] = encoder['features'].shape.as_list()[1]
        # with tf.variable_scope('input'):
        #     encoder['input_features'] = tf.placeholder(
        #         tf.float32, encoder['features'].shape.as_list())

    self.encoders_3D[encoder_id] = encoder
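# Hedged helper sketch (not part of the original code): mirrors the flattening
# used above when building the shared feature space, where
# encoder['encoder_input_size'] = sh[1] * sh[2] * sh[3] * sh[4], so the
# concatenated h/v feature vector holds 2 * encoder_input_size nodes.
# The name flattened_feature_size is illustrative only.
def flattened_feature_size(feature_shape):
    # feature_shape is [batch, D, H, W, C]; drop the batch axis and multiply
    # the remaining static dimensions.
    size = 1
    for dim in feature_shape[1:]:
        size *= dim
    return size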