def __init__(self, FLAGS):
    """
    Initializes the ATLAS model.

    Inputs:
    - FLAGS: A _FlagValuesWrapper object.
    """
    self.FLAGS = FLAGS

    with tf.variable_scope("ATLASModel"):
        self.add_placeholders()
        self.build_graph()
        self.add_loss()

    # Defines the trainable parameters, gradient, gradient norm, and clip by
    # gradient norm
    params = tf.trainable_variables()
    gradients = tf.gradients(self.loss, params)
    self.gradient_norm = tf.global_norm(gradients)
    clipped_gradients, _ = tf.clip_by_global_norm(gradients,
                                                  FLAGS.max_gradient_norm)
    self.param_norm = tf.global_norm(params)

    # Defines the optimizer and updates; {self.updates} must be fetched in
    # sess.run to apply a gradient update
    self.global_step_op = tf.Variable(0, name="global_step", trainable=False)
    opt = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
    self.updates = opt.apply_gradients(zip(clipped_gradients, params),
                                       global_step=self.global_step_op)

    # Adds a summary that writes example image triplets to TensorBoard
    utils.add_summary_image_triplet(self.inputs_op,
                                    self.target_masks_op,
                                    self.predicted_masks_op,
                                    num_images=self.FLAGS.num_summary_images,
                                    use_volumetric=self.FLAGS.use_volumetric)

    # Defines the saver (for checkpointing) and summaries (for TensorBoard)
    self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.keep)
    self.summaries = tf.summary.merge_all()
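# Usage sketch (editorial addition, not part of the original code): one way the
# model might be driven for a single training step. The contents of feed_dict
# depend on add_placeholders(), which is defined elsewhere, so the batch
# construction is an assumption; the fetched tensors are the ones defined above.
#
#   model = ATLASModel(FLAGS)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       # Fetching {model.updates} is what actually applies the gradient update.
#       _, loss, step, summaries = sess.run(
#           [model.updates, model.loss, model.global_step_op, model.summaries],
#           feed_dict=feed_dict)  # feed_dict: placeholder -> batch arrays (assumed)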
def __init__(self, FLAGS):
    """
    Initializes the meta U-Net ATLAS model.

    Inputs:
    - FLAGS: A _FlagValuesWrapper object.
    """
    self.FLAGS = FLAGS

    with tf.variable_scope("MetaUNetATLASModel"):
        self.add_placeholders()
        self.build_graph()
        self.add_loss()

    # Defines the trainable parameters, gradient, gradient norm, and clip by
    # gradient norm
    params = tf.trainable_variables()
    gradients = tf.gradients(self.loss, params)
    self.gradient_norm = tf.global_norm(gradients)
    clipped_gradients, _ = tf.clip_by_global_norm(gradients,
                                                  FLAGS.max_gradient_norm)
    self.param_norm = tf.global_norm(params)

    # Defines the optimizer and updates; {self.updates} must be fetched in
    # sess.run to apply a gradient update
    self.global_step_op = tf.Variable(0, name="global_step", trainable=False)
    opt = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
    self.updates = opt.apply_gradients(zip(clipped_gradients, params),
                                       global_step=self.global_step_op)

    # Adds a summary that writes example image triplets to TensorBoard; only
    # channel 0 of the multi-channel inputs is passed for visualization
    utils.add_summary_image_triplet(self.inputs_op[:, :, :, 0],
                                    self.target_masks_op,
                                    self.predicted_masks_op,
                                    num_images=self.FLAGS.num_summary_images)

    # Defines the saver (for checkpointing) and summaries (for TensorBoard)
    self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.keep)
    self.summaries = tf.summary.merge_all()
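# Checkpointing and summary-writing sketch (editorial addition): how the saver
# and summaries defined above are typically consumed in a TF1 training loop.
# The log/checkpoint directory FLAGS.train_dir is an assumption, as is the
# availability of the os module; everything else is taken from the code above.
#
#   writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
#   writer.add_summary(summaries, global_step=step)
#   model.saver.save(sess, os.path.join(FLAGS.train_dir, "model.ckpt"),
#                    global_step=model.global_step_op)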