# Esempio n. 1 (Example 1) — score: 0
    def init_networks(self):
        """
        Initialize networks and placeholders.

        Builds the queue-fed training graph and the placeholder-fed validation
        graph; weights are shared between the two via a single
        tf.make_template.
        """
        # Network expects the spatial dimensions in reversed order.
        network_image_size = list(reversed(self.image_size))

        # Shapes for the data generator; channel dimension position depends on
        # data_format ([C, ...spatial] vs [...spatial, C]).
        if self.data_format == 'channels_first':
            data_generator_entries = OrderedDict([('image', [1] + network_image_size),
                                                  ('single_label', [self.num_labels] + network_image_size),
                                                  ('single_heatmap', [1] + network_image_size)])
        else:
            data_generator_entries = OrderedDict([('image', network_image_size + [1]),
                                                  ('single_label', network_image_size + [self.num_labels]),
                                                  ('single_heatmap', network_image_size + [1])])

        # NOTE(review): the key 'labels' does not match any entry name above
        # ('single_label', 'single_heatmap') — presumably DataGenerator falls
        # back to a default dtype for unlisted entries; confirm.
        data_generator_types = {'image':  tf.float32,
                                'labels': tf.uint8}

        # create model with shared weights between train and val
        training_net = tf.make_template('net', self.network)

        # build train graph
        self.train_queue = DataGenerator(coord=self.coord, dataset=self.dataset_train, data_names_and_shapes=data_generator_entries, data_types=data_generator_types, batch_size=self.batch_size)
        data, mask, single_heatmap = self.train_queue.dequeue()
        # Concatenate the single-landmark heatmap as an extra input channel.
        # NOTE(review): axis=1 is the channel axis only for channels_first;
        # verify behavior for the channels_last branch above.
        data_heatmap_concat = tf.concat([data, single_heatmap], axis=1)
        prediction = training_net(data_heatmap_concat, num_labels=self.num_labels, is_training=True, actual_network=self.unet, padding=self.padding, **self.network_parameters)
        # losses: task loss + weight regularization
        self.loss_net = self.loss_function(labels=mask, logits=prediction, data_format=self.data_format)
        self.loss_reg = get_reg_loss(self.reg_constant)
        self.loss = self.loss_net + self.loss_reg

        # solver: Adam with a piecewise-constant LR schedule and optional
        # global-norm gradient clipping
        global_step = tf.Variable(self.current_iter, trainable=False)
        learning_rate = tf.train.piecewise_constant(global_step, self.learning_rate_boundaries, self.learning_rates)
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        unclipped_gradients, variables = zip(*optimizer.compute_gradients(self.loss))
        # Track the unclipped gradient norm for monitoring.
        norm = tf.global_norm(unclipped_gradients)
        if self.clip_gradient_global_norm > 0:
            gradients, _ = tf.clip_by_global_norm(unclipped_gradients, self.clip_gradient_global_norm)
        else:
            gradients = unclipped_gradients
        self.optimizer = optimizer.apply_gradients(zip(gradients, variables), global_step=global_step)
        self.train_losses = OrderedDict([('loss', self.loss_net), ('loss_reg', self.loss_reg), ('gradient_norm', norm)])

        # build val graph: placeholders (batch size 1) instead of the queue,
        # same template so weights are shared with the training net
        self.data_val, self.mask_val, self.single_heatmap_val = create_placeholders_tuple(data_generator_entries, data_types=data_generator_types, shape_prefix=[1])
        self.data_heatmap_concat_val = tf.concat([self.data_val, self.single_heatmap_val], axis=1)
        self.prediction_val = training_net(self.data_heatmap_concat_val, num_labels=self.num_labels, is_training=False, actual_network=self.unet, padding=self.padding, **self.network_parameters)
        # NOTE(review): named "softmax" but uses sigmoid — presumably
        # per-channel binary outputs; confirm against the loss function.
        self.prediction_softmax_val = tf.nn.sigmoid(self.prediction_val)

        # Validation loss only when ground truth is available; gradient_norm is
        # a constant 0 so the val losses dict matches the train losses keys.
        if self.has_validation_groundtruth:
            self.loss_val = self.loss_function(labels=self.mask_val, logits=self.prediction_val, data_format=self.data_format)
            self.val_losses = OrderedDict([('loss', self.loss_val), ('loss_reg', self.loss_reg), ('gradient_norm', tf.constant(0, tf.float32))])
    def initNetworks(self):
        """
        Initialize the landmark heatmap-regression networks and placeholders.

        Builds the queue-fed training graph (with learnable per-landmark
        sigmas) and the placeholder-fed validation graph; weights are shared
        via tf.make_template.
        """
        # NOTE(review): camelCase name is inconsistent with the snake_case
        # init_networks methods elsewhere in this file; kept for callers.
        net = tf.make_template('net', self.network)

        # Shapes for the data generators; channel dimension position depends
        # on data_format. Landmarks are [num_landmarks, 3] coordinate rows.
        if self.data_format == 'channels_first':
            data_generator_entries = OrderedDict([('image', [self.image_channels] + list(reversed(self.image_size))),
                                                  ('landmarks', [self.num_landmarks, 3])])
            data_generator_entries_val = OrderedDict([('image', [self.image_channels] + list(reversed(self.image_size))),
                                                      ('landmarks', [self.num_landmarks, 3])])
        else:
            data_generator_entries = OrderedDict([('image', list(reversed(self.image_size)) + [self.image_channels]),
                                                  ('landmarks', [self.num_landmarks, 3])])
            data_generator_entries_val = OrderedDict([('image', list(reversed(self.image_size)) + [self.image_channels]),
                                                      ('landmarks', [self.num_landmarks, 3])])

        # Learnable per-landmark Gaussian sigmas for the heatmap targets;
        # each sigma is also reported individually in the loss summaries.
        sigmas = tf.get_variable('sigmas', [self.num_landmarks], initializer=tf.constant_initializer(self.heatmap_sigma))
        sigmas_list = [(f's{i}', sigmas[i]) for i in range(self.num_landmarks)]

        # build training graph
        self.train_queue = DataGenerator(self.dataset_train, self.coord, data_generator_entries, batch_size=self.batch_size, n_threads=8)
        placeholders = self.train_queue.dequeue()
        image = placeholders[0]
        target_landmarks = placeholders[1]
        prediction = net(image, num_landmarks=self.num_landmarks, is_training=True, data_format=self.data_format)
        # Target heatmaps are generated on the fly from landmark coordinates
        # using the (learnable) sigmas.
        target_heatmaps = generate_heatmap_target(list(reversed(self.heatmap_size)), target_landmarks, sigmas, scale=self.sigma_scale, normalize=True, data_format=self.data_format)
        # Total loss = heatmap loss + regularization + sigma penalty.
        loss_sigmas = self.loss_sigmas(sigmas, target_landmarks)
        self.loss_reg = get_reg_loss(self.reg_constant)
        self.loss_net = self.loss_function(target_heatmaps, prediction)
        # cast: loss_reg is presumably not float32 here — TODO confirm dtype.
        self.loss = self.loss_net + tf.cast(self.loss_reg, tf.float32) + loss_sigmas

        # optimizer: Nesterov momentum with fixed learning rate and a very
        # loose global-norm clip (10000.0, effectively a safety valve)
        global_step = tf.Variable(self.current_iter, trainable=False)
        optimizer = tf.train.MomentumOptimizer(learning_rate=self.learning_rate, momentum=0.99, use_nesterov=True)
        unclipped_gradients, variables = zip(*optimizer.compute_gradients(self.loss))
        norm = tf.global_norm(unclipped_gradients)
        gradients, _ = tf.clip_by_global_norm(unclipped_gradients, 10000.0)
        self.optimizer = optimizer.apply_gradients(zip(gradients, variables), global_step=global_step)
        self.train_losses = OrderedDict([('loss', self.loss_net), ('loss_reg', self.loss_reg), ('loss_sigmas', loss_sigmas), ('norm', norm)] + sigmas_list)

        # build val graph: placeholders (batch size 1), shared weights via the
        # same template
        self.val_placeholders = tensorflow_train.utils.tensorflow_util.create_placeholders(data_generator_entries_val, shape_prefix=[1])
        self.image_val = self.val_placeholders['image']
        self.target_landmarks_val = self.val_placeholders['landmarks']
        self.prediction_val = net(self.image_val, num_landmarks=self.num_landmarks, is_training=False, data_format=self.data_format)
        self.target_heatmaps_val = generate_heatmap_target(list(reversed(self.heatmap_size)), self.target_landmarks_val, sigmas, scale=self.sigma_scale, normalize=True, data_format=self.data_format)

        # losses: sigma/norm entries are constant 0 so the val dict keys match
        # the train dict keys
        self.loss_val = self.loss_function(self.target_heatmaps_val, self.prediction_val)
        self.val_losses = OrderedDict([('loss', self.loss_val), ('loss_reg', self.loss_reg), ('loss_sigmas', tf.constant(0, tf.float32)), ('norm', tf.constant(0, tf.float32))] + sigmas_list)
    def init_networks(self):
        """
        Initialize networks and placeholders.

        Builds the queue-fed training graph and the placeholder-fed validation
        graph for spine-heatmap regression; weights are shared via a single
        tf.make_template.
        """
        # Network expects the spatial dimensions in reversed order.
        network_image_size = list(reversed(self.image_size))

        if self.data_format == 'channels_first':
            data_generator_entries = OrderedDict([('image', [1] + network_image_size),
                                                  ('spine_heatmap', [1] + network_image_size)])
        else:
            # Bug fix: 'spine_heatmap' previously used the channels_first
            # layout ([1] + spatial) in this branch; the channel dimension
            # must come last, matching the 'image' entry.
            data_generator_entries = OrderedDict([('image', network_image_size + [1]),
                                                  ('spine_heatmap', network_image_size + [1])])

        data_generator_types = {'image': tf.float32,
                                'spine_heatmap': tf.float32}

        # create model with shared weights between train and val
        training_net = tf.make_template('net', self.network)

        # build train graph
        self.train_queue = DataGenerator(coord=self.coord, dataset=self.dataset_train, data_names_and_shapes=data_generator_entries, data_types=data_generator_types, batch_size=self.batch_size)
        data, target_spine_heatmap = self.train_queue.dequeue()

        prediction = training_net(data, num_labels=self.num_labels, is_training=True, actual_network=self.unet, padding=self.padding, **self.network_parameters)
        # losses: regression loss + weight regularization
        self.loss_net = self.loss_function(target=target_spine_heatmap, pred=prediction)
        self.loss_reg = get_reg_loss(self.reg_constant)
        # cast: loss_reg is presumably not float32 here — TODO confirm dtype.
        self.loss = self.loss_net + tf.cast(self.loss_reg, tf.float32)

        # solver: Adam with a piecewise-constant LR schedule
        global_step = tf.Variable(self.current_iter, trainable=False)
        learning_rate = tf.train.piecewise_constant(global_step, self.learning_rate_boundaries, self.learning_rates)
        self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.loss, global_step=global_step)
        self.train_losses = OrderedDict([('loss', self.loss_net), ('loss_reg', self.loss_reg)])

        # build val graph: placeholders (batch size 1), shared weights via the
        # same template
        self.data_val, self.target_spine_heatmap_val = create_placeholders_tuple(data_generator_entries, data_types=data_generator_types, shape_prefix=[1])
        self.prediction_val = training_net(self.data_val, num_labels=self.num_labels, is_training=False, actual_network=self.unet, padding=self.padding, **self.network_parameters)

        # Validation loss only when ground truth is available.
        if self.has_validation_groundtruth:
            self.loss_val = self.loss_function(target=self.target_spine_heatmap_val, pred=self.prediction_val)
            self.val_losses = OrderedDict([('loss', self.loss_val), ('loss_reg', self.loss_reg)])
# Esempio n. 4 (Example 4) — score: 0
    def init_networks(self):
        """
        Initialize networks and placeholders.

        Builds the queue-fed training graph (with optionally learnable
        per-landmark sigmas and a landmark mask) and the placeholder-fed
        validation graph; weights are shared via tf.make_template.
        """
        # Network expects the spatial dimensions in reversed order.
        network_image_size = list(reversed(self.image_size))

        # Shapes for the data generator; channel dimension position depends on
        # data_format. Landmarks are [num_landmarks, 4] rows.
        if self.data_format == 'channels_first':
            data_generator_entries = OrderedDict([
                ('image', [1] + network_image_size),
                ('landmarks', [self.num_landmarks, 4]),
                ('landmark_mask', [1] + network_image_size)
            ])
        else:
            data_generator_entries = OrderedDict([
                ('image', network_image_size + [1]),
                ('landmarks', [self.num_landmarks, 4]),
                ('landmark_mask', network_image_size + [1])
            ])

        # NOTE(review): only 'image' is given a dtype — presumably
        # DataGenerator/create_placeholders_tuple default the rest; confirm.
        data_generator_types = {'image': tf.float32}

        # create sigmas variable: per-landmark Gaussian widths for heatmap
        # generation; frozen with stop_gradient when not learnable
        sigmas = tf.get_variable('sigmas', [self.num_landmarks],
                                 initializer=tf.constant_initializer(
                                     self.heatmap_sigma))
        if not self.learnable_sigma:
            sigmas = tf.stop_gradient(sigmas)
        # Mean sigma is reported as a training summary.
        mean_sigmas = tf.reduce_mean(sigmas)

        # create model with shared weights between train and val
        training_net = tf.make_template('net', self.network)

        # build train graph
        self.train_queue = DataGenerator(
            coord=self.coord,
            dataset=self.dataset_train,
            data_names_and_shapes=data_generator_entries,
            data_types=data_generator_types,
            batch_size=self.batch_size)
        data, target_landmarks, landmark_mask = self.train_queue.dequeue()
        # Target heatmaps are generated on the fly from landmark coordinates.
        target_heatmaps = generate_heatmap_target(list(
            reversed(self.heatmap_size)),
                                                  target_landmarks,
                                                  sigmas,
                                                  scale=self.sigma_scale,
                                                  normalize=True,
                                                  data_format=self.data_format)
        # The network returns the combined prediction plus its local and
        # spatial components; only the combined one feeds the loss here.
        prediction, local_prediction, spatial_prediction = training_net(
            data,
            num_labels=self.num_landmarks,
            is_training=True,
            actual_network=self.unet,
            padding=self.padding,
            **self.network_parameters)
        # losses: masked heatmap loss + sigma penalty + regularization.
        self.loss_net = self.loss_function(target=target_heatmaps,
                                           pred=prediction,
                                           mask=landmark_mask)
        # NOTE(review): [0, :, 0] takes the first column of the first batch
        # element — presumably a per-landmark validity flag; confirm.
        self.loss_sigmas = self.loss_function_sigmas(sigmas,
                                                     target_landmarks[0, :, 0])
        self.loss_reg = get_reg_loss(self.reg_constant)
        self.loss = self.loss_net + self.loss_reg + self.loss_sigmas

        # solver: Nesterov momentum with piecewise-constant LR schedule and
        # optional global-norm gradient clipping
        global_step = tf.Variable(self.current_iter, trainable=False)
        learning_rate = tf.train.piecewise_constant(
            global_step, self.learning_rate_boundaries, self.learning_rates)
        optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,
                                               momentum=0.99,
                                               use_nesterov=True)
        unclipped_gradients, variables = zip(
            *optimizer.compute_gradients(self.loss))
        # Track the unclipped gradient norm for monitoring.
        norm = tf.global_norm(unclipped_gradients)
        if self.clip_gradient_global_norm > 0:
            gradients, _ = tf.clip_by_global_norm(
                unclipped_gradients, self.clip_gradient_global_norm)
        else:
            gradients = unclipped_gradients
        self.optimizer = optimizer.apply_gradients(zip(gradients, variables),
                                                   global_step=global_step)
        self.train_losses = OrderedDict([('loss', self.loss_net),
                                         ('loss_reg', self.loss_reg),
                                         ('loss_sigmas', self.loss_sigmas),
                                         ('mean_sigmas', mean_sigmas),
                                         ('gradient_norm', norm)])

        # build val graph: placeholders (batch size 1), shared weights via the
        # same template
        self.data_val, self.target_landmarks_val, self.landmark_mask_val = create_placeholders_tuple(
            data_generator_entries,
            data_types=data_generator_types,
            shape_prefix=[1])
        self.target_heatmaps_val = generate_heatmap_target(
            list(reversed(self.heatmap_size)),
            self.target_landmarks_val,
            sigmas,
            scale=self.sigma_scale,
            normalize=True,
            data_format=self.data_format)
        self.prediction_val, self.local_prediction_val, self.spatial_prediction_val = training_net(
            self.data_val,
            num_labels=self.num_landmarks,
            is_training=False,
            actual_network=self.unet,
            padding=self.padding,
            **self.network_parameters)

        # Validation loss only when ground truth is available; constant-0
        # entries keep the val dict keys matching the train dict keys.
        # NOTE(review): validation loss is unmasked (no mask= argument),
        # unlike training — confirm this is intentional.
        if self.has_validation_groundtruth:
            self.loss_val = self.loss_function(target=self.target_heatmaps_val,
                                               pred=self.prediction_val)
            self.val_losses = OrderedDict([
                ('loss', self.loss_val), ('loss_reg', self.loss_reg),
                ('loss_sigmas', tf.constant(0, tf.float32)),
                ('mean_sigmas', tf.constant(0, tf.float32)),
                ('gradient_norm', tf.constant(0, tf.float32))
            ])
# Esempio n. 5 (Example 5) — score: 0
    def init_networks(self):
        """
        Init training and validation networks.

        Builds the queue-fed multi-frame training graph and a single-frame
        validation graph with explicit LSTM states; weights are shared via
        tf.make_template / variable scope reuse. Only channels_first is
        supported.
        """
        # Network expects the spatial dimensions in reversed order.
        network_image_size = list(reversed(self.image_size))
        # With bitwise-packed instance images, all instances fit in one
        # channel; otherwise the instance dimension is dynamic (None).
        num_instances = 1 if self.bitwise_instance_image else None
        num_instances_val = None

        if self.data_format == 'channels_first':
            data_generator_entries = OrderedDict([
                ('image', [1, self.num_frames] + network_image_size),
                ('instances_merged',
                 [num_instances, self.num_frames] + network_image_size),
                ('instances_bac',
                 [num_instances, self.num_frames] + network_image_size)
            ])
            data_generator_entries_test_cropped_single_frame = OrderedDict([
                ('image', [1] + network_image_size),
                ('instances_merged', [num_instances_val] + network_image_size),
                ('instances_bac', [num_instances_val] + network_image_size)
            ])
            embedding_normalization_function = lambda x: tf.nn.l2_normalize(
                x, dim=self.channel_axis)
        else:
            # Bug fix: this was `assert 'channels_last not supported'`, which
            # never fires (a non-empty string is always truthy), so execution
            # fell through and later crashed with a confusing NameError on
            # data_generator_entries. Fail fast and explicitly instead.
            raise NotImplementedError('channels_last not supported')
        data_generator_types = {
            'image': tf.float32,
            'instances_merged': self.bitwise_instances_image_type,
            'instances_bac': self.bitwise_instances_image_type
        }

        # create model with shared weights between train and val
        training_net = tf.make_template('net', self.network)

        # build train graph
        self.train_queue = DataGeneratorPadding(
            self.dataset_train,
            self.coord,
            data_generator_entries,
            batch_size=self.batch_size,
            data_types=data_generator_types,
            n_threads=4)

        data, instances_tra, instances_bac = self.train_queue.dequeue()
        embeddings_tuple = training_net(
            data,
            num_outputs_embedding=self.num_embeddings,
            is_training=True,
            data_format=self.data_format,
            actual_network=self.actual_network,
            **self.network_parameters)

        # Normalize to a tuple so single-output networks are handled uniformly.
        if not isinstance(embeddings_tuple, tuple):
            embeddings_tuple = (embeddings_tuple, )

        loss_reg = get_reg_loss(self.reg_constant, True)

        with tf.name_scope('train_loss'):
            train_losses_dict = self.losses(
                embeddings_tuple,
                instances_tra,
                instances_bac,
                bitwise_instances=self.bitwise_instance_image)
            train_losses_dict['loss_reg'] = loss_reg
            # Total loss is the sum of all individual loss terms.
            self.loss = tf.reduce_sum(list(train_losses_dict.values()))
            self.train_losses = train_losses_dict

        # solver: Adam with a piecewise-constant LR schedule
        global_step = tf.Variable(self.current_iter)
        learning_rate = tf.train.piecewise_constant(
            global_step, self.learning_rate_boundaries, self.learning_rates)
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        self.optimizer = optimizer.minimize(self.loss, global_step=global_step)

        # initialize variables
        self.sess.run(tf.global_variables_initializer())
        self.sess.run(tf.local_variables_initializer())

        print('Variables')
        for i in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
            print(i)

        # build val graph: single-frame network with externally-fed LSTM
        # states, reusing the training weights via the 'net/rnn' scope
        val_placeholders_cropped = create_placeholders(
            data_generator_entries_test_cropped_single_frame, shape_prefix=[1])
        self.data_cropped_val = val_placeholders_cropped['image']
        self.instances_cropped_tra_val = val_placeholders_cropped[
            'instances_merged']
        self.instances_cropped_bac_val = val_placeholders_cropped[
            'instances_bac']
        with tf.variable_scope('net/rnn', reuse=True):
            output_tuple = network_single_frame_with_lstm_states(
                self.data_cropped_val,
                num_outputs_embedding=self.num_embeddings,
                data_format=self.data_format,
                actual_network=self.actual_network,
                **self.network_parameters)
            self.lstm_input_states_cropped_val = output_tuple[0]
            self.lstm_output_states_cropped_val = output_tuple[1]
            self.embeddings_cropped_val = output_tuple[2:]

        if not isinstance(self.embeddings_cropped_val, tuple):
            self.embeddings_cropped_val = (self.embeddings_cropped_val, )

        with tf.variable_scope('loss'):
            val_losses_dict = self.losses(self.embeddings_cropped_val,
                                          self.instances_cropped_tra_val,
                                          self.instances_cropped_bac_val,
                                          bitwise_instances=False)
            val_losses_dict['loss_reg'] = loss_reg
            self.loss_val = tf.reduce_sum(list(val_losses_dict.values()))
            self.val_losses = val_losses_dict

        # NOTE(review): normalization is applied when normalized_embeddings is
        # False — presumably the flag means "network output is already
        # normalized"; confirm the intended semantics.
        if not self.normalized_embeddings:
            self.embeddings_cropped_val = tuple([
                embedding_normalization_function(e)
                for e in self.embeddings_cropped_val
            ])