Code Example #1
    def setup_trainer(config_data: dict, model: PEModel, training_paf: tuple,
                      training_heatmap: tuple, gen_layer):
        paf, paf_mask = training_paf
        heatmap, heatmap_mask = training_heatmap
        iterator = gen_layer.get_iterator()
        absent_human_masks = iterator[RIterator.ABSENT_HUMAN_MASK]
        trainer = TrainerBuilder.trainer_from_dict(
            model=model,
            train_inputs=[gen_layer],
            label_tensors={
                PETrainer.TRAINING_HEATMAP: heatmap.get_data_tensor(),
                PETrainer.TRAINING_PAF: paf.get_data_tensor(),
                PETrainer.TRAINING_MASK: absent_human_masks,
                PETrainer.TRAINING_HEATMAP_MASK: heatmap_mask.get_data_tensor(),
                PETrainer.TRAINING_PAF_MASK: paf_mask.get_data_tensor()
            },
            info_dict=config_data[ModelAssembler.TRAINER_INFO])

        untrainable_layers = config_data[ModelAssembler.UNTRAINABLE_LAYERS]
        if untrainable_layers is not None:
            layers = [(layer_name, False) for layer_name in untrainable_layers]
            trainer.set_layers_trainable(layers)

        # Set L1 regularization
        l1_reg = config_data[ModelAssembler.L1_REG]
        if l1_reg is not None:
            l1_reg = float(l1_reg)
            l1_reg_layers = config_data[ModelAssembler.L1_REG_LAYERS]
            reg_config = [(layer, l1_reg) for layer in l1_reg_layers]
            trainer.set_l1_reg(reg_config)

        # Set L2 regularization
        l2_reg = config_data[ModelAssembler.L2_REG]
        if l2_reg is not None:
            l2_reg = float(l2_reg)
            l2_reg_layers = config_data[ModelAssembler.L2_REG_LAYERS]
            reg_config = [(layer, l2_reg) for layer in l2_reg_layers]
            trainer.set_l2_reg(reg_config)

        distillation_config = config_data.get(ModelAssembler.DISTILLATION)
        if distillation_config is not None:
            arch_path = distillation_config[ModelAssembler.TEACHER_ARCH]
            teacher = PEModel.from_json(arch_path)
            teacher.set_session(model.get_session())

            weights_path = distillation_config[ModelAssembler.TEACHER_WEIGHTS]
            teacher.load_weights(weights_path)

            distillator = DistillatorBuilder.distillator_from_dict(
                teacher=teacher, info_dict=distillation_config)
            trainer = distillator(trainer)

        trainer.compile()
        return trainer
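
For reference, this is roughly the shape of the config_data dictionary that setup_trainer consumes, inferred from the keys accessed above. The values are hypothetical placeholders; the real key constants live on ModelAssembler, and the TRAINER_INFO sub-dict is whatever TrainerBuilder.trainer_from_dict expects:

    config_data = {
        ModelAssembler.TRAINER_INFO: trainer_info,         # dict consumed by TrainerBuilder
        ModelAssembler.UNTRAINABLE_LAYERS: ['conv0'],      # layer names to freeze, or None
        ModelAssembler.L1_REG: 1e-5,                       # L1 coefficient, or None
        ModelAssembler.L1_REG_LAYERS: ['conv1', 'conv2'],  # layers to regularize
        ModelAssembler.L2_REG: None,                       # L2 coefficient, or None
        ModelAssembler.L2_REG_LAYERS: [],
        # Optional: ModelAssembler.DISTILLATION, a sub-dict providing
        # TEACHER_ARCH (a JSON architecture path) and TEACHER_WEIGHTS,
        # consumed by DistillatorBuilder.distillator_from_dict.
    }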
Code Example #2
    def build_trainer(self, config_data: dict, model, gen_layer):
        iterator = gen_layer.get_iterator()
        # TODO: label tensors should come from the iterator; how do we connect different models?
        trainer = TrainerBuilder.trainer_from_dict(
            model=model,
            train_inputs=[gen_layer],
            label_tensors={
                "LABELS": iterator['mask'],
                "WEIGHT_MAP": None
            },
            info_dict=config_data[ModelAssemblerBase.TRAINER_INFO])

        return trainer
Code Example #3
            # --- LOSS MASKING
            heatmap_loss = heatmap_loss * tf.expand_dims(train_mask, axis=-1)
            heatmap_loss = heatmap_loss * train_heatmap_mask

            if self._heatmap_weight is not None:
                # Build a weight mask for scaling the loss.
                # Add 1.0 so that regions where the heatmap is (approximately)
                # zero still contribute to the loss.
                weight_mask = train_heatmap * self._heatmap_weight + self.__IDENTITY

                heatmap_loss = heatmap_loss * weight_mask

            heatmap_losses.append(
                tf.reduce_mean(heatmap_loss)
            )

        self._paf_loss = tf.reduce_sum(paf_losses)
        self._heatmap_loss = tf.reduce_sum(heatmap_losses)

        loss = self._heatmap_loss * self._heatmap_scale + \
               self._paf_loss * self._paf_scale

        # For Tensorboard
        super().track_loss(self._paf_loss, PETrainer.PAF_LOSS)
        super().track_loss(self._heatmap_loss, PETrainer.HEATMAP_LOSS)

        return loss


TrainerBuilder.register_trainer(ABSTrainer)
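
Stripped of the MakiFlow plumbing, the masking arithmetic above is plain TensorFlow. A self-contained sketch (all tensor names, shapes, and the weight value are hypothetical stand-ins for the trainer's internal tensors):

    import tensorflow as tf

    batch, h, w, k = 2, 46, 46, 18                        # hypothetical sizes
    heatmap_loss = tf.random.uniform([batch, h, w, k])    # raw per-pixel loss
    train_mask = tf.ones([batch, h, w])                   # 0 where humans lack annotations
    train_heatmap_mask = tf.ones([batch, h, w, k])        # 0 in unlabeled regions
    train_heatmap = tf.random.uniform([batch, h, w, k])   # ground-truth heatmaps
    heatmap_weight = 10.0                                 # hypothetical scale factor

    masked = heatmap_loss * tf.expand_dims(train_mask, axis=-1)
    masked = masked * train_heatmap_mask
    # The +1.0 (the __IDENTITY term) keeps near-zero heatmap regions
    # contributing to the loss instead of being zeroed out.
    weight_mask = train_heatmap * heatmap_weight + 1.0
    stage_loss = tf.reduce_mean(masked * weight_mask)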
Code Example #4
                train_heatmap, heatmap, raw_tensor=True) / 2.0

            # --- LOSS MASKING
            heatmap_loss = heatmap_loss * train_mask
            heatmap_loss = heatmap_loss * train_heatmap_mask

            if self._heatmap_weight is not None:
                # Build a weight mask for scaling the loss.
                # Add 1.0 so that regions where the heatmap is (approximately)
                # zero still contribute to the loss.
                weight_mask = train_heatmap * self._heatmap_weight + self.__IDENTITY

                heatmap_loss = heatmap_loss * weight_mask

            heatmap_losses.append(tf.reduce_sum(heatmap_loss))

        # The original repo takes mean over the sums of the losses
        self._paf_loss = tf.reduce_mean(paf_losses)
        self._heatmap_loss = tf.reduce_mean(heatmap_losses)

        loss = self._heatmap_loss * self._heatmap_scale + \
               self._paf_loss * self._paf_scale

        # For Tensorboard
        super().track_loss(self._paf_loss, PETrainer.PAF_LOSS)
        super().track_loss(self._heatmap_loss, PETrainer.HEATMAP_LOSS)

        return loss


TrainerBuilder.register_trainer(MSETrainer)
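
Note that the two pose trainers reduce in opposite orders: the ABS variant above takes tf.reduce_mean within each stage and tf.reduce_sum across stages, while this MSE variant sums within each stage and averages across stages (matching the original repo, per the comment). A minimal illustration with hypothetical per-stage loss tensors:

    import tensorflow as tf

    per_stage = [tf.random.uniform([2, 46, 46, 18]) for _ in range(6)]

    abs_style = tf.reduce_sum([tf.reduce_mean(l) for l in per_stage])   # as in ABSTrainer
    mse_style = tf.reduce_mean([tf.reduce_sum(l) for l in per_stage])   # as in MSETrainer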
Code Example #5
# MakiFlow is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MakiFlow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MakiFlow.  If not, see <https://www.gnu.org/licenses/>.

import tensorflow as tf
from ..core import RegressorTrainer
from makiflow.core import TrainerBuilder, Loss


class AbsTrainer(RegressorTrainer):
    TYPE = 'AbsTrainer'

    ABS_LOSS = 'ABS_LOSS'

    def _build_local_loss(self, prediction, label):
        abs_loss = Loss.abs_loss(label, prediction, raw_tensor=True)
        final_loss = tf.reduce_mean(abs_loss)
        return final_loss


TrainerBuilder.register_trainer(AbsTrainer)
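
Loss.abs_loss with raw_tensor=True presumably returns the elementwise |label - prediction| tensor, so the local loss reduces to a plain mean absolute error. The TensorFlow equivalent, with hypothetical tensors:

    import tensorflow as tf

    label = tf.random.uniform([4, 10])
    prediction = tf.random.uniform([4, 10])
    final_loss = tf.reduce_mean(tf.abs(label - prediction))  # mean absolute error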
Code Example #6
        self._normalize_by_positives = norm_by_pos

    def _build_loss(self):
        logits = super().get_logits()
        labels = super().get_labels()

        num_positives = None
        if self._normalize_by_positives:
            positives = tf.cast(tf.not_equal(labels, 0),
                                tf.float32)  # [BATCH_SIZE, ...]
            positives_dim_n = len(positives.get_shape())
            axis = list(range(1, positives_dim_n))
            num_positives = tf.reduce_sum(positives, axis=axis)  # [BATCH_SIZE]

        focal_loss = Loss.focal_binary_loss(
            logits=logits,
            labels=labels,
            num_positives=num_positives,
            focal_gamma=self._focal_gamma,
            label_smoothing=self._smoothing_labels)

        if not self._normalize_by_positives:
            focal_loss = focal_loss / float(super().get_batch_size())

        super().track_loss(focal_loss, FocalBinaryTrainer.FOCAL_LOSS)
        return focal_loss


TrainerBuilder.register_trainer(FocalBinaryTrainer)
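
Loss.focal_binary_loss is presumably a variant of the standard binary focal loss (Lin et al., 2017). A plain-TensorFlow sketch of that formulation, ignoring label smoothing and the per-positive normalization (tensors hypothetical):

    import tensorflow as tf

    logits = tf.random.normal([2, 8])
    labels = tf.cast(tf.random.uniform([2, 8]) > 0.5, tf.float32)
    gamma = 2.0

    p = tf.nn.sigmoid(logits)
    p_t = labels * p + (1.0 - labels) * (1.0 - p)   # probability of the true class
    ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)
    focal_loss = tf.reduce_sum(tf.pow(1.0 - p_t, gamma) * ce)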
Code Example #7
        self._maki_gamma = gamma

    def _build_loss(self):
        logits = super().get_logits()
        labels = super().get_labels()
        ce_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                                 logits=logits)
        num_classes = super().get_num_classes()
        logits_shape = tf.shape(logits)
        batch_size = logits_shape[0]

        # Cast to float so reduce_sum can count the positive labels
        positives = tf.cast(tf.not_equal(labels, 0), tf.float32)  # [BATCH_SIZE, ...]
        positives_dim_n = len(positives.get_shape())
        axis = list(range(1, positives_dim_n))
        num_positives = tf.reduce_sum(positives, axis=axis)  # [BATCH_SIZE]

        maki_loss = Loss.maki_loss(
            flattened_logits=tf.reshape(logits,
                                        shape=[batch_size, -1, num_classes]),
            flattened_labels=tf.reshape(labels, shape=[batch_size, -1]),
            num_classes=num_classes,
            num_positives=num_positives,
            maki_gamma=self._maki_gamma,
            ce_loss=ce_loss)
        super().track_loss(maki_loss, MakiLossTrainer.MAKI_LOSS)
        return maki_loss


TrainerBuilder.register_trainer(MakiLossTrainer)
Code Example #8
        self._normalize_by_positives = norm_by_pos

    def _build_loss(self):
        logits = super().get_logits()
        labels = super().get_labels()
        num_classes = super().get_num_classes()

        num_positives = None
        if self._normalize_by_positives:
            positives = tf.cast(tf.not_equal(labels, 0),
                                tf.float32)  # [BATCH_SIZE, ...]
            positives_dim_n = len(positives.get_shape())
            axis = list(range(1, positives_dim_n))
            num_positives = tf.reduce_sum(positives, axis=axis)  # [BATCH_SIZE]

        focal_loss = Loss.focal_loss(logits=logits,
                                     labels=labels,
                                     num_classes=num_classes,
                                     num_positives=num_positives,
                                     focal_gamma=self._focal_gamma)

        if not self._normalize_by_positives:
            focal_loss = focal_loss / float(super().get_batch_size())

        super().track_loss(focal_loss, FocalTrainer.FOCAL_LOSS)
        return focal_loss


TrainerBuilder.register_trainer(FocalTrainer)
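
Both focal trainers expose the same normalization switch: with norm_by_pos enabled, num_positives is passed into the loss, which presumably divides each sample's loss by its count of non-background labels; otherwise the summed loss is divided by the batch size instead.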
Code Example #9
# MakiFlow is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MakiFlow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MakiFlow.  If not, see <https://www.gnu.org/licenses/>.

from ..core import ClassificatorTrainer
from makiflow.core import Loss, TrainerBuilder
import tensorflow as tf


class QCETrainer(ClassificatorTrainer):
    QCE_LOSS = 'QCE_LOSS'

    def _build_loss(self):
        ce_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=super().get_labels(), logits=super().get_logits())
        qce_loss = Loss.quadratic_ce_loss(ce_loss=ce_loss)
        super().track_loss(qce_loss, QCETrainer.QCE_LOSS)
        return qce_loss


TrainerBuilder.register_trainer(QCETrainer)
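
Loss.quadratic_ce_loss is not shown here; the name suggests it squares the per-element cross entropy before reduction, which up-weights hard examples. A plain-TensorFlow sketch under that assumption (tensors hypothetical):

    import tensorflow as tf

    labels = tf.constant([[0, 2], [1, 0]])
    logits = tf.random.normal([2, 2, 3])
    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
    qce_loss = tf.reduce_mean(tf.square(ce))  # assumption: "quadratic" = squared CE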
Code Example #10
        # p - predicted probability
        # g - ground truth label
        p = tf.nn.sigmoid(logits)
        g = labels
        dice_loss = Loss.dice_loss(
            p=p,
            g=g,
            eps=self._eps,
            axes=self._axes
        )

        super().track_loss(dice_loss, DiceTrainer.DICE_LOSS)
        return dice_loss


TrainerBuilder.register_trainer(DiceTrainer)

if __name__ == '__main__':
    from makiflow.models.classificator import Classificator
    from makiflow.layers import InputLayer

    # SEGMENTATION CASE
    print('SEGMENTATION CASE------------------------------------------------------------------------------------------')
    x = InputLayer(input_shape=[32, 128, 128, 3], name='input')

    model = Classificator(in_x=x, out_x=x)
    TrainerBuilder.trainer_from_dict(
        model,
        None,
        None,
        {
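
The __main__ block above is cut off mid-call, so the dict passed to trainer_from_dict is not recoverable. As for the loss itself, Loss.dice_loss presumably implements the standard soft Dice loss; a plain-TensorFlow sketch of that formulation (eps, axes, and tensors hypothetical):

    import tensorflow as tf

    p = tf.nn.sigmoid(tf.random.normal([2, 64, 64, 1]))               # predicted probs
    g = tf.cast(tf.random.uniform([2, 64, 64, 1]) > 0.5, tf.float32)  # ground truth
    eps, axes = 1e-5, [1, 2, 3]

    intersection = tf.reduce_sum(p * g, axis=axes)
    denom = tf.reduce_sum(p, axis=axes) + tf.reduce_sum(g, axis=axes)
    dice_loss = tf.reduce_mean(1.0 - (2.0 * intersection + eps) / (denom + eps))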
Code Example #11
File: mse_loss.py / Project: TaplierShiru/MakiFlow
# MakiFlow is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MakiFlow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MakiFlow.  If not, see <https://www.gnu.org/licenses/>.

import tensorflow as tf
from ..core import RegressorTrainer
from makiflow.core import TrainerBuilder, Loss


class MseTrainer(RegressorTrainer):
    TYPE = 'MseTrainer'

    MSE_LOSS = 'MSE_LOSS'

    def _build_local_loss(self, prediction, label):
        mse_loss = Loss.mse_loss(label, prediction, raw_tensor=True)
        final_loss = tf.reduce_mean(mse_loss)
        return final_loss


TrainerBuilder.register_trainer(MseTrainer)
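
As with AbsTrainer, the local loss is an ordinary mean squared error once Loss.mse_loss returns the raw elementwise tensor. The TensorFlow equivalent, with hypothetical tensors:

    import tensorflow as tf

    label = tf.random.uniform([4, 10])
    prediction = tf.random.uniform([4, 10])
    final_loss = tf.reduce_mean(tf.square(label - prediction))  # mean squared error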
Code Example #12
        """
        assert gamma >= 0, f'Gamma must be non-negative. Received gamma={gamma}'
        # noinspection PyAttributeOutsideInit
        self._focal_gamma = gamma

    def _build_loss(self):
        logits = super().get_logits()
        labels = super().get_labels()
        num_classes = super().get_num_classes()

        # Cast to float so reduce_sum can count the positive labels
        positives = tf.cast(tf.not_equal(labels, 0), tf.float32)  # [BATCH_SIZE, ...]
        positives_dim_n = len(positives.get_shape())
        axis = list(range(1, positives_dim_n))
        num_positives = tf.reduce_sum(positives, axis=axis)  # [BATCH_SIZE]

        focal_loss = Loss.focal_loss(logits=logits,
                                     labels=labels,
                                     num_classes=num_classes,
                                     num_positives=num_positives,
                                     focal_gamma=self._focal_gamma,
                                     raw_tensor=True)

        weights = super().get_weight_map()
        focal_loss = tf.reduce_sum(focal_loss * weights)
        super().track_loss(focal_loss, WeightedFocalTrainer.FOCAL_LOSS)
        return focal_loss


TrainerBuilder.register_trainer(WeightedFocalTrainer)
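
Unlike the unweighted focal trainers, this one requests the raw per-element focal tensor (raw_tensor=True), multiplies it by a user-supplied weight map, and reduces with tf.reduce_sum rather than a mean, so the loss magnitude scales with the spatial size of the weight map.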
Code Example #13
#
# MakiFlow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MakiFlow.  If not, see <https://www.gnu.org/licenses/>.

import tensorflow as tf
from ..core import ClassificatorTrainer
from makiflow.core import TrainerBuilder


class WeightedCETrainer(ClassificatorTrainer):
    TYPE = 'WeightedCETrainer'

    WEIGHTED_CE_LOSS = 'WEIGHTED_CE_LOSS'

    def _build_loss(self):
        ce_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=super().get_labels(), logits=super().get_logits())
        weights = super().get_weight_map()
        weighted_ce_loss = tf.reduce_mean(ce_loss * weights)
        super().track_loss(weighted_ce_loss,
                           WeightedCETrainer.WEIGHTED_CE_LOSS)
        return weighted_ce_loss


TrainerBuilder.register_trainer(WeightedCETrainer)
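
The weighted cross entropy above is plain TensorFlow apart from the accessors; a self-contained equivalent with a hypothetical per-pixel weight map:

    import tensorflow as tf

    labels = tf.constant([[0, 1], [2, 0]])           # sparse class ids
    logits = tf.random.normal([2, 2, 3])             # 3 classes
    weights = tf.constant([[1.0, 2.0], [5.0, 1.0]])  # per-pixel weight map

    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
    weighted_ce_loss = tf.reduce_mean(ce * weights)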