Example no. 1
0
    def load_loss(self, config_dict):
        """Assemble the training and test loss criteria from the config.

        Builds a pixel-wise reconstruction term and a perceptual
        (ImageNet-feature) term on the 'img_crop' output, each enabled by
        its loss weight, and wraps them so all active terms are applied
        and summed.

        Args:
            config_dict: configuration mapping; reads 'MAE',
                'loss_weight_imageNet', 'do_maxpooling', 'output_types',
                and 'loss_weight_rgb'.

        Returns:
            (loss_train, loss_test) — callables that apply their list of
            per-key criteria and sum the results.
        """
        # Element-wise criterion: L1 when 'MAE' is requested, MSE otherwise.
        if config_dict.get('MAE', False):
            elementwise = torch.nn.modules.loss.L1Loss()
        else:
            elementwise = torch.nn.modules.loss.MSELoss()

        # Direct pixel reconstruction loss on the cropped image.
        pixel_term = losses_generic.LossOnDict(key='img_crop',
                                               loss=elementwise)

        # Perceptual loss computed on ImageNet features of the same crop.
        perceptual = losses_images.ImageNetCriterium(
            criterion=elementwise,
            weight=config_dict['loss_weight_imageNet'],
            do_maxpooling=config_dict.get('do_maxpooling', True))
        perceptual_term = losses_generic.LossOnDict(key='img_crop',
                                                    loss=perceptual)

        train_terms = []
        test_terms = []

        # Only attach image losses when the network actually emits the crop;
        # each term is further gated by its configured weight.
        if 'img_crop' in config_dict['output_types']:
            if config_dict['loss_weight_rgb'] > 0:
                train_terms.append(pixel_term)
                test_terms.append(pixel_term)
            if config_dict['loss_weight_imageNet'] > 0:
                train_terms.append(perceptual_term)
                test_terms.append(perceptual_term)

        # annotation and pred are organized as lists, to facilitate multiple
        # output types (e.g. heatmap and 3d loss)
        loss_train = losses_generic.PreApplyCriterionListDict(train_terms,
                                                              sum_losses=True)
        loss_test = losses_generic.PreApplyCriterionListDict(test_terms,
                                                             sum_losses=True)
        return loss_train, loss_test
Example no. 2
0
    def load_loss(self, config_dict):
        """Assemble the training and test loss criteria from the config.

        Builds a label-normalized pixel loss and a perceptual
        (ImageNet-feature) loss on the full 'img' output, plus an optional
        affine-crop position prior when a spatial transformer is used.

        Args:
            config_dict: configuration mapping; reads 'MAE',
                'loss_weight_imageNet', 'do_maxpooling', 'output_types',
                'loss_weight_rgb', 'spatial_transformer', and
                'fullFrameResolution'.

        Returns:
            (loss_train, loss_test) — callables that apply their list of
            per-key criteria and sum the results. Note the position prior
            is train-only by design.
        """
        # Element-wise criterion: L1 when 'MAE' is requested, MSE otherwise.
        if config_dict.get('MAE', False):
            pairwise_loss = torch.nn.modules.loss.L1Loss()
        else:
            pairwise_loss = torch.nn.modules.loss.MSELoss()

        # Normalize the loss per instance by label mean/std. This used to be
        # gated on the training set ("box"/"walk_full"), but the guard had
        # been hard-coded to `if 1:` — it is applied unconditionally, so the
        # dead conditional is removed.
        pairwise_loss = losses_generic.LossInstanceMeanStdFromLabel(
            pairwise_loss)

        img_key = 'img'
        image_pixel_loss = losses_generic.LossOnDict(key=img_key,
                                                     loss=pairwise_loss)
        # Perceptual loss computed on ImageNet features of the same image.
        image_imgNet_bare = losses_images.ImageNetCriterium(
            criterion=pairwise_loss,
            weight=config_dict['loss_weight_imageNet'],
            do_maxpooling=config_dict.get('do_maxpooling', True))
        image_imgNet_loss = losses_generic.LossOnDict(key=img_key,
                                                      loss=image_imgNet_bare)

        losses_train = []
        losses_test = []

        # Only attach image losses when the network actually emits the image;
        # each term is further gated by its configured weight.
        if img_key in config_dict['output_types']:
            if config_dict['loss_weight_rgb'] > 0:
                losses_train.append(image_pixel_loss)
                losses_test.append(image_pixel_loss)
            if config_dict['loss_weight_imageNet'] > 0:
                losses_train.append(image_imgNet_loss)
                losses_test.append(image_imgNet_loss)

        # Prior keeping the predicted crop position plausible within the
        # full frame (training only). NOTE(review): prior weight 0.1 is a
        # tuned constant from the original code — confirm before changing.
        if config_dict['spatial_transformer']:
            losses_train.append(
                losses_generic.AffineCropPositionPrior(
                    config_dict['fullFrameResolution'], weight=0.1))

        loss_train = losses_generic.PreApplyCriterionListDict(losses_train,
                                                              sum_losses=True)
        loss_test = losses_generic.PreApplyCriterionListDict(losses_test,
                                                             sum_losses=True)

        # annotation and pred are organized as lists, to facilitate multiple
        # output types (e.g. heatmap and 3d loss)
        return loss_train, loss_test
Example no. 3
0
    def load_loss(self, config_dict):
        """Assemble the training and test loss criteria from the config.

        Builds a pixel-wise reconstruction term and a perceptual
        (ImageNet-feature) term on the 'img_crop' output, each enabled by
        its loss weight, and wraps them so all active terms are applied
        and summed.

        Args:
            config_dict: configuration mapping; reads 'training_set', 'MAE',
                'loss_weight_imageNet', 'do_maxpooling', 'output_types',
                and 'loss_weight_rgb'.

        Returns:
            (loss_train, loss_test) — callables that apply their list of
            per-key criteria and sum the results.
        """
        # MPJPE normalization factor for datasets where the spine joint is
        # fixed to the root (= 0), i.e. only 16 of 17 joints carry error.
        # NOTE(review): `weight` is only reported via print here; it is not
        # applied to any loss term below — confirm this is intentional.
        weight = 1
        if config_dict['training_set'] in ['h36m', 'h36m_mpii']:
            weight = 17 / 16  # because spine is set to root = 0
        print("MPJPE test weight = {}, to normalize different number of joints"
              .format(weight))

        # Element-wise criterion: L1 when 'MAE' is requested, MSE otherwise.
        if config_dict.get('MAE', False):
            elementwise = torch.nn.modules.loss.L1Loss()
        else:
            elementwise = torch.nn.modules.loss.MSELoss()

        # Direct pixel reconstruction loss on the cropped image.
        pixel_term = losses_generic.LossOnDict(key='img_crop',
                                               loss=elementwise)

        # Perceptual loss computed on ImageNet features of the same crop.
        perceptual = losses_images.ImageNetCriterium(
            criterion=elementwise,
            weight=config_dict['loss_weight_imageNet'],
            do_maxpooling=config_dict.get('do_maxpooling', True))
        perceptual_term = losses_generic.LossOnDict(key='img_crop',
                                                    loss=perceptual)

        train_terms = []
        test_terms = []

        # Only attach image losses when the network actually emits the crop;
        # each term is further gated by its configured weight.
        if 'img_crop' in config_dict['output_types']:
            if config_dict['loss_weight_rgb'] > 0:
                train_terms.append(pixel_term)
                test_terms.append(pixel_term)
            if config_dict['loss_weight_imageNet'] > 0:
                train_terms.append(perceptual_term)
                test_terms.append(perceptual_term)

        # annotation and pred are organized as lists, to facilitate multiple
        # output types (e.g. heatmap and 3d loss)
        loss_train = losses_generic.PreApplyCriterionListDict(train_terms,
                                                              sum_losses=True)
        loss_test = losses_generic.PreApplyCriterionListDict(test_terms,
                                                             sum_losses=True)
        return loss_train, loss_test