Example #1
def make_loss(args, gids):
    """
    Construct loss function(s).
    """
    # Use the first GPU id, if any, for losses that support device placement.
    gid = None if gids is None else gids[0]
    if args.loss_type == 'softmax':
        criterion = CrossEntropyLoss()
    elif args.loss_type == 'contrastive':
        criterion = ContrastiveLoss(margin=args.margin)
    elif args.loss_type == 'triplet':
        criterion = TripletLoss(margin=args.margin)
    elif args.loss_type == 'softmax-triplet':
        criterion = {
            'softmax': CrossEntropyLoss(),
            'triplet': TripletLoss(margin=args.margin)
        }
    elif args.loss_type == 'npair':
        criterion = NpairLoss(reg_lambda=0.002, gid=gid)
    elif args.loss_type == 'lifted':
        criterion = LiftedLoss(margin=args.margin, gid=gid)
    elif args.loss_type == 'dmml':
        criterion = DMMLLoss(num_support=args.num_support,
                             distance_mode=args.distance_mode,
                             margin=args.margin,
                             gid=gid)
    else:
        raise NotImplementedError(
            'unsupported loss type: {}'.format(args.loss_type))

    return criterion
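
For reference, a minimal sketch of how make_loss might be called, assuming an argparse-style namespace; the field values below are assumptions for illustration, not defaults from the source:

from types import SimpleNamespace

# Hypothetical arguments mirroring the fields make_loss reads above.
args = SimpleNamespace(loss_type='softmax-triplet', margin=0.3,
                       num_support=5, distance_mode='euclidean')
criterion = make_loss(args, gids=[0])

# For 'softmax-triplet', criterion is a dict, so a combined loss would be
# formed from both terms, e.g.:
#   loss = criterion['softmax'](logits, labels) + \
#          criterion['triplet'](features, labels)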
Example #2
    def __init__(self, args):
        super(gradcam_Loss, self).__init__()
        print('[INFO] Making gradcam_Loss...')

        self.nGPU = args.nGPU
        self.args = args
        self.loss = []
        self.loss_module = nn.ModuleList()
        for loss in args.gradcam_loss.split('+'):
            weight, loss_type = loss.split('*')
            if loss_type == 'CrossEntropy':
                loss_function = nn.CrossEntropyLoss()
            elif loss_type == 'Triplet':
                loss_function = TripletLoss(args.margin)
            else:
                raise NotImplementedError(
                    'unsupported loss type: {}'.format(loss_type))

            self.loss.append({
                'type': loss_type,
                'weight': float(weight),
                'function': loss_function
            })

        if len(self.loss) > 1:
            self.loss.append({'type': 'Total', 'weight': 0, 'function': None})

        # Register the loss functions so .to() and DataParallel see them.
        for l in self.loss:
            if l['function'] is not None:
                self.loss_module.append(l['function'])

        self.device = torch.device('cuda')
        self.loss_module.to(self.device)

        if args.nGPU > 1:
            self.loss_module = nn.DataParallel(
                self.loss_module, range(args.nGPU)
            )
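
The args.gradcam_loss string encodes a weighted sum of losses as weight*type terms joined by '+'. A tiny, self-contained sketch of that parsing; the spec value here is purely illustrative:

# Hypothetical loss spec: weights and types joined by '*' and '+'.
spec = '1*CrossEntropy+0.5*Triplet'
for term in spec.split('+'):
    weight, loss_type = term.split('*')
    print(float(weight), loss_type)   # 1.0 CrossEntropy / 0.5 Triplet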
Example #3
    def __init__(self, config):
        self.config = config
        self.device = self._get_device()
        self.train_loader = self._load_lvis_results()
        if self.config['loss']['type'] == 'nce':
            from loss.nt_xent import NTXentLoss
            self.loss_crit = NTXentLoss(self.device, config['batch_size'],
                                        **config['loss'])
        if self.config['loss']['include_hierarchical']:
            self.hierarchical_loss_crit = HierarchicalLoss(
                margin=config['loss']['margin'])
        if self.config['hyperbolic']:
            self.triplet_loss_crit = HTripletLoss(
                margin=config['loss']['margin'])
        else:
            self.triplet_loss_crit = TripletLoss(
                margin=config['loss']['margin'])
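
The constructor reads several nested keys from config. Below is a sketch of a config that satisfies every lookup above; the concrete values are assumptions. Note that **config['loss'] forwards every key in the loss block to NTXentLoss, so that class must accept or ignore the extras:

# Hypothetical config covering the keys the constructor reads above.
config = {
    'batch_size': 64,        # passed positionally to NTXentLoss
    'hyperbolic': False,     # False selects TripletLoss over HTripletLoss
    'loss': {
        'type': 'nce',                  # enables the NT-Xent criterion
        'include_hierarchical': True,   # adds HierarchicalLoss
        'margin': 0.2,                  # shared by the triplet criteria
    },
}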
Example #4
    def __init__(self, args, ckpt):
        super(Loss, self).__init__()
        print('[INFO] Making loss...')

        self.nGPU = args.nGPU
        self.args = args
        self.loss = []
        self.loss_module = nn.ModuleList()
        device = torch.device('cpu' if args.cpu else 'cuda')

        for loss in args.loss.split('+'):
            weight, loss_type = loss.split('*')
            if loss_type == 'CrossEntropy':
                loss_function = nn.CrossEntropyLoss()
            elif loss_type == 'Triplet':
                loss_function = TripletLoss(args.margin)
            elif loss_type == 'TripletSemihard':
                loss_function = TripletSemihardLoss(device, args.margin)
            elif loss_type == 'Center':
                loss_function = CenterLoss(device,
                                           args.num_classes,
                                           feat_dim=256)
            else:
                raise NotImplementedError(
                    'unsupported loss type: {}'.format(loss_type))

            self.loss.append({
                'type': loss_type,
                'weight': float(weight),
                'function': loss_function
            })

        if len(self.loss) > 1:
            self.loss.append({'type': 'Total', 'weight': 0, 'function': None})

        for l in self.loss:
            if l['function'] is not None:
                print('{:.3f} * {}'.format(l['weight'], l['type']))
                self.loss_module.append(l['function'])

        self.log = torch.Tensor()
        self.loss_module.to(device)

        if args.load != '':
            self.load(ckpt.dir, cpu=args.cpu)
        if not args.cpu and args.nGPU > 1:
            self.loss_module = nn.DataParallel(self.loss_module,
                                               range(args.nGPU))
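
The constructor above only assembles the weighted entries; combining them happens elsewhere in the class. A minimal sketch of that combination, using a hypothetical compute_total helper and assuming every loss takes the same (outputs, labels) pair (real code typically routes logits to CrossEntropy and embeddings to the metric losses):

    def compute_total(self, outputs, labels):
        # Hypothetical helper: weighted sum over the configured losses,
        # skipping the 'Total' placeholder whose function is None.
        total = 0
        for l in self.loss:
            if l['function'] is not None:
                total = total + l['weight'] * l['function'](outputs, labels)
        return total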
Example #5
    def __init__(self, args, ckpt):
        super(Loss, self).__init__()
        print('[INFO] Making loss...')

        self.nGPU = args.nGPU
        self.args = args
        self.loss = []
        self.loss_module = nn.ModuleList()
        self.circular_mixed_loss_queue = None
        mixed_subloss_options = {
            'TripletSemihard': {
                'function': TripletSemihardLoss(
                    torch.device('cpu' if args.cpu else 'cuda'), args.margin),
                'subtype': 'TripletSemihard'
            },
            'Triplet': {
                'function': TripletLoss(args.margin),
                'subtype': 'Triplet'
            }
        }
        subtype = ''
        for loss in args.loss.split('+'):
            weight, loss_type = loss.split('*')
            if loss_type == 'CrossEntropy':
                loss_function = nn.CrossEntropyLoss()
            elif loss_type == 'Triplet':
                loss_function = TripletLoss(args.margin)
            # CSCE 625: Aligned loss evaluation for aligned features
            elif loss_type == 'AlignedTriplet':
                tri_loss = TripletLoss2(margin=0.3)
                loss_function = AlignedTripletLoss(tri_loss)
            # ------------ BELOW CODE FOR CSCE 625 ---------------
            # Allow a mixed loss for training: cycle through the configured
            # loss functions every fixed number of epochs
            # (set via args.switch_loss_every).
            elif loss_type.startswith('Mixed'):
                print('Will cycle loss functions {} every {} epochs.'.format(
                    repr(loss_type.split('-')[1:]),
                    self.args.switch_loss_every))
                self.circular_mixed_loss_queue = cycle(
                    mixed_subloss_options[l] for l in loss_type.split('-')[1:])
                # Start with the first loss function in the cycle.
                l = next(self.circular_mixed_loss_queue)
                loss_function = l['function']
                subtype = l['subtype']
                loss_type = 'Mixed'
            else:
                raise NotImplementedError(
                    'unsupported loss type: {}'.format(loss_type))
            # ----------------------------------------------------
            self.loss.append({
                'type': loss_type,
                'weight': float(weight),
                'function': loss_function,
                'subtype': subtype
            })
            subtype = ''

        # CSCE 625: Mutual Learning
        if args.mutual_learning:
            self.ml_pm_weight = 1.0
            self.ml_global_weight = 0.0
            self.ml_local_weight = 1.0
            self.loss.append({
                'type': 'ProbabilityML',
                'weight': 1.0,
                'function': None
            })
            self.loss.append({
                'type': 'GlobalML',
                'weight': 0.0,  # update after implementation
                'function': None
            })
            self.loss.append({
                'type': 'LocalML',
                'weight': 1.0,
                'function': None
            })

        if len(self.loss) > 1:
            self.loss.append({'type': 'Total', 'weight': 0, 'function': None})

        for l in self.loss:
            if l['function'] is not None:
                print('{:.3f} * {}'.format(l['weight'], l['type']))
                self.loss_module.append(l['function'])

        self.device = torch.device('cpu' if args.cpu else 'cuda')
        self.loss_module.to(self.device)

        if args.load != '':
            self.load(ckpt.dir, cpu=args.cpu)
        if not args.cpu and args.nGPU > 1:
            self.loss_module = nn.DataParallel(self.loss_module,
                                               range(args.nGPU))

        self.log = [torch.Tensor()]
        if args.mutual_learning:
            self.log.append(torch.Tensor())
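
The constructor only primes circular_mixed_loss_queue; the actual switch happens at epoch boundaries. A sketch of how a trainer might advance the cycle, using a hypothetical method name:

    def switch_mixed_loss(self, epoch):
        # Hypothetical helper: every args.switch_loss_every epochs, replace
        # the active function/subtype of the 'Mixed' entry with the next
        # option from the circular queue built in __init__.
        if self.circular_mixed_loss_queue is None:
            return
        if epoch == 0 or epoch % self.args.switch_loss_every != 0:
            return
        nxt = next(self.circular_mixed_loss_queue)
        for l in self.loss:
            if l['type'] == 'Mixed':
                l['function'] = nxt['function']
                l['subtype'] = nxt['subtype']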