import torch.nn as nn
from pytorch_metric_learning import losses


class noXBMLoss(nn.Module):
    def __init__(self, num_classes, cfg):
        super(noXBMLoss, self).__init__()
        embedding_size = cfg.MODEL.HEAD.DIM
        loss_name = cfg.LOSSES.NOXBM_LOSS_NAME
        if loss_name == 'circle_loss':
            loss_func = losses.CircleLoss(m=0.4, gamma=80)
        elif loss_name == 'softtriple_loss':
            # Use more centers per class when the label space is small.
            K = 10 if num_classes < 500 else 2
            loss_func = losses.SoftTripleLoss(num_classes,
                                              embedding_size,
                                              centers_per_class=K,
                                              la=20,
                                              gamma=0.1,
                                              margin=0.01)
        elif loss_name == 'fastAP_loss':
            loss_func = losses.FastAPLoss(num_bins=10)
        elif loss_name == 'nSoftmax_loss':
            loss_func = losses.NormalizedSoftmaxLoss(num_classes,
                                                     embedding_size,
                                                     temperature=0.05)
        elif loss_name == 'proxyNCA_loss':
            loss_func = losses.ProxyNCALoss(num_classes,
                                            embedding_size,
                                            softmax_scale=3)
        else:
            raise ValueError('Unknown noXBM loss: {}'.format(loss_name))
        self.loss_func = loss_func
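
The wrapper above only constructs the loss; a forward method is not shown. Every loss it can build shares the same (embeddings, labels) call convention in pytorch-metric-learning, so a delegating forward suffices. A minimal sketch, assuming such a method is added (it is not part of the original class):

class noXBMLossWithForward(noXBMLoss):
    def forward(self, embeddings, labels):
        # embeddings: (batch, cfg.MODEL.HEAD.DIM) float tensor
        # labels:     (batch,) integer class labels
        return self.loss_func(embeddings, labels)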
import torch.nn as nn
from pytorch_metric_learning import losses


class Proxy_NCA(nn.Module):
    def __init__(self, nb_classes, sz_embed, scale=32):
        super(Proxy_NCA, self).__init__()
        self.nb_classes = nb_classes
        self.sz_embed = sz_embed
        self.scale = scale
        # ProxyNCALoss holds one learnable proxy per class; move them to the GPU.
        self.loss_func = losses.ProxyNCALoss(num_classes=self.nb_classes,
                                             embedding_size=self.sz_embed,
                                             softmax_scale=self.scale).cuda()
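
Proxy_NCA defines no forward of its own, so the wrapped loss is called directly. A usage sketch with illustrative sizes (the class count and batch shapes are assumptions):

import torch

criterion = Proxy_NCA(nb_classes=100, sz_embed=512)
embeddings = torch.randn(32, 512).cuda()      # (batch, sz_embed)
labels = torch.randint(0, 100, (32,)).cuda()  # (batch,)
loss = criterion.loss_func(embeddings, labels)
loss.backward()  # gradients also flow into the learnable proxies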
Example #3
from pytorch_metric_learning import losses


def proxy_nca_loss(trial,
                   num_classes,
                   embedding_size,
                   scale_range=(0.0, 100.0),
                   **kwargs):
    # Let Optuna pick the softmax scale within scale_range.
    # (suggest_uniform is deprecated in newer Optuna; suggest_float replaces it.)
    scale = trial.suggest_uniform("scale", *scale_range)

    # sample_regularizer(trial) is a project helper, not shown here, that
    # returns keyword arguments configuring a regularizer.
    loss = losses.ProxyNCALoss(num_classes=num_classes,
                               embedding_size=embedding_size,
                               softmax_scale=scale,
                               **sample_regularizer(trial))

    return {"loss": loss, "param": True}
import torch.optim as optim
from torch.optim import lr_scheduler
from pytorch_metric_learning import (distances, losses, miners, reducers,
                                     regularizers)


class Trainer:  # hypothetical name; the class header is not part of the snippet
    def __init__(self,
                 train_dl,
                 val_dl,
                 unseen_dl,
                 model,
                 optimizer,
                 scheduler,
                 criterion,
                 mining_function,
                 loss,
                 savePath='./models/',
                 device='cuda',
                 BATCH_SIZE=64):
        self.device = device
        self.train_dl = train_dl
        self.val_dl = val_dl
        self.unseen_dl = unseen_dl
        self.BATCH_SIZE = BATCH_SIZE
        self.model = model.to(self.device)
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.criterion = criterion
        self.mining_function = mining_function
        self.loss = loss
        # Shared components: L2 distance on normalized embeddings, a reducer
        # that averages only nonzero losses, and an L2 weight regularizer.
        self.distance = distances.LpDistance(normalize_embeddings=True,
                                             p=2,
                                             power=1)
        self.reducer = reducers.ThresholdReducer(low=0)
        self.regularizer = regularizers.LpRegularizer(p=2)
        # Semi-hard triplets: the negative is farther than the positive but
        # still inside the margin.
        if self.mining_function == 'triplet':
            self.mining_func = miners.TripletMarginMiner(
                margin=0.01,
                distance=self.distance,
                type_of_triplets="semihard")
        elif self.mining_function == 'pair':
            self.mining_func = miners.PairMarginMiner(pos_margin=0,
                                                      neg_margin=0.2)

        if self.loss == 'triplet':
            self.loss_function = losses.TripletMarginLoss(
                margin=0.01, distance=self.distance, reducer=self.reducer)
        elif self.loss == 'contrastive':
            self.loss_function = losses.ContrastiveLoss(pos_margin=0,
                                                        neg_margin=1.5)
        elif self.loss == 'panc':
            self.loss_function = losses.ProxyAnchorLoss(
                9,    # num_classes
                128,  # embedding_size
                margin=0.01,
                alpha=5,
                reducer=self.reducer,
                weight_regularizer=self.regularizer)
        elif self.loss == 'pnca':
            self.loss_function = losses.ProxyNCALoss(
                9,    # num_classes
                128,  # embedding_size
                softmax_scale=1,
                reducer=self.reducer,
                weight_regularizer=self.regularizer)
        elif self.loss == 'normsoftmax':
            self.loss_function = losses.NormalizedSoftmaxLoss(
                9,    # num_classes
                128,  # embedding_size
                temperature=0.05,
                reducer=self.reducer,
                weight_regularizer=self.regularizer)

        # Proxy-based losses carry learnable class weights (proxies), so they
        # get their own optimizer and LR scheduler.
        if self.loss in ['normsoftmax', 'panc', 'pnca']:
            self.loss_optimizer = optim.SGD(self.loss_function.parameters(),
                                            lr=0.0001,
                                            momentum=0.9)
            self.loss_scheduler = lr_scheduler.ReduceLROnPlateau(
                self.loss_optimizer,
                'min',
                patience=3,
                threshold=0.0001,
                factor=0.1,
                verbose=True)

        self.savePath = savePath + 'efigi{}_{}_128'.format(
            self.mining_function, self.loss)
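
The trainer's step logic is not shown. For the proxy-based branches above, both the model optimizer and loss_optimizer must step, since the loss holds learnable proxies. A minimal sketch of one step under that assumption (the method itself is hypothetical):

    def train_step(self, images, labels):
        images, labels = images.to(self.device), labels.to(self.device)
        self.optimizer.zero_grad()
        if self.loss in ['normsoftmax', 'panc', 'pnca']:
            self.loss_optimizer.zero_grad()
        embeddings = self.model(images)
        indices_tuple = self.mining_func(embeddings, labels)
        loss = self.loss_function(embeddings, labels, indices_tuple)
        loss.backward()
        self.optimizer.step()
        if self.loss in ['normsoftmax', 'panc', 'pnca']:
            self.loss_optimizer.step()  # updates the proxies
        return loss.item()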
#
# train_loader = torch.utils.data.DataLoader(dataset1, batch_size=256, shuffle=True)
# test_loader = torch.utils.data.DataLoader(dataset2, batch_size=256)

import torch
import torch.optim as optim
from pytorch_metric_learning import distances, losses, miners, reducers
from pytorch_metric_learning.utils.accuracy_calculator import AccuracyCalculator

output_size = 4  # number of classes
input_size = 768
hidden_size = 200

model = LSTM_model(input_size, output_size, hidden_size).to(device)
optimizer = optim.Adam(model.parameters(), lr=1e-3)
num_epochs = 40

### pytorch-metric-learning stuff ###
distance = distances.LpDistance()
reducer = reducers.MeanReducer()
# The embedding size is hidden_size * 2 (e.g. a bidirectional LSTM's
# concatenated output). ProxyNCALoss keeps learnable proxies; include
# loss_func.parameters() in an optimizer if the proxies should also train.
loss_func = losses.ProxyNCALoss(output_size, hidden_size * 2, softmax_scale=1)
mining_func = miners.TripletMarginMiner(margin=0.2,
                                        distance=distance,
                                        type_of_triplets="semihard")
accuracy_calculator = AccuracyCalculator(
    include=("mean_average_precision_at_r", ), k=10)
### pytorch-metric-learning stuff ###
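
The train function called below is not defined in this snippet. A minimal sketch modeled on the usual pytorch-metric-learning miner-plus-loss loop (the (data, labels) batch format is an assumption):

def train(model, loss_func, mining_func, device, train_loader, optimizer,
          epoch):
    model.train()
    for batch_idx, (data, labels) in enumerate(train_loader):
        data, labels = data.to(device), labels.to(device)
        optimizer.zero_grad()
        embeddings = model(data)
        # Mine semi-hard triplets and let the loss weight them.
        indices_tuple = mining_func(embeddings, labels)
        loss = loss_func(embeddings, labels, indices_tuple)
        loss.backward()
        optimizer.step()
        if batch_idx % 50 == 0:
            print("Epoch {} iter {}: loss = {:.4f}".format(
                epoch, batch_idx, loss.item()))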

for epoch in range(1, num_epochs + 1):
    train(model, loss_func, mining_func, device, train_loader, optimizer,
          epoch)
    # test(dataset2, model, accuracy_calculator)

torch.save(model.state_dict(),
           './metric_saved_model_' + working_aspect + '.ckpt')
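
The commented-out test call would use the AccuracyCalculator defined above. A sketch of what it might look like, modeled on the library's v1.x examples (get_accuracy's signature changed in later releases, so treat this as an assumption):

from pytorch_metric_learning import testers

def test(dataset, model, accuracy_calculator):
    # Embed the whole evaluation set, then score it against itself.
    embeddings, labels = testers.BaseTester().get_all_embeddings(dataset, model)
    accuracies = accuracy_calculator.get_accuracy(
        embeddings, embeddings, labels.squeeze(1), labels.squeeze(1), True)
    print("MAP@R =", accuracies["mean_average_precision_at_r"])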