Example 1: lazily constructing a dict of loss functions
def loss_fns(self):
    # lazily build and cache the loss functions on first access
    if not self._loss_fns:
        self._loss_fns = {
            'contrastive': ContrastiveLoss(margin=self.margin,
                                           average=True),
            'cross_entropy': CrossEntropyLoss()
        }
    return self._loss_fns
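A hedged usage sketch, assuming this method lives on a trainer whose __init__ sets self._loss_fns = None along with self.margin; every call-site name below is hypothetical:

# Hypothetical call site; `trainer`, `embeddings`, `targets`, `logits`, `labels`
# are placeholder names, not from the source.
losses = trainer.loss_fns()
loss = (losses['contrastive'](embeddings, targets)
        + losses['cross_entropy'](logits, labels))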
Example 2: a unit test for ContrastiveLoss
def test_contrastive_loss():
    batch_size = 10
    emb_size = 512
    emb_vec1 = torch.randn((batch_size, emb_size))
    emb_vec2 = torch.randn((batch_size, emb_size))
    # labels and class ids must match the embedding batch size
    y = torch.randint(2, size=(batch_size, 1))
    c1 = torch.randint(10, size=(batch_size, 1))
    c2 = torch.randint(10, size=(batch_size, 1))
    targets = (c1, c2, y)

    loss_fn = ContrastiveLoss(margin=1.0)

    loss = loss_fn((emb_vec1, emb_vec2), targets)

    assert len(loss.shape) == 0
    assert loss > 0
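For reference, a minimal ContrastiveLoss sketch that would satisfy this test's calling convention; it follows the classic Hadsell-et-al. formulation and is an assumption about, not a copy of, the project's utils.loss implementation:

import torch.nn as nn
import torch.nn.functional as F

class ContrastiveLoss(nn.Module):
    # Hypothetical sketch; the real project class may differ.
    def __init__(self, margin=1.0, average=True):
        super().__init__()
        self.margin = margin
        self.average = average

    def forward(self, embeddings, targets):
        emb1, emb2 = embeddings
        *_, y = targets                     # only the pair label is used here
        y = y.float().view(-1)
        d = F.pairwise_distance(emb1, emb2)
        # pull similar pairs (y = 1) together, push dissimilar pairs past the margin
        loss = y * d.pow(2) + (1 - y) * F.relu(self.margin - d).pow(2)
        return loss.mean() if self.average else loss.sum()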
Example 3: data loaders and evaluation losses built from a config
# assumed head of this truncated call: get_dataloader presumably returns (loader, neighborhood_limits)
config.train_loader, neighborhood_limits = get_dataloader(dataset=train_set,
                                                          batch_size=config.batch_size,
                                                          shuffle=True,
                                                          num_workers=config.num_workers,
                                                          )
config.val_loader, _ = get_dataloader(dataset=val_set,
                                      batch_size=config.batch_size,
                                      shuffle=True,
                                      num_workers=config.num_workers,
                                      neighborhood_limits=neighborhood_limits
                                      )
 
# create evaluation losses
if config.desc_loss == 'contrastive':
    desc_loss = ContrastiveLoss(
        pos_margin=config.pos_margin,
        neg_margin=config.neg_margin,
        metric='euclidean',
        safe_radius=config.safe_radius,
    )
else:
    desc_loss = CircleLoss(
        dist_type=config.dist_type,
        log_scale=config.log_scale,
        safe_radius=config.safe_radius,
        pos_margin=config.pos_margin,
        neg_margin=config.neg_margin,
    )

config.evaluation_metric = {
    'desc_loss': desc_loss,
    'det_loss': DetLoss(metric='euclidean'),
}
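How these objects are consumed is not shown here; below is a minimal sketch of one way a validation step could iterate the dict. The evaluate helper and the fn(outputs, batch) calling convention are assumptions, not the project's API:

# Hypothetical consumption of config.evaluation_metric; the fn(outputs, batch)
# signature is an assumption, not taken from the source.
def evaluate(model, loader, config):
    totals = {name: 0.0 for name in config.evaluation_metric}
    for batch in loader:
        outputs = model(batch)
        for name, fn in config.evaluation_metric.items():
            totals[name] += float(fn(outputs, batch))
    return {name: total / len(loader) for name, total in totals.items()}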
Example 4: optimizer, scheduler, and loss setup for a siamese classifier
params = [*siamese_net.parameters(), *clsf_net.parameters()]
optimizer = optim.Adam(params, lr=5e-4, weight_decay=1e-5)
# T_max = total optimizer steps: (len(train_ds) / batch_size) iterations per
# epoch times max_epochs, so the cosine schedule spans the whole run
scheduler = CosineAnnealingLR(optimizer,
                              T_max=len(train_ds) * max_epochs / batch_size,
                              eta_min=1e-6)
# Loss functions
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from torch.nn import CrossEntropyLoss
from utils.loss import ContrastiveLoss

import numpy as np
# margin = np.sqrt(1000)
margin = np.sqrt(10)
con_loss_fn = ContrastiveLoss(margin=margin, average=True)
cs_loss_fn = CrossEntropyLoss()
# per-pair ceiling: a negative pair at distance 0 costs m**2 / 2 (= 5.0 here),
# presumably used to put the two losses on a comparable scale
scale_factor = 0.5 * margin**2
# Accuracy metric
import torch
from ignite.metrics import Accuracy

# from ignite import metrics


# class SiameseNetSimilarityAccuracy(metrics.Accuracy):
class SiameseNetSimilarityAccuracy(Accuracy):
    """
    Calculate pair-similarity accuracy for a siamese network.

    Example:
Example 5: a full training run with PyTorch Ignite
    def run(self):

        # Config
        cfg = self.hparams

        # model
        emb_net = SimpleConvEmbNet()
        model = SiameseNet(emb_net)
        self.model = model

        # prepare the loaders
        self.prepare_data_loaders()
        train_loader = self.siamese_train_loader
        val_loader = self.siamese_val_loader  # assumed: prepare_data_loaders() sets this too

        # device
        if torch.cuda.is_available():
            device = 'cuda'
        else:
            device = 'cpu'

        # optimizer
        optimizer = optim.Adam(model.parameters(), lr=cfg.lr)

        # learning rate scheduler
        scheduler = StepLR(optimizer=optimizer,
                           step_size=2,
                           gamma=0.1,
                           last_epoch=-1)

        # loss function
        margin = 1.0
        loss_fn = ContrastiveLoss(margin)
        # trainer
        trainer = create_supervised_trainer(model,
                                            optimizer,
                                            loss_fn,
                                            device=device)

        evaluator = create_supervised_evaluator(model,
                                                metrics={
                                                    'accuracy': Accuracy(),
                                                    'loss': Loss(loss_fn)
                                                },
                                                device=device)

        desc = "ITERATION - loss: {:.2f}"
        pbar = tqdm(initial=0,
                    leave=False,
                    total=len(train_loader),
                    desc=desc.format(0))

        # checkpoints
        handler = ModelCheckpoint(dirname='./checkpoints',
                                  filename_prefix='sample',
                                  save_interval=2,
                                  n_saved=3,
                                  create_dir=True,
                                  save_as_state_dict=True)

        # -------------------
        # Callbacks / Events
        # -------------------

        # check point
        trainer.add_event_handler(Events.EPOCH_COMPLETED, handler, {
            'model': model,
            "optimizer": optimizer,
        })

        # learning rate
        # trainer.add_event_handler(Events.I, lambda engine: lr_scheduler.step())
        @trainer.on(Events.EPOCH_COMPLETED)
        def take_scheduler_step(engine):
            scheduler.step()

            # Print out the learning rate now in effect
            tqdm.write("Learning Rate - Epoch: {}  Learning Rate: {}".format(
                engine.state.epoch, scheduler.get_last_lr()))

        @trainer.on(Events.ITERATION_COMPLETED)
        def log_training_loss(engine):
            # 1-based iteration index within the current epoch
            it = (engine.state.iteration - 1) % len(train_loader) + 1

            if it % cfg.log_interval == 0:
                pbar.desc = desc.format(engine.state.output)
                pbar.update(cfg.log_interval)

        @trainer.on(Events.EPOCH_COMPLETED)
        def log_training_results(engine):
            pbar.refresh()
            evaluator.run(train_loader)
            metrics = evaluator.state.metrics
            avg_accuracy = metrics['accuracy']
            avg_loss = metrics['loss']
            tqdm.write(
                "Training Results - Epoch: {}  Avg accuracy: {:.2f} Avg loss: {:.2f}"
                .format(engine.state.epoch, avg_accuracy, avg_loss))

        @trainer.on(Events.EPOCH_COMPLETED)
        def log_validation_results(engine):
            evaluator.run(val_loader)
            metrics = evaluator.state.metrics
            avg_accuracy = metrics['accuracy']
            avg_loss = metrics['loss']
            tqdm.write(
                "Validation Results - Epoch: {}  Avg accuracy: {:.2f} Avg loss: {:.2f}"
                .format(engine.state.epoch, avg_accuracy, avg_loss))

            pbar.n = pbar.last_print_n = 0

        trainer.run(train_loader, max_epochs=cfg.epochs)
        pbar.close()
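One caveat: ignite's Accuracy and Loss expect the evaluator to emit (y_pred, y), which a pair-embedding SiameseNet does not produce directly, so an output_transform (or a subclass like the SiameseNetSimilarityAccuracy in Example 4) is usually needed. A minimal sketch, with the output format and the distance threshold as assumptions:

import torch.nn.functional as F

MARGIN = 1.0  # matches the margin used above; the threshold below is an assumption

def similarity_output_transform(output):
    # Hypothetical: assumes the evaluator emits ((emb1, emb2), y)
    (emb1, emb2), y = output
    d = F.pairwise_distance(emb1, emb2)
    y_pred = (d < MARGIN / 2).long()    # pairs closer than half the margin count as similar
    return y_pred, y.view(-1).long()

# e.g. Accuracy(output_transform=similarity_output_transform)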
Example 6: data loaders and fit() call for siamese training
# assumed head of this truncated call: the plain (non-siamese) train loader
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=batch_size,
                                           shuffle=False,
                                           **kwargs)
test_loader = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False,
                                          **kwargs)
# ---------------------------------------------------------------------

# Step 2
embedding_net = SimpleCNN(last_layer='emb')
# Step 3
model = SiameseNet(embedding_net)

margin = 1.
loss_fn = ContrastiveLoss(margin)

lr = 1e-3
if has_cuda:
    model.cuda()
optimizer = optim.Adam(model.parameters(), lr=lr)
scheduler = lr_scheduler.StepLR(optimizer,
                                step_size=5,
                                gamma=0.1,
                                last_epoch=-1)
n_epochs = 20
log_interval = 50

# siamese_train_loader / siamese_test_loader come from earlier, elided pair-dataset setup
fit(siamese_train_loader, siamese_test_loader, model, loss_fn, optimizer,
    scheduler, n_epochs, has_cuda, log_interval)
# ---------------------------------------------------------------------------
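For orientation, a minimal sketch of the kind of loop a fit() helper like this typically runs; the ((x1, x2), y) batch format and the loss_fn(out1, out2, y) signature are assumptions about this codebase, not its actual utility:

# Hypothetical training loop; batch format and loss signature are assumptions.
for epoch in range(n_epochs):
    model.train()
    for (x1, x2), y in siamese_train_loader:
        if has_cuda:
            x1, x2, y = x1.cuda(), x2.cuda(), y.cuda()
        optimizer.zero_grad()
        out1, out2 = model(x1, x2)      # SiameseNet embeds both inputs
        loss = loss_fn(out1, out2, y)
        loss.backward()
        optimizer.step()
    scheduler.step()                    # StepLR advances once per epoch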