Code Example #1
File: test_NTXentLoss.py Project: ssbagalkar/lightly
    def test_get_correlated_mask(self):
        loss = NTXentLoss()
        for bsz in range(1, 1000):
            mask = loss._torch_get_correlated_mask(bsz)

            # correct number of ones in the mask: everything except the
            # diagonal and the two bsz-offset diagonals is one
            self.assertAlmostEqual(mask.sum().item(), 4 * (bsz * bsz - bsz))

            # if mask is correct,
            # (1 - mask) * v adds up the first and second half of v
            v = torch.randn((2 * bsz))
            mv = torch.mv(1. - mask.float(), v)
            vv = (v[bsz:] + v[:bsz]).repeat(2)
            self.assertAlmostEqual((mv - vv).pow(2).sum().item(), 0.)
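For intuition, the structure these checks imply can be built by hand: zeros on the main diagonal and on the two bsz-offset diagonals, ones everywhere else. A minimal sketch (my own construction via torch.roll, not the library's implementation):

import torch

bsz = 2
diag = torch.eye(2 * bsz, dtype=torch.bool)
# rolling the identity by bsz columns marks the pairs (i, i + bsz) and (i + bsz, i)
off_diag = torch.roll(diag, shifts=bsz, dims=1)
mask = ~(diag | off_diag)
print(mask.int())
# tensor([[0, 1, 0, 1],
#         [1, 0, 1, 0],
#         [0, 1, 0, 1],
#         [1, 0, 1, 0]], dtype=torch.int32)
# sum of ones: 4 * (bsz * bsz - bsz) = 8, matching the assertion above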
Code Example #2
File: test_NTXentLoss.py Project: ssbagalkar/lightly
    def test_forward_pass_memory_bank_cuda(self):
        if not torch.cuda.is_available():
            return

        loss = NTXentLoss(memory_bank_size=64)
        for bsz in range(1, 100):
            batch = torch.randn(2 * bsz, 32).cuda()
            l = loss(batch)
Code Example #3
File: simclr.py Project: lightly-ai/lightly
    def __init__(self):
        super().__init__()
        resnet = torchvision.models.resnet18()
        self.backbone = nn.Sequential(*list(resnet.children())[:-1])
        self.projection_head = SimCLRProjectionHead(512, 2048, 2048)

        # enable gather_distributed to gather features from all gpus
        # before calculating the loss
        self.criterion = NTXentLoss(gather_distributed=True)
Code Example #4
File: nnclr.py Project: lightly-ai/lightly
    def __init__(self):
        super().__init__()
        resnet = torchvision.models.resnet18()
        self.backbone = nn.Sequential(*list(resnet.children())[:-1])
        self.projection_head = NNCLRProjectionHead(512, 512, 128)
        self.prediction_head = NNCLRPredictionHead(128, 512, 128)
        self.memory_bank = NNMemoryBankModule(size=4096)

        self.criterion = NTXentLoss()
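The constructor above is only half of the story; a minimal sketch of the matching NNCLR training step, assuming (as in the usual recipe) that the model's forward returns the detached projection z and the prediction p, and that dataloader and optimizer are set up as in the other examples:

for (x0, x1), _, _ in dataloader:
    z0, p0 = model(x0)
    z1, p1 = model(x1)
    # swap each projection for its nearest neighbor from the memory bank
    z0 = model.memory_bank(z0, update=False)
    z1 = model.memory_bank(z1, update=True)
    # symmetrized NT-Xent loss between neighbors and predictions
    loss = 0.5 * (model.criterion(z0, p1) + model.criterion(z1, p0))
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()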
Code Example #5
    def test_forward_pass(self):
        loss = NTXentLoss(memory_bank_size=0)
        for bsz in range(1, 20):
            batch_1 = torch.randn((bsz, 32))
            batch_2 = torch.randn((bsz, 32))

            # symmetry
            l1 = loss(batch_1, batch_2)
            l2 = loss(batch_2, batch_1)
            self.assertAlmostEqual((l1 - l2).pow(2).item(), 0.)
Code Example #6
File: test_NTXentLoss.py Project: ssbagalkar/lightly
    def test_forward_pass_neg_temp(self):
        loss = NTXentLoss(temperature=-1., memory_bank_size=0)
        for bsz in range(1, 100):

            batch_1 = torch.randn((bsz, 32))
            batch_2 = torch.randn((bsz, 32))

            # symmetry
            l1 = loss(torch.cat((batch_1, batch_2), 0))
            l2 = loss(torch.cat((batch_2, batch_1), 0))
            self.assertAlmostEqual((l1 - l2).pow(2).item(), 0.)
Code Example #7
File: test_NTXentLoss.py Project: jackyvan/lightly
    def test_forward_pass_1d(self):
        loss = NTXentLoss()
        for bsz in range(1, 100):

            batch_1 = torch.randn((bsz, 1))
            batch_2 = torch.randn((bsz, 1))

            # symmetry
            l1 = loss(torch.cat((batch_1, batch_2), 0))
            l2 = loss(torch.cat((batch_2, batch_1), 0))
            self.assertAlmostEqual((l1 - l2).pow(2).item(), 0.)
Code Example #8
    def __init__(self):
        super().__init__()

        # create a ResNet backbone and remove the classification head
        resnet = torchvision.models.resnet18()
        self.backbone = nn.Sequential(*list(resnet.children())[:-1])

        hidden_dim = resnet.fc.in_features
        self.projection_head = SimCLRProjectionHead(hidden_dim, hidden_dim,
                                                    128)

        self.criterion = NTXentLoss()
Code Example #9
    def __init__(self):
        super().__init__()
        resnet = torchvision.models.resnet18()
        self.backbone = nn.Sequential(*list(resnet.children())[:-1])
        self.projection_head = MoCoProjectionHead(512, 512, 128)

        self.backbone_momentum = copy.deepcopy(self.backbone)
        self.projection_head_momentum = copy.deepcopy(self.projection_head)

        deactivate_requires_grad(self.backbone_momentum)
        deactivate_requires_grad(self.projection_head_momentum)

        self.criterion = NTXentLoss(memory_bank_size=4096)
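Code Example #16 below calls both model(x_query) and model.forward_momentum(x_key); a plausible pair of forward methods for this module (a sketch following the common MoCo pattern, not necessarily this project's exact code):

    def forward(self, x):
        # query branch: gradients flow through backbone and projection head
        query = self.backbone(x).flatten(start_dim=1)
        query = self.projection_head(query)
        return query

    def forward_momentum(self, x):
        # key branch: the momentum copies, detached so no gradients flow back
        key = self.backbone_momentum(x).flatten(start_dim=1)
        key = self.projection_head_momentum(key).detach()
        return key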
Code Example #10
File: test_NTXentLoss.py Project: ssbagalkar/lightly
    def test_forward_pass_cuda(self):
        if torch.cuda.is_available():
            loss = NTXentLoss(memory_bank_size=0)
            for bsz in range(1, 100):

                batch_1 = torch.randn((bsz, 32)).cuda()
                batch_2 = torch.randn((bsz, 32)).cuda()

                # symmetry
                l1 = loss(torch.cat((batch_1, batch_2), 0))
                l2 = loss(torch.cat((batch_2, batch_1), 0))
                self.assertAlmostEqual((l1 - l2).pow(2).item(), 0.)
Code Example #11
    def test_with_values(self):
        for n_samples in [1, 2, 4]:
            for dimension in [1, 2, 16, 64]:
                for temperature in [0.1, 1, 10]:
                    out0 = np.random.normal(0, 1, size=(n_samples, dimension))
                    out1 = np.random.normal(0, 1, size=(n_samples, dimension))
                    with self.subTest(msg=f"out0.shape={out0.shape}, temperature={temperature}"):
                        out0 = torch.FloatTensor(out0)
                        out1 = torch.FloatTensor(out1)

                        loss_function = NTXentLoss(temperature=temperature)
                        l1 = float(loss_function(out0, out1))
                        l2 = float(loss_function(out1, out0))
                        l1_manual = self.calc_ntxent_loss_manual(out0, out1, temperature=temperature)
                        l2_manual = self.calc_ntxent_loss_manual(out1, out0, temperature=temperature)
                        self.assertAlmostEqual(l1, l2, places=5)
                        self.assertAlmostEqual(l1, l1_manual, places=5)
                        self.assertAlmostEqual(l2, l2_manual, places=5)
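calc_ntxent_loss_manual is referenced but not shown; one way to write such a reference implementation, assuming the standard SimCLR NT-Xent definition (a sketch, not the test file's actual helper):

    def calc_ntxent_loss_manual(self, out0, out1, temperature):
        # normalize embeddings to unit length, since NT-Xent uses cosine similarity
        out0 = out0 / out0.norm(dim=1, keepdim=True)
        out1 = out1 / out1.norm(dim=1, keepdim=True)
        n = out0.shape[0]
        # 2n x 2n similarity matrix of the concatenated batch, scaled by temperature
        z = torch.cat([out0, out1], dim=0)
        logits = z @ z.t() / temperature
        # sample i's positive is its augmented counterpart n positions away
        labels = torch.cat([torch.arange(n) + n, torch.arange(n)])
        # exclude self-similarity from the softmax denominator
        logits.fill_diagonal_(float('-inf'))
        return torch.nn.functional.cross_entropy(logits, labels).item()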
Code Example #12
    def test_with_correlated_embedding(self):
        for n_samples in [1, 2, 8, 16]:
            for memory_bank_size in [0, 1, 2, 8, 15, 16, 17]:
                for temperature in [0.1, 1, 7]:
                    out0 = np.random.random((n_samples, 1))
                    out1 = np.random.random((n_samples, 1))
                    out0 = np.concatenate([out0, 2 * out0], axis=1)
                    out1 = np.concatenate([out1, 2 * out1], axis=1)
                    out0 = torch.FloatTensor(out0)
                    out1 = torch.FloatTensor(out1)
                    out0.requires_grad = True

                    with self.subTest(msg=f"n_samples: {n_samples}, memory_bank_size: {memory_bank_size},"
                                          f"temperature: {temperature}"):
                        loss_function = NTXentLoss(temperature=temperature, memory_bank_size=memory_bank_size)
                        if memory_bank_size > 0:
                            for i in range(int(memory_bank_size / n_samples) + 2):
                                # fill the memory bank over multiple rounds
                                loss = float(loss_function(out0, out1))
                            # the correlated rows collapse to one direction after
                            # normalization, so the softmax is uniform over the
                            # positive and the memory_bank_size negatives
                            expected_loss = -1 * np.log(1 / (memory_bank_size + 1))
                        else:
                            loss = float(loss_function(out0, out1))
                            # uniform softmax over the positive and 2 * n_samples - 2 negatives
                            expected_loss = -1 * np.log(1 / (2 * n_samples - 1))
                        self.assertAlmostEqual(loss, expected_loss, places=5)
Code Example #13
File: train_cli.py Project: lightly-ai/lightly
def _train_cli(cfg, is_cli_call=True):

    input_dir = cfg['input_dir']
    if input_dir and is_cli_call:
        input_dir = fix_input_path(input_dir)

    if 'seed' in cfg.keys():
        seed = cfg['seed']
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    if cfg["trainer"]["weights_summary"] == "None":
        cfg["trainer"]["weights_summary"] = None

    if torch.cuda.is_available():
        device = 'cuda'
    elif cfg['trainer'] and cfg['trainer']['gpus']:
        device = 'cpu'
        cfg['trainer']['gpus'] = 0
    else:
        device = 'cpu'

    distributed_strategy = None
    if cfg['trainer']['gpus'] > 1:
        distributed_strategy = 'ddp'

    if cfg['loader']['batch_size'] < 64:
        msg = 'Training a self-supervised model with a small batch size: {}! '
        msg = msg.format(cfg['loader']['batch_size'])
        msg += 'Small batch size may harm embedding quality. '
        msg += 'You can specify the batch size via the loader key-word: '
        msg += 'loader.batch_size=BSZ'
        warnings.warn(msg)

    # determine the number of available cores
    if cfg['loader']['num_workers'] < 0:
        cfg['loader']['num_workers'] = cpu_count()

    state_dict = None
    checkpoint = cfg['checkpoint']
    if cfg['pre_trained'] and not checkpoint:
        # if checkpoint wasn't specified explicitly and pre_trained is True
        # try to load the checkpoint from the model zoo
        checkpoint, key = get_ptmodel_from_config(cfg['model'])
        if not checkpoint:
            msg = 'Cannot download checkpoint for key {} '.format(key)
            msg += 'because it does not exist! '
            msg += 'Model will be trained from scratch.'
            warnings.warn(msg)
    elif checkpoint:
        checkpoint = fix_input_path(checkpoint) if is_cli_call else checkpoint

    if checkpoint:
        # load the PyTorch state dictionary and map it to the current device
        if is_url(checkpoint):
            state_dict = load_state_dict_from_url(
                checkpoint, map_location=device)['state_dict']
        else:
            state_dict = torch.load(checkpoint,
                                    map_location=device)['state_dict']

    # load model
    resnet = ResNetGenerator(cfg['model']['name'], cfg['model']['width'])
    last_conv_channels = list(resnet.children())[-1].in_features
    features = nn.Sequential(
        get_norm_layer(3, 0),
        *list(resnet.children())[:-1],
        nn.Conv2d(last_conv_channels, cfg['model']['num_ftrs'], 1),
        nn.AdaptiveAvgPool2d(1),
    )

    model = _SimCLR(features,
                    num_ftrs=cfg['model']['num_ftrs'],
                    out_dim=cfg['model']['out_dim'])
    if state_dict is not None:
        load_from_state_dict(model, state_dict)

    criterion = NTXentLoss(**cfg['criterion'])
    optimizer = torch.optim.SGD(model.parameters(), **cfg['optimizer'])

    dataset = LightlyDataset(input_dir)

    cfg['loader']['batch_size'] = min(cfg['loader']['batch_size'],
                                      len(dataset))

    collate_fn = ImageCollateFunction(**cfg['collate'])
    dataloader = torch.utils.data.DataLoader(dataset,
                                             **cfg['loader'],
                                             collate_fn=collate_fn)

    encoder = SelfSupervisedEmbedding(model, criterion, optimizer, dataloader)
    encoder.init_checkpoint_callback(**cfg['checkpoint_callback'])
    encoder.train_embedding(**cfg['trainer'], strategy=distributed_strategy)

    print(
        f'Best model is stored at: {bcolors.OKBLUE}{encoder.checkpoint}{bcolors.ENDC}'
    )
    os.environ[cfg['environment_variable_names']
               ['lightly_last_checkpoint_path']] = encoder.checkpoint
    return encoder.checkpoint
Code Example #14
def _train_cli(cfg, is_cli_call=True):

    data = cfg['data']
    download = cfg['download']

    root = cfg['root']
    if root and is_cli_call:
        root = fix_input_path(root)

    input_dir = cfg['input_dir']
    if input_dir and is_cli_call:
        input_dir = fix_input_path(input_dir)

    if 'seed' in cfg.keys():
        seed = cfg['seed']
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    if torch.cuda.is_available():
        device = 'cuda'
    elif cfg['trainer'] and cfg['trainer']['gpus']:
        device = 'cpu'
        cfg['trainer']['gpus'] = 0
    else:
        # fall back to cpu so `device` is always defined for map_location below
        device = 'cpu'

    if cfg['loader']['batch_size'] < 64:
        msg = 'Training a self-supervised model with a small batch size: {}! '
        msg = msg.format(cfg['loader']['batch_size'])
        msg += 'Small batch size may harm embedding quality. '
        msg += 'You can specify the batch size via the loader key-word: '
        msg += 'loader.batch_size=BSZ'
        warnings.warn(msg)

    state_dict = None
    checkpoint = cfg['checkpoint']
    if cfg['pre_trained'] and not checkpoint:
        # if checkpoint wasn't specified explicitly and pre_trained is True
        # try to load the checkpoint from the model zoo
        checkpoint, key = get_ptmodel_from_config(cfg['model'])
        if not checkpoint:
            msg = 'Cannot download checkpoint for key {} '.format(key)
            msg += 'because it does not exist! '
            msg += 'Model will be trained from scratch.'
            warnings.warn(msg)
    elif checkpoint:
        checkpoint = fix_input_path(checkpoint) if is_cli_call else checkpoint
    
    if checkpoint:
        # load the PyTorch state dictionary and map it to the current device
        if is_url(checkpoint):
            state_dict = load_state_dict_from_url(
                checkpoint, map_location=device
            )['state_dict']
        else:
            state_dict = torch.load(
                checkpoint, map_location=device
            )['state_dict']

    # load model
    model = ResNetSimCLR(**cfg['model'])
    if state_dict is not None:
        model.load_from_state_dict(state_dict)

    criterion = NTXentLoss(**cfg['criterion'])
    optimizer = torch.optim.SGD(model.parameters(), **cfg['optimizer'])

    dataset = LightlyDataset(root,
                             name=data, train=True, download=download,
                             from_folder=input_dir)

    cfg['loader']['batch_size'] = min(
        cfg['loader']['batch_size'],
        len(dataset)
    )

    collate_fn = ImageCollateFunction(**cfg['collate'])
    dataloader = torch.utils.data.DataLoader(dataset,
                                             **cfg['loader'],
                                             collate_fn=collate_fn)

    encoder = SelfSupervisedEmbedding(model, criterion, optimizer, dataloader)
    encoder.init_checkpoint_callback(**cfg['checkpoint_callback'])
    encoder = encoder.train_embedding(**cfg['trainer'])

    print('Best model is stored at: %s' % (encoder.checkpoint))
    return encoder.checkpoint
Code Example #15
collate_fn = SimCLRCollateFunction(
    input_size=32,
    gaussian_blur=0.,
)

dataloader = torch.utils.data.DataLoader(
    dataset,
    batch_size=256,
    collate_fn=collate_fn,
    shuffle=True,
    drop_last=True,
    num_workers=8,
)

criterion = NTXentLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.06)

print("Starting Training")
for epoch in range(10):
    total_loss = 0
    for (x0, x1), _, _ in dataloader:
        x0 = x0.to(device)
        x1 = x1.to(device)
        z0 = model(x0)
        z1 = model(x1)
        loss = criterion(z0, z1)
        total_loss += loss.detach()
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
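    # a plausible epoch-level progress report (not part of the original snippet;
    # otherwise total_loss is accumulated but never used)
    avg_loss = total_loss / len(dataloader)
    print(f"epoch: {epoch:>02}, loss: {avg_loss:.5f}")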
Code Example #16
File: moco.py Project: lightly-ai/lightly
dataset = LightlyDataset.from_torch_dataset(cifar10)
# or create a dataset from a folder containing images or videos:
# dataset = LightlyDataset("path/to/folder")

collate_fn = MoCoCollateFunction(input_size=32)

dataloader = torch.utils.data.DataLoader(
    dataset,
    batch_size=256,
    collate_fn=collate_fn,
    shuffle=True,
    drop_last=True,
    num_workers=8,
)

criterion = NTXentLoss(memory_bank_size=4096)
optimizer = torch.optim.SGD(model.parameters(), lr=0.06)

print("Starting Training")
for epoch in range(10):
    total_loss = 0
    for (x_query, x_key), _, _ in dataloader:
        update_momentum(model.backbone, model.backbone_momentum, m=0.99)
        update_momentum(model.projection_head, model.projection_head_momentum, m=0.99)
        x_query = x_query.to(device)
        x_key = x_key.to(device)
        query = model(x_query)
        key = model.forward_momentum(x_key)
        loss = criterion(query, key)
        total_loss += loss.detach()
        loss.backward()
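        # the snippet appears truncated here; without an optimizer step the
        # model never updates (assumed continuation, mirroring Code Example #15)
        optimizer.step()
        optimizer.zero_grad()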
Code Example #17
    def __init__(self):
        super().__init__()
        resnet = torchvision.models.resnet18()
        self.backbone = nn.Sequential(*list(resnet.children())[:-1])
        self.projection_head = SimCLRProjectionHead(512, 2048, 2048)
        self.criterion = NTXentLoss()
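The training loop in Code Example #15 calls model(x0) directly; a matching forward method would flatten the backbone features and project them. A minimal sketch under that assumption:

    def forward(self, x):
        # backbone features, flattened to (batch_size, 512)
        h = self.backbone(x).flatten(start_dim=1)
        # projection fed to the NT-Xent criterion
        z = self.projection_head(h)
        return z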
Code Example #18
File: test_NTXentLoss.py Project: ssbagalkar/lightly
    def test_forward_pass_memory_bank(self):
        loss = NTXentLoss(memory_bank_size=64)
        for bsz in range(1, 100):
            batch = torch.randn(2 * bsz, 32)
            l = loss(batch)
Code Example #19
    def test_forward_pass_memory_bank(self):
        loss = NTXentLoss(memory_bank_size=64)
        for bsz in range(1, 20):
            batch_1 = torch.randn((bsz, 32))
            batch_2 = torch.randn((bsz, 32))
            l = loss(batch_1, batch_2)
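Note: some of the examples above pass a single concatenated tensor to the loss, e.g. loss(torch.cat((batch_1, batch_2), 0)), while others pass the two views separately as loss(batch_1, batch_2). These snippets appear to come from different versions of lightly; in recent releases NTXentLoss.forward expects the two augmented views as separate arguments.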