Code example #1
def train(train_loader, model, optimizer, device, epoch):
    model.train()
    losses = []
    accuracy = []
    total_samples = []
    for batch_idx, (spectrograms, int_labels, _) in enumerate(train_loader):
        spectrograms, int_labels = spectrograms.to(device), int_labels.to(
            device)
        embeddings = model(spectrograms)
        # Batch-hard triplet loss over the embedding batch (a sketch of the
        # helper follows this example); use the function's device argument
        # rather than hard-coding 'cuda'.
        loss, correct_negative, total = batch_hard_triplet_loss(
            int_labels,
            embeddings,
            margin_positive=8,
            margin_negative=8,
            device=device,
            squared=True)
        total_samples.append(total)
        losses.append(loss.item())
        acc = (correct_negative / total) * 100
        accuracy.append(acc)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print(
            ' Training Epoch {} : [{}/{}] \t Loss: {:.4f} \t Accuracy: {:.2f} \t '
            .format(epoch, batch_idx,
                    int(len(train_loader.dataset) / args.train_batch_size),
                    loss, acc),
            flush=True,
            end='\r')
    return mean(losses), mean(accuracy)
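All of these snippets call a shared batch_hard_triplet_loss helper that is not shown here (they also assume torch, from statistics import mean, wandb, and an AverageMeter, sketched after code example #2). Below is a minimal sketch of batch-hard mining consistent with these call sites, under two labeled assumptions: the two margins pull the hardest positive inside margin_positive and push the hardest negative outside margin_negative, and correct_negative counts anchors whose hardest negative clears that margin. Examples #6 and #7 call a five-value variant that additionally reports correct_positive; the real implementation may differ.

import torch
import torch.nn.functional as F

def batch_hard_triplet_loss(labels, embeddings, margin_positive,
                            margin_negative, device, squared=False):
    # Hypothetical sketch, not the snippets' actual implementation.
    # Pairwise squared Euclidean distances from the Gram matrix.
    dot = embeddings @ embeddings.t()
    sq_norms = dot.diagonal()
    dist = (sq_norms.unsqueeze(0) - 2.0 * dot +
            sq_norms.unsqueeze(1)).clamp(min=0.0)
    if not squared:
        dist = (dist + 1e-16).sqrt()
    same = labels.unsqueeze(0) == labels.unsqueeze(1)
    eye = torch.eye(labels.size(0), dtype=torch.bool, device=device)
    # Hardest positive: farthest same-label sample (self excluded).
    hardest_pos = (dist * (same & ~eye)).max(dim=1).values
    # Hardest negative: nearest different-label sample; same-label entries
    # are pushed out of contention by adding the max distance.
    hardest_neg = (dist + dist.max() * same).min(dim=1).values
    # Assumed two-margin objective: positives inside margin_positive,
    # negatives outside margin_negative.
    loss = (F.relu(hardest_pos - margin_positive) +
            F.relu(margin_negative - hardest_neg)).mean()
    total = labels.size(0)
    correct_negative = int((hardest_neg > margin_negative).sum())
    return loss, correct_negative, total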
Code example #2
def test_windowed(test_loader, SincNet_model, MLP_model, epoch, device):
    MLP_model.eval()
    SincNet_model.eval()
    total_samples = 0
    losses = AverageMeter()
    accuracy = AverageMeter()
    with torch.no_grad():
        for batch_idx, (tracks, int_labels) in enumerate(test_loader):
            tracks, int_labels = tracks.to(device), int_labels.to(device)
            # Embed each window of a track, then average the window
            # embeddings into a single 258-dim track embedding.
            embeddings = torch.zeros(tracks.size(0), 258, device=device)
            for i in range(tracks.size(0)):
                track = tracks[i, :, :]
                embedding = MLP_model(SincNet_model(track))
                embedding = torch.mean(embedding, dim=0, keepdim=True)
                embeddings[i, :] = embedding
            loss, correct_negative, total = batch_hard_triplet_loss(
                int_labels,
                embeddings,
                margin_positive=2,
                margin_negative=2,
                device=device,
                squared=True)

            total_samples = total_samples + total
            losses.update(loss.detach(), 1)
            accuracy.update((correct_negative / total) * 100, 1)
    print('Test Epoch {}: Loss: {:.4f}, Accuracy {:.2f} \t'.format(
        epoch, losses.avg, accuracy.avg))
    return losses.avg, accuracy.avg
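AverageMeter is likewise used without a definition. The usage here (losses.update(value, n), then losses.avg) matches the standard running-average utility from the official PyTorch ImageNet example; a minimal version, assuming that is what the snippets rely on:

class AverageMeter(object):
    # Running average of a scalar: update(val, n) adds val with weight n.
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count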
Code example #3
def test(test_loader, SincNet_model, MLP_model, epoch):
    MLP_model.eval()
    SincNet_model.eval()
    total_samples = 0
    losses = AverageMeter()
    accuracy = AverageMeter()
    with torch.no_grad():
        for batch_idx, (tracks, int_labels,
                        string_labels) in enumerate(test_loader):
            tracks, int_labels = tracks.cuda(), int_labels.cuda()
            embeddings = SincNet_model(tracks)
            embeddings = MLP_model(embeddings)
            loss, correct_negative, total = batch_hard_triplet_loss(
                int_labels,
                embeddings,
                margin_negative=2,
                margin_positive=2,
                device='cuda',
                squared=True)
            total_samples = total_samples + total
            losses.update(loss, 1)
            accuracy.update((correct_negative / total) * 100, 1)
    print('Test Epoch {}: Loss: {:.4f}, Accuracy {:.2f} \t'.format(
        epoch, losses.avg, accuracy.avg))
    return losses.avg, accuracy.avg
Code example #4
def test(data_loader, model, device, epoch):
    model.eval()
    losses = []
    accuracy = []
    total_samples = []
    with torch.no_grad():
        for batch_idx, (spectrograms, int_labels, _) in enumerate(data_loader):
            spectrograms, int_labels = spectrograms.to(device), int_labels.to(
                device)
            embeddings = model(spectrograms)
            loss, correct_negative, total = batch_hard_triplet_loss(
                int_labels,
                embeddings,
                margin_negative=8,
                margin_positive=8,
                device=device,
                squared=True)
            total_samples.append(total)
            losses.append(loss.item())
            acc = (correct_negative / total) * 100
            accuracy.append(acc)

    print(
        ' Test/Validate Epoch {}: \t Loss: {:.4f}, Accuracy: {:.2f} \t'.format(
            epoch, mean(losses), mean(accuracy)))
    return mean(losses), mean(accuracy)
Code example #5
def train_snippets(SincNet_model, MLP_model, optimizer_SincNet, optimizer_MLP,
                   device, epoch, train_loader):
    SincNet_model.train()
    MLP_model.train()
    losses = []
    accuracy = []
    total_samples = []
    for batch_idx, (tracks, int_labels, _) in enumerate(train_loader):
        tracks, int_labels = tracks.to(device), int_labels.to(device)
        embeddings = MLP_model(SincNet_model(tracks))
        loss, correct_negative, total = batch_hard_triplet_loss(
            int_labels,
            embeddings,
            margin_positive=8,
            margin_negative=8,
            device=device,
            squared=True)
        total_samples.append(total)

        losses.append(loss.item())
        acc = (correct_negative / total) * 100
        accuracy.append(acc)
        optimizer_MLP.zero_grad()
        optimizer_SincNet.zero_grad()
        loss.backward()
        optimizer_MLP.step()
        optimizer_SincNet.step()

    return mean(losses), mean(accuracy)
Code example #6
def train_batch(train_loader, tnet, optimizer, epoch):
    tnet.train()
    losses = AverageMeter()
    total_samples = 0
    correct_negative_samples = 0
    correct_positive_samples = 0
    for i, batch in enumerate(train_loader, start=1):
        spectrograms, int_labels, string_labels = batch
        int_labels = int_labels.cuda()
        spectrograms = spectrograms.cuda()
        embeddings = tnet(spectrograms)

        loss, correct_positive, correct_negative, total, _ = batch_hard_triplet_loss(
            int_labels,
            embeddings,
            margin_negative=2,
            margin_positive=2,
            device='cuda',
            squared=True)
        correct_negative_samples = correct_negative_samples + correct_negative
        correct_positive_samples = correct_positive_samples + correct_positive
        total_samples = total_samples + (total * 2)
        # Detach before logging so the meter does not hold the autograd graph.
        losses.update(loss.detach(), 1)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print('Train Epoch: {} [{}/{}]\t'
              'Loss: {:.4f} \t'.format(epoch, i, len(train_loader), loss))
    print('Epoch {} Accuracy: {:.4f} Loss: {:.4f} \t'.format(
        epoch, ((correct_positive_samples + correct_negative_samples) /
                total_samples), losses.avg))
    wandb.log({
        "Train Accuracy":
        (correct_positive_samples + correct_negative_samples) / total_samples,
        "Train Loss":
        losses.avg
    })
Code example #7
def test_batch(test_loader, tnet, epoch):
    tnet.eval()
    losses = AverageMeter()

    total_samples = 0
    correct_negative_samples = 0
    correct_positive_samples = 0

    with torch.no_grad():
        for batch in test_loader:
            spectrograms, int_labels, string_labels = batch
            int_labels = int_labels.cuda()
            spectrograms = spectrograms.cuda()
            embeddings = tnet(spectrograms)
            loss, correct_positive, correct_negative, total, _ = batch_hard_triplet_loss(
                int_labels,
                embeddings,
                margin_negative=2,
                margin_positive=2,
                device='cuda',
                squared=True)
            losses.update(loss, 1)
            correct_negative_samples = correct_negative_samples + correct_negative
            correct_positive_samples = correct_positive_samples + correct_positive
            total_samples = total_samples + (total * 2)

        print('Epoch {} Test Accuracy: {:.4f} Test_Loss: {:.4f} \t'.format(
            epoch, ((correct_negative_samples + correct_positive_samples) /
                    total_samples), losses.avg))
        wandb.log({
            "Test Accuracy":
            (correct_negative_samples + correct_positive_samples) /
            total_samples,
            "Test Loss":
            losses.avg
        })
        return
Code example #8
def train_windowed(SincNet_model, MLP_model, optimizer_SincNet, optimizer_MLP,
                   device, epoch, train_loader):
    SincNet_model.train()
    MLP_model.train()
    losses = []
    accuracy = []
    total_samples = []
    for batch_idx, (tracks, int_labels, _) in enumerate(train_loader):
        tracks, int_labels = tracks.to(device), int_labels.to(device)
        # Flatten (batch, windows, samples) so every window is embedded in
        # one forward pass, then average the window embeddings per track.
        batch_size, windows, samples = tracks.size()
        tracks = tracks.view(batch_size * windows, samples)
        embeddings = MLP_model(SincNet_model(tracks)).view(
            batch_size, windows, -1)
        mean_embeddings = torch.mean(embeddings, dim=1)  # (batch, emb_dim)
        loss, correct_negative, total = batch_hard_triplet_loss(
            int_labels,
            mean_embeddings,
            margin_positive=8,
            margin_negative=8,
            device=device,
            squared=True)
        total_samples.append(total)
        losses.append(loss.item())
        acc = (correct_negative / total) * 100
        accuracy.append(acc)
        optimizer_MLP.zero_grad()
        optimizer_SincNet.zero_grad()
        loss.backward()
        optimizer_MLP.step()
        optimizer_SincNet.step()
        print(
            ' Training Epoch {} : [{}/{}] \t Loss: {:.4f} \t Accuracy: {:.2f} \t '
            .format(epoch, batch_idx,
                    int(len(train_loader.dataset) / args.train_batch_size),
                    loss, acc),
            flush=True,
            end='\r')
    return mean(losses), mean(accuracy)
Code example #9
def train_windowed(train_loader, SincNet_model, MLP_model, optimizer_SincNet,
                   optimizer_MLP, epoch, device):
    MLP_model.train()
    SincNet_model.train()
    losses = AverageMeter()
    accuracy = AverageMeter()
    total_samples = 0
    for batch_idx, (tracks, int_labels) in enumerate(train_loader):
        tracks, int_labels = tracks.to(device), int_labels.to(device)
        # One 258-dim embedding per track: embed each window, then average.
        embeddings = torch.zeros(tracks.size(0), 258, device=device)
        for i in range(tracks.size(0)):
            track = tracks[i, :, :]  # (windows, samples), e.g. (15, 3600)
            embedding = MLP_model(SincNet_model(track))
            embedding = torch.mean(embedding, dim=0, keepdim=True)
            embeddings[i, :] = embedding
        loss, correct_negative, total = batch_hard_triplet_loss(
            int_labels,
            embeddings,
            margin_positive=2,
            margin_negative=2,
            device=device,
            squared=True)
        acc = (correct_negative / total) * 100
        accuracy.update(acc, 1)
        optimizer_SincNet.zero_grad()
        optimizer_MLP.zero_grad()
        loss.backward()
        optimizer_SincNet.step()
        optimizer_MLP.step()
        losses.update(loss.detach(), 1)
        print(' Train epoch: {} [{}/{}]\t Loss {:.4f} Acc {:.4f} \t '.format(
            epoch, batch_idx, int(len(train_loader.dataset) / 64), loss, acc),
              flush=True,
              end='\r')
    return losses.avg, accuracy.avg
Code example #10
def train(train_loader, SincNet_model, MLP_model, optimizer_SincNet,
          optimizer_MLP, epoch):
    MLP_model.train()
    SincNet_model.train()
    losses = AverageMeter()
    accuracy = AverageMeter()
    total_samples = 0
    for batch_idx, (tracks, int_labels,
                    string_labels) in enumerate(train_loader):
        tracks, int_labels = tracks.cuda(), int_labels.cuda()
        embeddings = SincNet_model(tracks)
        embeddings = MLP_model(embeddings)
        loss, correct_negative, total = batch_hard_triplet_loss(
            int_labels,
            embeddings,
            margin_positive=2,
            margin_negative=2,
            device='cuda',
            squared=True)
        total_samples = total_samples + total
        acc = (correct_negative / total) * 100
        accuracy.update(acc, 1)
        optimizer_SincNet.zero_grad()
        optimizer_MLP.zero_grad()
        loss.backward()
        optimizer_SincNet.step()
        optimizer_MLP.step()
        # Detach before logging so the meter does not hold the autograd graph.
        losses.update(loss.detach(), 1)
        print(' Train epoch: {} [{}/{}]\t  Loss {:.4f} Acc {:.2f} \t '.format(
            epoch, batch_idx, int(len(train_loader.dataset) / 64), loss, acc),
              flush=True,
              end='\r')

    return losses.avg, accuracy.avg
Code example #11
def test_windowed(SincNet_model, MLP_model, device, epoch, test_loader):
    SincNet_model.eval()
    MLP_model.eval()
    total_samples = []
    losses = []
    accuracy = []
    with torch.no_grad():
        for batch_idx, (tracks, int_labels, _) in enumerate(test_loader):
            tracks, int_labels = tracks.to(device), int_labels.to(device)
            # Flatten (batch, windows, samples), embed every window, then
            # average the window embeddings back into one row per track.
            batch_size, windows, samples = tracks.size()
            tracks = tracks.view(batch_size * windows, samples)
            embeddings = MLP_model(SincNet_model(tracks))
            # Allocate on the same device as the embeddings so row
            # assignment below does not mix CPU and CUDA tensors.
            mean_embeddings = torch.empty(batch_size,
                                          embeddings.size(1),
                                          device=device)
            for i in range(batch_size):
                mean_embeddings[i, :] = torch.mean(
                    embeddings[i * windows:(i + 1) * windows, :], dim=0)
            loss, correct_negative, total = batch_hard_triplet_loss(
                int_labels,
                mean_embeddings,
                margin_positive=8,
                margin_negative=8,
                device=device,
                squared=True)
            total_samples.append(total)
            losses.append(loss.item())
            acc = (correct_negative / total) * 100
            accuracy.append(acc)
    print(
        ' Test/Validate Epoch {}: \t Loss: {:.4f}, Accuracy: {:.2f} \t'.format(
            epoch, mean(losses), mean(accuracy)))
    return mean(losses), mean(accuracy)
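For context, a hypothetical driver loop wiring the spectrogram train/test pair (code examples #1 and #4) into an epoch loop; model, the loaders, device, the learning rate, and args.epochs are placeholders assumed to exist alongside the args.train_batch_size the snippets reference:

import torch

optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)  # lr illustrative
for epoch in range(1, args.epochs + 1):  # args.epochs assumed
    train_loss, train_acc = train(train_loader, model, optimizer, device,
                                  epoch)
    test_loss, test_acc = test(test_loader, model, device, epoch)
    print('Epoch {}: train loss {:.4f} / test loss {:.4f}'.format(
        epoch, train_loss, test_loss))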