def main():
    # Views the training images and displays the distance on anchor-negative and anchor-positive
    test_display_triplet_distance = False

    # print the experiment configuration
    print('\nparsed options:\n{}\n'.format(vars(args)))
    print('\nNumber of Classes:\n{}\n'.format(len(train_dir.classes)))

    # instantiate model and initialize weights
    # TODO(xin): IMPORTANT load num_classes from checkpoint
    model = DeepSpeakerModel(embedding_size=args.embedding_size,
                             num_classes=len(train_dir.classes),
                             feature_dim=num_features,
                             frame_dim=c.NUM_FRAMES)
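    # One way to resolve the TODO above (a sketch, not wired in): store
    # num_classes inside the checkpoint when saving, then read it back before
    # constructing the model so the classifier head matches the saved weights:
    #
    #     ckpt = torch.load(args.resume) if args.resume else None
    #     num_classes = ckpt['num_classes'] if ckpt else len(train_dir.classes)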

    if args.cuda:
        model.cuda()

    from torchsummary import summary
    summary(model, (1, c.NUM_FRAMES, c.NUM_FEATURES))
    # # More detailed information on model
    # print(model)

    optimizer = create_optimizer(model, args.lr)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print('=> loading checkpoint {}'.format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
        else:
            print('=> no checkpoint found at {}'.format(args.resume))

    start = args.start_epoch
    end = start + args.epochs

    train_loader = torch.utils.data.DataLoader(train_dir, batch_size=args.batch_size,
                                               shuffle=False, **kwargs)
    test_loader = torch.utils.data.DataLoader(test_dir, batch_size=args.test_batch_size,
                                              shuffle=False, **kwargs)
    for epoch in range(start, end):

        # evaluation-only mode: run test once and exit
        if args.test_only:
            test(test_loader, model, epoch)
            return

        train(train_loader, model, optimizer, epoch)
        test(test_loader, model, epoch)
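

# The resume branch above expects a checkpoint dict with 'epoch', 'state_dict'
# and 'optimizer' keys. A minimal saving counterpart (a sketch; the project's
# actual train() may persist more fields):
def save_checkpoint(model, optimizer, epoch, path):
    torch.save({'epoch': epoch + 1,  # resume continues at the next epoch
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()}, path)
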
def main():
    # Views the training images and displays the distance on anchor-negative and anchor-positive
    test_display_triplet_distance = False

    # print the experiment configuration
    print('\nparsed options:\n{}\n'.format(vars(args)))
    print('\nNumber of Classes:\n{}\n'.format(len(train_dir.classes)))

    # instantiate model and initialize weights
    model = DeepSpeakerModel(embedding_size=args.embedding_size,
                             num_classes=len(train_dir.classes))

    if args.cuda:
        model.cuda()

    optimizer = create_optimizer(model, args.lr)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print('=> loading checkpoint {}'.format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
        else:
            print('=> no checkpoint found at {}'.format(args.resume))

    start = args.start_epoch
    end = start + args.epochs

    train_loader = torch.utils.data.DataLoader(train_dir,
                                               batch_size=args.batch_size,
                                               shuffle=False,
                                               **kwargs)
    for epoch in range(start, end):

        train(train_loader, model, optimizer, epoch)

        if test_display_triplet_distance:
            display_triplet_distance(model, train_loader,
                                     LOG_DIR + "/train_{}".format(epoch))
def main(libri_dir=c.DATASET_DIR):


    print('Looking for fbank features [.npy] files in {}.'.format(libri_dir))
    libri = data_catalog(libri_dir)
    #                          filename                                       speaker_id
    #   0    audio/LibriSpeechSamples/train-clean-100-npy/1-100-0001.npy        1
    #   1    audio/LibriSpeechSamples/train-clean-100-npy/1-100-0002.npy        1        
    unique_speakers = libri['speaker_id'].unique()  # 251 speakers in train-clean-100
    transform = transforms.Compose([transforms.ToTensor()])

    train_dir = stochastic_mini_batch(libri)
    train_loader = DataLoader(train_dir, batch_size=c.BATCH_SIZE, shuffle=True)
    model = DeepSpeakerModel(embedding_size=c.EMBEDDING_SIZE, num_classes=c.NUM_SPEAKERS)

    optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=0.0)
    # build the loss once rather than re-instantiating it every batch
    criterion = TripletMarginLoss(0.2)
    model.cuda()
    summary(model, input_size=(1, 160, 64))
    for epoch in range(100):       
        model.train()
      
        for batch_idx, (data_a, data_p, data_n, label_a, label_p, label_n) in tqdm(enumerate(train_loader)):

            # move the anchor/positive/negative batches to the GPU as float32
            # (Variable wrapping is no longer needed in modern PyTorch)
            data_a, data_p, data_n = data_a.float().cuda(), data_p.float().cuda(), data_n.float().cuda()
            out_a, out_p, out_n = model(data_a), model(data_p), model(data_n)

            # triplet margin loss: max(d(a, p) - d(a, n) + margin, 0)
            triplet_loss = criterion(out_a, out_p, out_n)
            loss = triplet_loss
            # compute gradient and update weights
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            print('selected_triplet_loss', triplet_loss.item())
        print("epoch:", epoch)
        torch.save(model.state_dict(), "checkpoint_{}.pt".format(epoch))
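
# Usage sketch (assumption): reloading one of the per-epoch checkpoints saved
# above, with the same constructor arguments used at training time.
model = DeepSpeakerModel(embedding_size=c.EMBEDDING_SIZE, num_classes=c.NUM_SPEAKERS)
model.load_state_dict(torch.load("checkpoint_99.pt"))
model.eval()  # inference mode before extracting speaker embeddings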