def main():
    # Display the training images and the anchor-positive / anchor-negative distances
    test_display_triplet_distance = True

    # print the experiment configuration
    print('\nparsed options:\n{}\n'.format(vars(args)))
    print('\nNumber of Classes:\n{}\n'.format(len(train_dir.classes)))

    # instantiate model and initialize weights
    model = FaceModel(embedding_size=args.embedding_size,
                      num_classes=len(train_dir.classes),
                      pretrained=False)
    model.to(device)

    triplet_loss = TripletMarginLoss(args.margin)
    optimizer = create_optimizer(model, args.lr)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print('=> loading checkpoint {}'.format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
        else:
            print('=> no checkpoint found at {}'.format(args.resume))

    start = args.start_epoch
    end = start + args.epochs

    for epoch in range(start, end):
        print(80 * '=')
        print('Epoch [{}/{}]'.format(epoch, end - 1))

        time0 = time.time()
        own_train(train_loader, model, triplet_loss, optimizer, epoch, data_size)
        print(f' Execution time = {time.time() - time0}')
        print(80 * '=')

        if test_display_triplet_distance:
            display_triplet_distance(model, train_loader, LOG_DIR + "/train_{}".format(epoch))

        print(80 * '=')
        time0 = time.time()
        own_test(test_loader, model, epoch)
        print(f' Execution time = {time.time() - time0}')
        print(80 * '=')

        if test_display_triplet_distance:
            display_triplet_distance_test(model, test_loader, LOG_DIR + "/test_{}".format(epoch))
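# The helper create_optimizer(model, lr) is used in every variant below but is not
# shown in these scripts. A minimal sketch of what it could look like, assuming it
# switches on an args.optimizer flag and reads args.wd / args.lr_decay (these
# argument names are assumptions for illustration, not taken from the code above):
def create_optimizer(model, new_lr):
    # Build an optimizer over all model parameters with the requested learning rate.
    if args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(), lr=new_lr,
                                    momentum=0.9, dampening=0.9,
                                    weight_decay=args.wd)
    elif args.optimizer == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=new_lr,
                                     weight_decay=args.wd)
    else:
        optimizer = torch.optim.Adagrad(model.parameters(), lr=new_lr,
                                        lr_decay=args.lr_decay,
                                        weight_decay=args.wd)
    return optimizer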
def main():
    # Display the training images and the anchor-positive / anchor-negative distances
    test_display_triplet_distance = True

    # print the experiment configuration
    print('\nparsed options:\n{}\n'.format(vars(args)))
    print('\nNumber of Classes:\n{}\n'.format(len(train_dir.classes)))

    # instantiate model and initialize weights
    model = FaceModel(inceptionresnet_v1,
                      embedding_size=args.embedding_size,
                      num_classes=len(train_dir.classes),
                      pretrained=False)
    if args.cuda:
        model.cuda()

    optimizer = create_optimizer(model, args.lr)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print('=> loading checkpoint {}'.format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
        else:
            print('=> no checkpoint found at {}'.format(args.resume))

    start = args.start_epoch
    end = start + args.epochs

    # wrap the model for multi-GPU training
    # (torch.nn.parallel.data_parallel is a functional forward-pass helper and
    #  requires inputs; the module wrapper DataParallel is what is needed here)
    para_model = torch.nn.DataParallel(model)

    for epoch in range(start, end):
        train(train_loader, para_model, optimizer, epoch)
        # test(test_loader, model, epoch)

        # do checkpointing
        torch.save({'epoch': epoch + 1,
                    'state_dict': model.state_dict()},
                   '{}/checkpoint_{}.pth'.format(LOG_DIR, epoch))

        if test_display_triplet_distance:
            display_triplet_distance(model, train_loader, LOG_DIR + "/train_{}".format(epoch))
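# The resume path in the DeepSpeaker variant below also restores
# checkpoint['optimizer'], while the checkpoint written above stores only the epoch
# and the model weights. A minimal sketch of a saving helper that would make the two
# compatible; the name save_checkpoint and saving once per epoch are assumptions:
def save_checkpoint(model, optimizer, epoch, log_dir):
    # Persist model weights and optimizer state so a later resume can call
    # both model.load_state_dict and optimizer.load_state_dict.
    torch.save({'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()},
               '{}/checkpoint_{}.pth'.format(log_dir, epoch))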
def main():
    # Display the training images and the anchor-positive / anchor-negative distances
    test_display_triplet_distance = False

    # print the experiment configuration
    print('\nparsed options:\n{}\n'.format(vars(args)))
    print('\nNumber of Classes:\n{}\n'.format(len(train_dir.classes)))

    # instantiate model and initialize weights
    model = DeepSpeakerModel(embedding_size=args.embedding_size,
                             num_classes=len(train_dir.classes))
    if args.cuda:
        model.cuda()

    optimizer = create_optimizer(model, args.lr)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print('=> loading checkpoint {}'.format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
        else:
            print('=> no checkpoint found at {}'.format(args.resume))

    start = args.start_epoch
    end = start + args.epochs

    train_loader = torch.utils.data.DataLoader(train_dir, batch_size=args.batch_size,
                                               shuffle=False, **kwargs)

    for epoch in range(start, end):
        train(train_loader, model, optimizer, epoch)
        # test(test_loader, model, epoch)

        if test_display_triplet_distance:
            display_triplet_distance(model, train_loader, LOG_DIR + "/train_{}".format(epoch))
def main():
    # Display the anchor-positive / anchor-negative distances after each epoch.
    # NOTE: unclear why test_display_triplet_distance is also set to True in center_loss.py.
    test_display_triplet_distance = True

    # print the experiment configuration
    print('\nparsed options:\n{}\n'.format(vars(args)))

    # the number of classes is hard-coded here instead of using len(train_dir.classes)
    num_classes = 6400
    print('\nNumber of Classes:\n{}\n'.format(num_classes))

    # optionally resume from a checkpoint
    checkpoint = None
    if args.resume:
        if os.path.isfile(args.resume):
            print('=> loading checkpoint {}'.format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
        else:
            print('=> no checkpoint found at {}'.format(args.resume))

    # instantiate model and initialize weights (from the checkpoint, if one was loaded)
    model = FaceModelSoftmax(embedding_size=args.embedding_size,
                             num_classes=num_classes,
                             checkpoint=checkpoint)
    if args.cuda:
        model.cuda()

    optimizer = create_optimizer(model, args.lr)

    start = args.start_epoch
    end = start + args.epochs

    for epoch in range(start, end):
        train(train_loader, model, optimizer, epoch)
        test(test_loader, model, epoch)
        testaccuracy(testaccuracy_loader, model, epoch)
        testRecall(testaccuracy_loader, model, epoch)

        if test_display_triplet_distance:
            display_triplet_distance_test(model, test_loader, LOG_DIR + "/test_{}".format(epoch))
            display_triplet_distance(model, train_loader, LOG_DIR + "/train_{}".format(epoch))
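# None of these scripts shows how main() is invoked. Assuming each variant lives in
# its own file, the usual entry-point guard would be:
if __name__ == '__main__':
    main()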