Example #1
    # Load data
    trainset = CSL_Phoenix_Openpose(skeleton_root=train_skeleton_root, annotation_file=train_annotation_file,
            dictionary=dictionary, clip_length=clip_length, stride=stride)
    devset = CSL_Phoenix_Openpose(skeleton_root=dev_skeleton_root, annotation_file=dev_annotation_file,
            dictionary=dictionary, clip_length=clip_length, stride=stride)
    testset = CSL_Phoenix_Openpose(skeleton_root=test_skeleton_root, annotation_file=test_annotation_file,
            dictionary=dictionary, clip_length=clip_length, stride=stride)
    print("Dataset samples: {}".format(len(trainset) + len(devset) + len(testset)))
    trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers,
            pin_memory=True, collate_fn=skeleton_collate)
    # Dev/test loaders are only read for evaluation, so shuffling is unnecessary
    devloader = DataLoader(devset, batch_size=batch_size, shuffle=False, num_workers=num_workers,
            pin_memory=True, collate_fn=skeleton_collate)
    testloader = DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=num_workers,
            pin_memory=True, collate_fn=skeleton_collate)
    # Create model
    model = hcn_lstm(vocab_size, clip_length=clip_length,
                     num_classes=num_classes, hidden_dim=hidden_dim).to(device)
    start_epoch, best_wer = resume_model(model, checkpoint)
    # Run the model on multiple GPUs in parallel if available
    if torch.cuda.device_count() > 1:
        print("Using {} GPUs".format(torch.cuda.device_count()))
        model = nn.DataParallel(model)
    # Create loss criterion
    criterion = nn.CTCLoss()

    wer = 100.00
    # Start evaluation
    print("Evaluation Started".center(60, '#'))
    for epoch in range(start_epoch, start_epoch + 1):
        # Evaluate the model on the held-out test split
        wer = test_hcn_lstm(model, criterion, testloader, device, epoch, log_interval, writer, reverse_dict)
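
A note on skeleton_collate, which every DataLoader above receives: its definition is not part of this example. Because skeleton sequences differ in temporal length, a collate function feeding nn.CTCLoss typically pads the inputs and flattens the targets. The sketch below is only an assumption about that contract (the (skeleton, target) sample layout and all names here are hypothetical, not the repository's actual code):

    import torch
    from torch.nn.utils.rnn import pad_sequence

    def skeleton_collate(batch):
        # batch: list of (skeleton, target) pairs; skeleton is a float tensor of
        # shape (T_i, num_joints, coords), target a 1-D LongTensor of gloss indices
        skeletons, targets = zip(*batch)
        input_lengths = torch.tensor([s.size(0) for s in skeletons], dtype=torch.long)
        target_lengths = torch.tensor([t.size(0) for t in targets], dtype=torch.long)
        # Zero-pad along the time axis -> (batch, T_max, num_joints, coords)
        padded = pad_sequence(skeletons, batch_first=True)
        # CTC consumes all targets concatenated into one 1-D tensor
        flat_targets = torch.cat(targets)
        return padded, flat_targets, input_lengths, target_lengths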
Example #2
 # Dev/test loaders are only read for evaluation, so shuffling is unnecessary
 devloader = DataLoader(devset,
                        batch_size=batch_size,
                        shuffle=False,
                        num_workers=num_workers,
                        pin_memory=True,
                        collate_fn=skeleton_collate)
 testloader = DataLoader(testset,
                         batch_size=batch_size,
                         shuffle=False,
                         num_workers=num_workers,
                         pin_memory=True,
                         collate_fn=skeleton_collate)
 # Create model
 model = hcn_lstm(vocab_size,
                  clip_length=clip_length,
                  num_classes=num_classes,
                  hidden_dim=hidden_dim,
                  dropout=0.8).to(device)  # default dropout is 0.4
 model = resume_hcn_module(model, hcn_checkpoint)
 if hcn_lstm_ckpt is not None:
     model = resume_hcn_lstm(model, hcn_lstm_ckpt)
 if mainpart_ckpt is not None:
     model = resume_main_part(model, mainpart_ckpt)
 if checkpoint is not None:
     start_epoch, best_wer = resume_model(model, checkpoint)
 # Run the model on multiple GPUs in parallel if available
 if torch.cuda.device_count() > 1:
     print("Using {} GPUs".format(torch.cuda.device_count()))
     model = nn.DataParallel(model)
 # Create loss criterion & optimizer
 # criterion = nn.CTCLoss(zero_infinity=True)
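
The example ends before the criterion and optimizer are actually constructed. One plausible completion, assuming the same CTC objective as Example #1 plus a standard Adam optimizer (learning_rate and weight_decay are placeholder hyperparameters, not values taken from the repository):

 # Hypothetical completion: CTC loss plus Adam (names and values assumed)
 criterion = nn.CTCLoss(zero_infinity=True)  # zero_infinity avoids inf loss on too-short inputs
 optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)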