Example #1
                        pin_memory=True,
                        shuffle=True)
valset2 = CSL_Isolated_Openpose2('trainval')
val_loader2 = DataLoader(dataset=valset2,
                         batch_size=8,
                         num_workers=8,
                         pin_memory=True,
                         shuffle=True)
valset3 = CSL_Isolated_Openpose2('test')
val_loader3 = DataLoader(dataset=valset3,
                         batch_size=8,
                         num_workers=8,
                         pin_memory=True,
                         shuffle=True)
print('Number of batches in the val loader: %d' % len(val_loader))
model = hcn(args.num_class, f_dim=args.feature_dim).to(device)
# Resume model
start_epoch, best_acc = 0, 0.0  # sensible defaults when no checkpoint is given
if checkpoint is not None:
    start_epoch, best_acc = resume_model(model, checkpoint)
# Create loss criterion & optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# Start Evaluation
print("Evaluation Started".center(60, '#'))
for epoch in range(start_epoch, start_epoch + 1):
    # Eval the model
    acc = eval_cnn(model, criterion, val_loader3, device, epoch, log_interval,
                   writer, args)
    print('Batch accu_n on isl: {:.3f}'.format(acc))
    acc = eval_cnn(model, criterion, val_loader, device, epoch, log_interval,
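
The evaluation helper eval_cnn is only called in this snippet, not defined. A minimal sketch of what such a routine typically looks like is given below; the signature is copied from the calls above, while the batch format, logging, and accuracy computation are assumptions and may differ from the project's actual implementation.

import torch

def eval_cnn_sketch(model, criterion, dataloader, device, epoch, log_interval,
                    writer, args):
    # Hypothetical evaluation loop: average cross-entropy loss and top-1 accuracy.
    model.eval()
    total_loss, correct, total = 0.0, 0, 0
    with torch.no_grad():
        for batch_idx, (data, labels) in enumerate(dataloader):
            data, labels = data.to(device), labels.to(device)
            outputs = model(data)
            loss = criterion(outputs, labels)
            total_loss += loss.item()
            correct += (outputs.argmax(dim=1) == labels).sum().item()
            total += labels.size(0)
            if batch_idx % log_interval == 0:
                print('Eval epoch {} [{}/{}]  loss: {:.4f}'.format(
                    epoch, batch_idx, len(dataloader), loss.item()))
    acc = correct / max(total, 1)
    if writer is not None:
        writer.add_scalar('val/acc', acc, epoch)
        writer.add_scalar('val/loss', total_loss / max(len(dataloader), 1), epoch)
    return acc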
Example #2
    trainset = CSL_Continuous_Openpose(skeleton_root=skeleton_root,
                                       list_file=train_list,
                                       dictionary=dictionary,
                                       clip_length=clip_length,
                                       stride=stride)
    # testset = CSL_Continuous_Openpose(skeleton_root=skeleton_root,list_file=val_list,dictionary=dictionary,
    #         clip_length=clip_length,stride=stride)
    print("Trainset samples: {}".format(len(trainset)))
    trainloader = DataLoader(trainset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=1,
                             pin_memory=True)
    # testloader = DataLoader(testset, batch_size=batch_size, shuffle=True, num_workers=1, pin_memory=True)
    # Create model
    model = hcn(num_class=num_classes).to(device)
    if checkpoint is not None:
        start_epoch, best_wer = resume_model(model, checkpoint)
    # Run the model in parallel
    if torch.cuda.device_count() > 1:
        print("Using {} GPUs".format(torch.cuda.device_count()))
        model = nn.DataParallel(model)

    for i, batch in enumerate(trainloader):
        if i == 0:
            input, tgt = batch['input'], batch['tgt']
            input = input.to(device)
            # # Shape of input is: N x S x clip_length x J x D
            # N,S,l,J,D = input.size()
            # # After view & permute, shape of input is: N x D x (Sxl) x J
            # input = input.view(N,-1,J,D).permute(0,3,1,2)
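
The commented-out lines above describe the reshape that turns a continuous skeleton batch into HCN's expected layout. A small, self-contained illustration of that transformation (the tensor sizes below are made-up placeholders, not the project's real settings):

import torch

N, S, l, J, D = 2, 4, 8, 18, 2            # batch, clips, frames per clip, joints, coordinate dim (example values)
clips = torch.randn(N, S, l, J, D)        # N x S x clip_length x J x D
frames = clips.view(N, -1, J, D)          # N x (S*clip_length) x J x D
hcn_input = frames.permute(0, 3, 1, 2)    # N x D x (S*clip_length) x J
print(hcn_input.shape)                    # torch.Size([2, 2, 32, 18])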
Example #3
    devset = CSL_Isolated_Openpose(skeleton_root=skeleton_root,
                                   list_file=val_file,
                                   length=length)
    print("Dataset samples: {}".format(len(trainset) + len(devset)))
    trainloader = DataLoader(trainset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=8,
                             pin_memory=True)
    testloader = DataLoader(devset,
                            batch_size=batch_size,
                            shuffle=False,
                            num_workers=8,
                            pin_memory=True)
    # Create model
    model = hcn(num_class, dropout=dropout).to(device)
    start_epoch, best_prec1 = 0, 0.0  # sensible defaults when no checkpoint is given
    if checkpoint is not None:
        start_epoch, best_prec1 = resume_model(model, checkpoint)
    # Run the model in parallel
    if torch.cuda.device_count() > 1:
        print("Using {} GPUs".format(torch.cuda.device_count()))
        model = nn.DataParallel(model)
    # Create loss criterion & optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    # Start Evaluation
    print("Evaluation Started".center(60, '#'))
    for epoch in range(start_epoch, start_epoch + 1):
        # Test the model
        prec1 = test_isolated(model, criterion, testloader, device, epoch,
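
Every example resumes training state with resume_model(model, checkpoint), which is not shown here. A plausible minimal version, assuming the checkpoint is a dict saved with torch.save and that the key names used below are correct (they are assumptions), could be:

import torch

def resume_model_sketch(model, checkpoint_path):
    # Restore model weights and bookkeeping from a saved checkpoint dict.
    # The key names ('state_dict', 'epoch', 'best') are assumptions.
    ckpt = torch.load(checkpoint_path, map_location='cpu')
    model.load_state_dict(ckpt['state_dict'])
    start_epoch = ckpt.get('epoch', 0)
    best_metric = ckpt.get('best', 0.0)
    print('Resumed from {} (epoch {}, best {:.3f})'.format(
        checkpoint_path, start_epoch, best_metric))
    return start_epoch, best_metric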
Example #4
        'runs/cnn',
        time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))))

# Prepare dataset & dataloader
trainset = CSL_Isolated_Openpose('trainvaltest')
train_sampler = TsneSampler(trainset.label,
                            batch_size,
                            select_class=n_class,
                            n_sample=n_sample)
train_loader = DataLoader(dataset=trainset,
                          batch_sampler=train_sampler,
                          num_workers=num_workers,
                          pin_memory=True)
print('Len of the train loader: %d' % (len(train_loader)))
if model_name == 'HCN':
    model = hcn(args.num_class).to(device)
    start_epoch, best_acc = resume_model(model, checkpoint)
elif model_name == 'PN':
    model_cnn = gcrHCN().to(device)
    model = PN(model_cnn,
               lstm_input_size=args.feature_dim,
               train_way=args.train_way,
               test_way=args.test_way,
               shot=args.shot,
               query=args.query,
               query_val=args.query_val).to(device)
    start_epoch, best_acc = resume_model(model, checkpoint)
elif model_name == 'RN':
    model_cnn = gcrHCN().to(device)
    model = RN(model_cnn,
               lstm_input_size=args.feature_dim,
               train_way=args.train_way,
               test_way=args.test_way,
               shot=args.shot,
               query=args.query,
               query_val=args.query_val).to(device)
    start_epoch, best_acc = resume_model(model, checkpoint)
elif model_name == 'MN':
    model_cnn = gcrHCN().to(device)
    model = MN(model_cnn,
               lstm_input_size=args.feature_dim,
               train_way=args.train_way,
               test_way=args.test_way,
               shot=args.shot,
               query=args.query,
               query_val=args.query_val).to(device)
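
Example #4 builds its loader from a custom TsneSampler whose implementation is not shown. Judging only from how it is constructed above (labels, batch_size, select_class, n_sample) and used as a batch_sampler, a sampler with that interface could look roughly like the sketch below; the real class may select classes and samples differently.

import random

class TsneSamplerSketch:
    # Hypothetical batch sampler: pick `select_class` label classes and
    # `n_sample` items per class, then yield index batches of size `batch_size`,
    # e.g. for extracting features to visualise with t-SNE.
    def __init__(self, labels, batch_size, select_class, n_sample):
        self.batch_size = batch_size
        per_class = {}
        for idx, lab in enumerate(labels):
            per_class.setdefault(int(lab), []).append(idx)
        classes = random.sample(sorted(per_class), select_class)
        self.indices = [i for c in classes
                        for i in random.sample(per_class[c],
                                               min(n_sample, len(per_class[c])))]

    def __iter__(self):
        for start in range(0, len(self.indices), self.batch_size):
            yield self.indices[start:start + self.batch_size]

    def __len__(self):
        return (len(self.indices) + self.batch_size - 1) // self.batch_size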
Example #5
    def __init__(self, out_dim, f_dim=1024):
        super(HCN_GEN, self).__init__()
        self.hcn = hcn(out_dim)
        self.gen = Hallucinator(f_dim)
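
Example #5 pairs the HCN backbone with a Hallucinator, a component commonly used in low-shot learning to generate extra synthetic features from real ones. The Hallucinator used here is not shown, so the following is only an illustrative sketch of the usual pattern; the architecture, noise injection, and dimensions are assumptions.

import torch
import torch.nn as nn

class HallucinatorSketch(nn.Module):
    # Hypothetical feature hallucinator: concatenate a real feature with noise
    # and map it back to a synthetic feature of the same dimension f_dim.
    def __init__(self, f_dim=1024):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(f_dim * 2, f_dim),
            nn.ReLU(inplace=True),
            nn.Linear(f_dim, f_dim))

    def forward(self, feat):
        noise = torch.randn_like(feat)
        return self.net(torch.cat([feat, noise], dim=-1))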