def getDataloader(dataset, args):
    trainset = CSL_Isolated_Openpose('trainvaltest', is_aug=True)
    train_sampler = CategoriesSampler_train(trainset.label, 100, args.train_way,
                                            args.shot, args.query,
                                            args.n_base, args.n_reserve)
    train_loader = DataLoader(dataset=trainset, batch_sampler=train_sampler,
                              num_workers=args.num_workers, pin_memory=True)

    valset = CSL_Isolated_Openpose('test')
    val_sampler = CategoriesSampler_val(valset.label, 100, args.test_way,
                                        args.shot, args.query_val)
    val_loader = DataLoader(dataset=valset, batch_sampler=val_sampler,
                            num_workers=args.num_workers, pin_memory=True)
    return train_loader, val_loader
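# NOTE: CategoriesSampler_train / CategoriesSampler_val are defined elsewhere in the
# project. The class below is only a minimal sketch of how such an episodic batch
# sampler usually works (yield n_batch episodes, each covering `way` classes with
# `shot + query` samples per class); its name and behaviour are assumptions, NOT the
# project's implementation.
import torch


class EpisodicSamplerSketch:
    def __init__(self, labels, n_batch, way, shot, query):
        self.n_batch = n_batch            # number of episodes per epoch
        self.way = way                    # classes per episode
        self.n_per = shot + query         # samples drawn per class
        labels = torch.tensor(labels)
        # sample indices grouped by class label
        self.class_indices = [torch.nonzero(labels == c).flatten()
                              for c in torch.unique(labels)]

    def __len__(self):
        return self.n_batch

    def __iter__(self):
        for _ in range(self.n_batch):
            batch = []
            # pick `way` classes, then `shot + query` random samples from each
            classes = torch.randperm(len(self.class_indices))[:self.way]
            for c in classes:
                idx = self.class_indices[c]
                batch.append(idx[torch.randperm(len(idx))[:self.n_per]])
            # DataLoader accepts any iterable of index lists as batch_sampler
            yield torch.cat(batch).tolist()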
def getValloader(dataset, args):
    valset = CSL_Isolated_Openpose('test')
    val_sampler = CategoriesSampler_val(valset.label, 600, args.test_way,
                                        args.shot, args.query_val)
    val_loader = DataLoader(dataset=valset, batch_sampler=val_sampler,
                            num_workers=args.num_workers, pin_memory=True)
    return val_loader
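# Usage sketch for the two loader builders above. The concrete field values
# (5-way 1-shot, 15 queries, etc.) are illustrative assumptions, not the project's
# defaults, and the (data, label) unpacking assumes the dataset yields such pairs.
from argparse import Namespace

args = Namespace(train_way=5, test_way=5, shot=1, query=15, query_val=15,
                 n_base=100, n_reserve=0, num_workers=8)

train_loader, val_loader = getDataloader(None, args)   # `dataset` arg is unused above
test_loader = getValloader(None, args)

for data, label in train_loader:
    # each batch is one episode: train_way * (shot + query) samples
    ...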
# Use specific gpus
os.environ["CUDA_VISIBLE_DEVICES"] = device_list
# Device setting
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Use writer to record
writer = SummaryWriter(os.path.join(
    summary_name, time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))))
best_prec1 = 0.0
start_epoch = 0

# Train with LSTM
if __name__ == '__main__':
    # Load data
    trainset = CSL_Isolated_Openpose(skeleton_root=skeleton_root, list_file=train_file,
                                     length=length)
    devset = CSL_Isolated_Openpose(skeleton_root=skeleton_root, list_file=val_file,
                                   length=length)
    print("Dataset samples: {}".format(len(trainset) + len(devset)))
    trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True,
                             num_workers=8, pin_memory=True)
    testloader = DataLoader(devset, batch_size=batch_size, shuffle=False,
                            num_workers=8, pin_memory=True)
    # Create model
    model = lstm(input_size=num_joints*2, hidden_size=512, hidden_dim=512,
                 num_layers=3, dropout_rate=dropout, num_classes=num_class,
                 bidirectional=True).to(device)
    if checkpoint is not None:
        start_epoch, best_prec1 = resume_model(model, checkpoint)
    # Run the model in parallel
    if torch.cuda.device_count() > 1:
        print("Using {} GPUs".format(torch.cuda.device_count()))
        model = nn.DataParallel(model)
# Get arguments
args = Arguments()
# Use specific gpus
os.environ["CUDA_VISIBLE_DEVICES"] = device_list
# Device setting
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
best_prec1 = 0.0
start_epoch = 0

# Train with VAE
if __name__ == '__main__':
    # Load data
    trainset = CSL_Isolated_Openpose(skeleton_root=skeleton_root, list_file=train_file,
                                     length=length, is_normalize=False)
    devset = CSL_Isolated_Openpose(skeleton_root=skeleton_root, list_file=val_file,
                                   length=length, is_normalize=False)
    print("Dataset samples: {}".format(len(trainset) + len(devset)))
    trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True,
                             num_workers=num_workers, pin_memory=True)
    testloader = DataLoader(devset, batch_size=batch_size, shuffle=False,
                            num_workers=num_workers, pin_memory=True)
    # Create model
    model = VAE(num_class, dropout=dropout).to(device)
    if checkpoint is not None:
        start_epoch, best_prec1 = resume_model(model, checkpoint)
    # Run the model in parallel
    if torch.cuda.device_count() > 1:
        print("Using {} GPUs".format(torch.cuda.device_count()))
        model = nn.DataParallel(model)
    # Create loss criterion & optimizer
    criterion = nn.CrossEntropyLoss()
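    # The epoch loop below is a generic sketch of what typically follows, NOT the
    # project's training code: the Adam optimizer, learning rate, the 100-epoch
    # placeholder, and the assumption that model(data) returns plain class logits
    # are all illustrative (the VAE here may return extra reconstruction/KL terms
    # that need their own loss).
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    for epoch in range(start_epoch, 100):
        model.train()
        for data, label in trainloader:
            data, label = data.to(device), label.to(device)
            optimizer.zero_grad()
            logits = model(data)
            loss = criterion(logits, label)
            loss.backward()
            optimizer.step()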
# Get args
args = Arguments(shot, dataset)
# Use specific gpus
os.environ["CUDA_VISIBLE_DEVICES"] = device_list
# Device setting
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Use writer to record
writer = SummaryWriter(os.path.join(
    'runs/hcn_gen', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))))

# Prepare dataset & dataloader
trainset = CSL_Isolated_Openpose('trainvaltest')
train_sampler = PretrainSampler(trainset.label, args.shot, args.n_base, batch_size)
train_loader = DataLoader(dataset=trainset, batch_sampler=train_sampler,
                          num_workers=num_workers, pin_memory=True)
valset = CSL_Isolated_Openpose('trainvaltest')
val_sampler = PretrainSampler(valset.label, args.shot, args.n_base, batch_size)
val_loader = DataLoader(dataset=valset, batch_sampler=val_sampler,
                        num_workers=num_workers, pin_memory=True)

model = CNN_GEN(out_dim=args.num_class, f_dim=args.feature_dim).to(device)

# Resume model
if hcn_ckpt is not None:
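    # Hedged sketch only: the project's actual resume logic is defined elsewhere.
    # A common pattern is to load the pretrained HCN weights into the matching
    # layers of CNN_GEN non-strictly; the wrapping/key handling below is assumed.
    state = torch.load(hcn_ckpt, map_location=device)
    state = state.get('state_dict', state)          # tolerate wrapped checkpoints
    result = model.load_state_dict(state, strict=False)
    print('Resumed from {} ({} keys missing)'.format(hcn_ckpt, len(result.missing_keys)))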