def demo_single(img1_path):
    """Run single-view inference on one image and write the predicted
    64x64x64 occupancy volume to ``result.binvox``.

    Loads weights from ``./model/<args.resume_single>`` if present (falls
    back to randomly initialized weights with a warning otherwise), feeds
    the image through ``single_UNet``, takes the per-voxel argmax class as
    occupancy, and stores it into a binvox container cloned from
    ``0_0.binvox``.  If ``args.gt`` is given, also runs ``eval_model()``.

    Args:
        img1_path: path to the input image; read via the project-local
            ``read_img`` helper (assumed to return a CHW tensor — confirm).
    """
    resume_single = './model/' + args.resume_single
    model = single_UNet()
    model.eval()  # inference mode: disable dropout / fix batch-norm stats

    if os.path.exists(resume_single):
        # map_location keeps tensors on CPU regardless of where they were saved
        model.load_state_dict(
            torch.load(resume_single,
                       map_location=lambda storage, loc: storage)['model'])
        print('load trained model {} success'.format(resume_single))
    else:
        print("Warning! no resume file to load\n")

    img1_tensor = read_img(img1_path)
    img = torch.unsqueeze(img1_tensor, 0)  # add batch dim: (1, C, H, W)

    # no_grad: pure inference, skip autograd graph construction.
    # (Replaces the deprecated autograd.Variable wrapper — a no-op since
    # torch 0.4.)
    init = time.time()
    with torch.no_grad():
        outputs = model(img)
    end = time.time()
    print('inference time: {:.4f}s'.format(end - init))

    outputs = prob(outputs)  # project-local: presumably softmax over classes
    # argmax over the class dimension -> 1 where "occupied" wins
    _, occupy = torch.max(outputs.data, dim=1)
    # occupy=(outputs.data[:1]>0.5)
    # print (occupy)
    occupy = occupy.view(-1, 64, 64, 64)
    occupy = torch.squeeze(occupy, dim=0).numpy()

    # Reuse the header/metadata of an existing binvox file as a template,
    # then overwrite its payload with our prediction.
    with open('0_0.binvox', 'rb') as f:
        # m1 = read_as_coord_array(f)
        m1 = read_as_orginal_coord(f)
    m1.data = occupy
    with open('result.binvox', 'wb') as f1:
        write(m1, f1)  ## write the result 64x64x64 data to .binvox file

    if args.gt is not None:
        eval_model()
# ---- training setup (module-level script state) ----
data_rootpath=args.data
resume='./model/'+args.resume ## args.resume: just the name of checkpoint file
logfile=args.data_name+'_single_train.txt'
print ('logfile name:{}'.format(logfile))

# Pin all CUDA work to the GPU selected on the command line.
if is_GPU:
    torch.cuda.set_device(args.gpu)

# Dataset + loader; single_collate is the project-local batch collator.
dataset=singleDataset(data_rootpath,data_name=args.data_name)
data_loader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size, shuffle=True, collate_fn=single_collate)

model=single_UNet()
#model=singleNet_deeper()
if is_GPU:
    model.cuda()

# NOTE: "critenrion" is a typo for "criterion", kept for compatibility
# with the rest of the file.
critenrion=CrossEntropy_loss()
#critenrion=VoxelL1()

# Adam with beta1=0.5 (common choice in GAN-style training recipes).
optimizer=torch.optim.Adam(model.parameters(),lr=args.lr,betas=(0.5,0.999))

# Best validation IOU seen so far; updated by save_checkpoint below.
current_best_IOU=0

def save_checkpoint(epoch,model,optimizer):
    # Persist model/optimizer state; body continues past this chunk.
    global current_best_IOU
    torch.save({
        'model': model.state_dict(),