예제 #1
0
	def validate(self):
		"""Run one validation epoch: mean loss plus voxel IoU vs. an NN baseline.

		Compares the model's super-resolved ODM predictions (projected back
		to voxels) against a plain nearest-neighbour up-sampling baseline.
		Relies on module-level globals: ``model``, ``dataloader_val``,
		``args``, ``loss_fn``, ``down_sample``, ``up_sample``,
		``upsample_omd``, ``kal``, ``torch``, ``tqdm``.
		"""
		model.eval()
		with torch.no_grad():	
			iou_epoch = 0.
			iou_NN_epoch = 0.
			num_batches = 0
			loss_epoch = 0.

			# Validation loop
			for i, data in enumerate(tqdm(dataloader_val), 0):

				# data creation: down-sample the target voxels and extract
				# ODMs (orthographic depth maps) as the network input
				tgt_odms = data['odms'].to(args.device)
				tgt_voxels = data['voxels'].to(args.device)
				inp_voxels = down_sample(tgt_voxels)
				inp_odms = []
				for voxel in inp_voxels: 
					inp_odms.append(kal.rep.voxel.extract_odms(voxel).unsqueeze(0)) 
				inp_odms = torch.cat(inp_odms)
				
				# inference: network output is a residual scaled by the
				# remaining distance to the 30-voxel far plane
				initial_odms = upsample_omd(inp_odms)*2
				distance = 30 - initial_odms
				pred_odms_update = model(inp_odms)
				pred_odms_update = pred_odms_update * distance
				pred_odms = initial_odms + pred_odms_update

				# losses 
				loss = loss_fn(pred_odms, tgt_odms)
				loss_epoch += float(loss.item())

				# baseline IoU: nearest-neighbour up-sampling of the inputs
				NN_pred = up_sample(inp_voxels)
				iou_NN = kal.metrics.voxel.iou(NN_pred.contiguous(), tgt_voxels)
				iou_NN_epoch += iou_NN

				# project predicted ODMs back onto the NN voxel grid
				pred_odms = pred_odms.int()
				pred_voxels = []
				for odms, voxel_NN in zip(pred_odms, NN_pred): 
					pred_voxels.append(kal.rep.voxel.project_odms(odms,voxel =voxel_NN, votes = 2).unsqueeze(0))
				pred_voxels = torch.cat(pred_voxels)
				iou = kal.metrics.voxel.iou(pred_voxels.contiguous(), tgt_voxels)
				iou_epoch += iou
				

				# logging
				# NOTE(review): iou_epoch / iou_NN_epoch start as floats but
				# become tensors after the first `+=`, so the .item() calls
				# below assume at least one batch was processed — confirm
				# dataloader_val is never empty
				num_batches += 1
				if i % args.print_every == 0:
						out_iou = iou_epoch.item() / float(num_batches)
						out_iou_NN = iou_NN_epoch.item() / float(num_batches)
						tqdm.write(f'[VAL] Epoch {self.cur_epoch:03d}, Batch {i:03d}: IoU: {out_iou}, Iou Base: {out_iou_NN}')
						
			out_iou = iou_epoch.item() / float(num_batches)
			out_iou_NN = iou_NN_epoch.item() / float(num_batches)
			tqdm.write(f'[VAL Total] Epoch {self.cur_epoch:03d}, Batch {i:03d}: IoU: {out_iou}, Iou Base: {out_iou_NN}')

			loss_epoch = loss_epoch / num_batches
			# NOTE(review): appends the epoch IoU (out_iou), not loss_epoch,
			# to self.val_loss — presumably intentional (IoU used for model
			# selection), but the attribute name is misleading; verify
			self.val_loss.append(out_iou)
예제 #2
0
	def train(self):
		"""Run one training epoch of the 30-voxel ODM super-resolution model.

		For each batch, down-samples the target voxels, extracts ODMs
		(orthographic depth maps) as input, and trains the network to
		predict a residual depth update scaled by the remaining distance
		to the far plane.

		Relies on module-level globals: ``model``, ``optimizer``,
		``dataloader_train``, ``args``, ``loss_fn``, ``down_sample``,
		``upsample_omd``, ``kal``, ``torch``, ``tqdm``.

		Side effects: updates the model weights, appends the mean batch
		loss to ``self.train_loss`` and increments ``self.cur_epoch``.
		"""
		loss_epoch = 0.
		num_batches = 0
		model.train()
		# Train loop
		for i, data in enumerate(tqdm(dataloader_train), 0):
			optimizer.zero_grad()
			
			# data creation: down-sample targets, extract one ODM stack
			# per voxel grid and batch them together
			tgt_odms = data['odms'].to(args.device)
			tgt_voxels = data['voxels'].to(args.device)
			inp_voxels = down_sample(tgt_voxels)
			inp_odms = []
			for voxel in inp_voxels: 
				inp_odms.append(kal.rep.voxel.extract_odms(voxel).unsqueeze(0)) 
			inp_odms = torch.cat(inp_odms)
			
			# inference: the network predicts a residual update which is
			# scaled by how far each up-sampled depth may still grow
			# (30-voxel far plane)
			initial_odms = upsample_omd(inp_odms)*2
			distance = 30 - initial_odms
			pred_odms_update = model(inp_odms)
			pred_odms_update = pred_odms_update * distance
			# BUG FIX: this sum was computed twice in a row; the duplicate
			# statement has been removed (also dropped the unused `diff`).
			pred_odms = initial_odms + pred_odms_update

			# losses 
			loss = loss_fn(pred_odms, tgt_odms)
			loss.backward()
			loss_epoch += float(loss.item())

			# logging
			num_batches += 1
			if i % args.print_every == 0:
				tqdm.write(f'[TRAIN] Epoch {self.cur_epoch:03d}, Batch {i:03d}: Loss: {float(loss.item())}')
				
			optimizer.step()
		
		
		loss_epoch = loss_epoch / num_batches
		self.train_loss.append(loss_epoch)
		self.cur_epoch += 1
예제 #3
0
    def train(self):
        """Train the 128-resolution ODM super-resolution model for one epoch.

        Each batch supplies pre-computed low-resolution (32) and
        high-resolution (128) depth maps; the network learns a residual
        update on top of a naive 4x up-sampling, scaled by the remaining
        headroom up to the 128-voxel ceiling.

        Uses module-level globals (``model``, ``optimizer``,
        ``dataloader_train``, ``args``, ``loss_fn``, ``upsample_omd``,
        ``tqdm``).  Appends the mean batch loss to ``self.train_loss``
        and increments ``self.cur_epoch``.
        """
        running_loss = 0.
        batches_seen = 0
        model.train()

        # Train loop
        for batch_idx, batch in enumerate(tqdm(dataloader_train), 0):
            optimizer.zero_grad()

            # Fetch the paired low/high resolution depth maps.
            tgt_odms = batch['odms_128'].to(args.device)
            inp_odms = batch['odms_32'].to(args.device)

            # Residual prediction: scale the network output by how much
            # each up-sampled depth value may still grow.
            base_odms = upsample_omd(inp_odms) * 4
            headroom = 128 - base_odms
            pred_odms = base_odms + model(inp_odms) * headroom

            # Optimise on the prediction error.
            loss = loss_fn(pred_odms, tgt_odms)
            loss.backward()
            running_loss += float(loss.item())

            # Periodic progress logging.
            batches_seen += 1
            if batch_idx % args.print_every == 0:
                tqdm.write(
                    f'[TRAIN] Epoch {self.cur_epoch:03d}, Batch {batch_idx:03d}: Loss: {float(loss.item())}'
                )

            optimizer.step()

        self.train_loss.append(running_loss / batches_seen)
        self.cur_epoch += 1
예제 #4
0
# Joint evaluation of two networks: ``model_res`` predicts a residual depth
# update and ``model_occ`` predicts per-ray occupancy scores; the two are
# combined below.  NOTE(review): the loop body appears to continue beyond
# this chunk — the combined prediction is not used within the visible lines.
model_res.eval()
model_occ.eval()
with torch.no_grad():
    for data in tqdm(dataloader_val):

        # data creation: down-sample the target voxels and extract ODMs
        # (orthographic depth maps) as the shared network input
        tgt_odms = data['odms'].to(args.device)
        tgt_voxels = data['voxels'].to(args.device)
        inp_voxels = down_sample(tgt_voxels)
        inp_odms = []
        for voxel in inp_voxels:
            inp_odms.append(kal.rep.voxel.extract_odms(voxel).unsqueeze(0))
        inp_odms = torch.cat(inp_odms)

        # inference res: residual scaled by the remaining distance to the
        # 30-voxel far plane, added to the naive 2x up-sampled depths
        initial_odms = upsample_omd(inp_odms) * 2
        distance = 30 - initial_odms
        pred_odms_update = model_res(inp_odms)
        pred_odms_update = pred_odms_update * distance
        pred_odms_res = initial_odms + pred_odms_update

        # inference occ: occupancy scores for the same inputs
        pred_odms_occ = model_occ(inp_odms)

        # combine: threshold occupancy at .5, snapping each entry either
        # to the maximum depth (the last ODM dimension) or to empty (0)
        pred_odms_res = pred_odms_res.int()
        ones = pred_odms_occ > .5
        zeros = pred_odms_occ <= .5
        pred_odms_occ[ones] = pred_odms_occ.shape[-1]
        pred_odms_occ[zeros] = 0