Example #1
	def validate(self):
		model.eval()
		with torch.no_grad():	
			iou_epoch = 0.
			iou_NN_epoch = 0.
			num_batches = 0
			loss_epoch = 0.

			# Validation loop
			for i, data in enumerate(tqdm(dataloader_val), 0):

				# data creation
				tgt_odms = data['odms'].to(args.device)
				tgt_voxels = data['voxels'].to(args.device)
				inp_voxels = down_sample(tgt_voxels)

				inp_odms = []
				for voxel in inp_voxels: 
					inp_odms.append(kal.rep.voxel.extract_odms(voxel).unsqueeze(0)) 
				inp_odms = torch.cat(inp_odms)

				tgt_odms_occ = to_occpumancy_map(tgt_odms)
				
				# inference 
				pred_odms = model(inp_odms)

				# losses 
				loss = loss_fn(pred_odms, tgt_odms_occ)
				loss_epoch += float(loss.item())

				ones = pred_odms > .5
				zeros = pred_odms <= .5
				pred_odms[ones] =  pred_odms.shape[-1]
				pred_odms[zeros] = 0 

				NN_pred = up_sample(inp_voxels)
				iou_NN = kal.metrics.voxel.iou(NN_pred.contiguous(), tgt_voxels)
				iou_NN_epoch += iou_NN
				
				pred_voxels = []
				for odms, voxel_NN in zip(pred_odms, NN_pred): 
					pred_voxels.append(kal.rep.voxel.project_odms(odms, voxel = voxel_NN, votes = 2).unsqueeze(0))
				pred_voxels = torch.cat(pred_voxels)
				iou = kal.metrics.voxel.iou(pred_voxels.contiguous(), tgt_voxels)
				iou_epoch += iou
				

				# logging
				num_batches += 1
				if i % args.print_every == 0:
						out_iou = iou_epoch.item() / float(num_batches)
						out_iou_NN = iou_NN_epoch.item() / float(num_batches)
						tqdm.write(f'[VAL] Epoch {self.cur_epoch:03d}, Batch {i:03d}: IoU: {out_iou}, Iou Base: {out_iou_NN}')
						
			out_iou = iou_epoch.item() / float(num_batches)
			out_iou_NN = iou_NN_epoch.item() / float(num_batches)
			tqdm.write(f'[VAL Total] Epoch {self.cur_epoch:03d}, Batch {i:03d}: IoU: {out_iou}, Iou Base: {out_iou_NN}')

			loss_epoch = loss_epoch / num_batches
			self.val_loss.append(out_iou)
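Note: up_sample, down_sample and to_occpumancy_map are project-specific helpers defined outside this excerpt, and kal refers to the Kaolin library. The NN_pred baseline is a nearest-neighbor upsampling of the coarse input voxels to the target resolution. A minimal sketch of what such helpers could look like, assuming 32^3 inputs and 128^3 targets (the pooling and interpolation choices are assumptions, not the repository's actual implementation):

import torch
import torch.nn.functional as F

def up_sample(voxels, scale=4):
    # Nearest-neighbor upsampling of a batch of voxel grids: (B, D, H, W) -> (B, 4D, 4H, 4W).
    return F.interpolate(voxels.unsqueeze(1), scale_factor=scale, mode='nearest').squeeze(1)

def down_sample(voxels, scale=4):
    # Max-pooling downsample: a coarse cell is occupied if any of its high-res voxels is.
    return F.max_pool3d(voxels.unsqueeze(1), kernel_size=scale).squeeze(1)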
Example #2
    def validate(self):
        model.eval()
        with torch.no_grad():
            iou_epoch = 0.
            iou_NN_epoch = 0.
            num_batches = 0
            loss_epoch = 0.

            # Validation loop
            for i, sample in enumerate(tqdm(dataloader_val), 0):
                data = sample['data']
                # data creation
                tgt = data['128'].to(device)
                inp = data['32'].to(device)

                # inference
                pred = model(inp.unsqueeze(1))

                # losses
                tgt = tgt.long()
                loss = loss_fn(pred, tgt)
                loss_epoch += float(loss.item())

                pred = pred[:, 1, :, :]
                iou = kal.metrics.voxel.iou(pred.contiguous(), tgt)
                iou_epoch += iou

                NN_pred = up_sample(inp)
                iou_NN = kal.metrics.voxel.iou(NN_pred.contiguous(), tgt)
                iou_NN_epoch += iou_NN

                # logging
                num_batches += 1
                if i % args.print_every == 0:
                    out_iou = iou_epoch.item() / float(num_batches)
                    out_iou_NN = iou_NN_epoch.item() / float(num_batches)
                    tqdm.write(
                        f'[VAL] Epoch {self.cur_epoch:03d}, Batch {i:03d}: IoU: {out_iou}, Iou Base: {out_iou_NN}'
                    )

            out_iou = iou_epoch.item() / float(num_batches)
            out_iou_NN = iou_NN_epoch.item() / float(num_batches)
            tqdm.write(
                f'[VAL Total] Epoch {self.cur_epoch:03d}, Batch {i:03d}: IoU: {out_iou}, Iou Base: {out_iou_NN}'
            )

            loss_epoch = loss_epoch / num_batches
            self.val_loss.append(out_iou)
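kal.metrics.voxel.iou is Kaolin's voxel intersection-over-union metric. Conceptually it binarizes both grids and divides the overlap by the union; a rough stand-in for readers without Kaolin installed (the 0.5 threshold and batch-wide reduction are assumptions, not Kaolin's exact implementation):

import torch

def voxel_iou(pred, target, thresh=0.5):
    # Binarize both grids, then compute |intersection| / |union| over the whole batch.
    pred_bin = (pred > thresh).float()
    target_bin = (target > thresh).float()
    intersection = (pred_bin * target_bin).sum()
    union = ((pred_bin + target_bin) > 0).float().sum()
    return intersection / union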
Example #3
iou_epoch = 0.
iou_NN_epoch = 0.
num_batches = 0

model.eval()
with torch.no_grad():
	for data in tqdm(dataloader_val): 
		tgt = data['data'].to(args.device)
		inp = down_sample(tgt)

		# inference 
		pred = model(inp)

		iou = kal.metrics.voxel.iou(pred[:,1,:,:].contiguous(), tgt)
		iou_epoch += iou
		
		NN_pred = up_sample(inp)
		iou_NN = kal.metrics.voxel.iou(NN_pred.contiguous(), tgt)
		iou_NN_epoch += iou_NN

		if args.vis: 
			for i in range(pred.shape[0]):
				print ('Rendering low resolution input')
				kal.visualize.show_voxel(inp[i,0], mode = 'exact', thresh = .5)
				print ('Rendering high resolution target')
				kal.visualize.show_voxel(tgt[i], mode = 'exact', thresh = .5)
				print ('Rendering high resolution prediction')
				kal.visualize.show_voxel(pred[i,1], mode = 'exact', thresh = .5)
				print('----------------------')
		num_batches += 1. 

out_iou_NN = iou_NN_epoch.item() / float(num_batches)
Example #4
iou_epoch = 0.
iou_NN_epoch = 0.
num_batches = 0

model.eval()
with torch.no_grad():
    for data in tqdm(dataloader_val):

        tgt_odms = data['odms_128'].to(args.device)
        tgt_voxels = data['voxels_128'].to(args.device)
        inp_odms = data['odms_32'].to(args.device)
        inp_voxels = data['voxels_32'].to(args.device)

        # inference
        pred_odms = model(inp_odms) * 128

        NN_pred = up_sample(inp_voxels)
        iou_NN = kal.metrics.voxel.iou(NN_pred.contiguous(), tgt_voxels)
        iou_NN_epoch += iou_NN

        pred_odms = pred_odms.int()
        pred_voxels = []
        # NN_pred contains nearest-neighbor upsampled voxel grids, so name the loop variable accordingly.
        for odms, voxel_NN in zip(pred_odms, NN_pred):
            pred_voxels.append(
                kal.rep.voxel.project_odms(odms, voxel=voxel_NN,
                                           votes=2).unsqueeze(0))
        pred_voxels = torch.cat(pred_voxels)

        iou = kal.metrics.voxel.iou(pred_voxels.contiguous(), tgt_voxels)
        iou_epoch += iou

        if args.vis:
Example #5
    def validate(self):
        model.eval()
        with torch.no_grad():
            iou_epoch = 0.
            iou_NN_epoch = 0.
            num_batches = 0
            loss_epoch = 0.

            # Validation loop
            for i, data in enumerate(tqdm(dataloader_val), 0):

                # data creation
                tgt_odms = data['odms_128'].to(args.device)
                tgt_voxels = data['voxels_128'].to(args.device)
                inp_odms = data['odms_32'].to(args.device)
                inp_voxels = data['voxels_32'].to(args.device)

                # inference
                initial_odms = upsample_omd(inp_odms) * 4
                distance = 128 - initial_odms
                pred_odms_update = model(inp_odms)
                pred_odms_update = pred_odms_update * distance
                pred_odms = initial_odms + pred_odms_update

                # losses
                loss = loss_fn(pred_odms, tgt_odms)
                loss_epoch += float(loss.item())

                NN_pred = up_sample(inp_voxels)
                iou_NN = kal.metrics.voxel.iou(NN_pred.contiguous(),
                                               tgt_voxels)
                iou_NN_epoch += iou_NN

                pred_voxels = []
                pred_odms = pred_odms.int()
                for odms, voxel_NN in zip(pred_odms, NN_pred):
                    pred_voxels.append(
                        kal.rep.voxel.project_odms(odms, voxel_NN,
                                                   votes=2).unsqueeze(0))
                pred_voxels = torch.cat(pred_voxels)
                iou = kal.metrics.voxel.iou(pred_voxels.contiguous(),
                                            tgt_voxels)
                iou_epoch += iou

                # logging
                num_batches += 1
                if i % args.print_every == 0:
                    out_iou = iou_epoch.item() / float(num_batches)
                    out_iou_NN = iou_NN_epoch.item() / float(num_batches)
                    tqdm.write(
                        f'[VAL] Epoch {self.cur_epoch:03d}, Batch {i:03d}: IoU: {out_iou}, Iou Base: {out_iou_NN}'
                    )

            out_iou = iou_epoch.item() / float(num_batches)
            out_iou_NN = iou_NN_epoch.item() / float(num_batches)
            tqdm.write(
                f'[VAL Total] Epoch {self.cur_epoch:03d}, Batch {i:03d}: IoU: {out_iou}, Iou Base: {out_iou_NN}'
            )

            loss_epoch = loss_epoch / num_batches
            self.val_loss.append(out_iou)
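The inference step above predicts the high-resolution ODMs as a bounded residual: the 32-resolution ODMs are upsampled and scaled by 4 to give coarse depths, and the network output (assumed to lie in [0, 1]) only spends the remaining distance to the 128-voxel limit, so predictions cannot leave [0, 128]. A tiny self-contained check of that property (shapes are hypothetical):

import torch

initial = torch.randint(0, 129, (2, 6, 128, 128)).float()  # stand-in for upsample_omd(inp_odms) * 4
update = torch.rand_like(initial)                           # stand-in for a network output in [0, 1]
pred = initial + update * (128 - initial)
assert pred.min() >= 0 and pred.max() <= 128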
Example #6
from model import FFTSR
import tensorflow as tf
import numpy as np
import cv2
from utils import fft, bicubic, up_sample, imshow, ifft, imshow_spectrum
from matplotlib import pyplot as plt

if __name__ == '__main__':
    img = 'images_train/butterfly.bmp'
    # img = cv2.imread(img,cv2.IMREAD_GRAYSCALE)
    img = cv2.imread(img)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)

    print('img_shape ->', img.shape)  # (H, W, 3): Y, Cr, Cb channels

    # img = img.reshape([1,256,256,1])
    with tf.Session() as sess:
        # Scale pixel values to [0, 1], then by an extra factor of 1e3 * 1e-5 = 0.01.
        hr_img = img / 255.0 * (1e3 * 1e-5)
        lr_img = up_sample(bicubic(img)) / 255.0 * (1e3 * 1e-5)
        # imshow_spectrum(lr_img)

        fftsr = FFTSR(sess, 1e-4, 10000)

        # fftsr.build_model()
        # fftsr.run(hr_img,lr_img)
        fftsr.run(hr_img[:, :, 0], lr_img[:, :, 0])

        # out = fftsr.pred
        # print(out)
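Here bicubic and up_sample come from the project's utils module; the intent is to create a degraded low-resolution version of the image at its original size, so hr_img and lr_img align pixel-for-pixel. A plausible sketch of those helpers using OpenCV (the scale factor and interpolation modes are assumptions, not the project's actual code):

import cv2

def bicubic(img, scale=2):
    # Shrink the image with bicubic interpolation, discarding high-frequency detail.
    h, w = img.shape[:2]
    return cv2.resize(img, (w // scale, h // scale), interpolation=cv2.INTER_CUBIC)

def up_sample(img, scale=2):
    # Bring the degraded image back to the original resolution.
    h, w = img.shape[:2]
    return cv2.resize(img, (w * scale, h * scale), interpolation=cv2.INTER_CUBIC)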