Example no. 1
0
    def train(self, input, real_val):
        """Run one optimization step on a batch.

        Args:
            input: input tensor fed to the model after left-padding the last
                dimension by one step (presumably [batch, feat, nodes, seq]
                — TODO confirm against the data loader).
            real_val: ground-truth values compared against the un-scaled
                predictions.

        Returns:
            Tuple of Python floats: (mae, mape, rmse) for this batch.
        """
        self.model.train()
        self.optimizer.zero_grad()
        # Left-pad the last (time) dimension by one step.
        input = nn.functional.pad(input, (1, 0, 0, 0))
        output = self.model(input).transpose(
            1, 3)  # now, output = [batch_size,1,num_nodes, seq_length]
        predict = self.scaler.inverse_transform(output)
        assert predict.shape[1] == 1
        # MAE doubles as the training loss; MAPE/RMSE are reported only.
        mae, mape, rmse = util.calc_metrics(predict.squeeze(1),
                                            real_val,
                                            null_val=0.0)

        if self.fp16:
            from apex import amp
            with amp.scale_loss(mae, self.optimizer) as scaled_loss:
                scaled_loss.backward()
            # Fix: mirror the fp32 branch — clip only when a threshold is
            # configured; calling clip_grad_norm_ with None would raise.
            if self.clip is not None:
                torch.nn.utils.clip_grad_norm_(
                    amp.master_params(self.optimizer), self.clip)
        else:
            mae.backward()
            if self.clip is not None:
                torch.nn.utils.clip_grad_norm_(self.model.parameters(),
                                               self.clip)
        self.optimizer.step()
        return mae.item(), mape.item(), rmse.item()
Example no. 2
0
 def eval(self, input, real_val):
     """Evaluate the model on one batch.

     Returns:
         Tuple of Python floats: (mae, mape, rmse) computed on the
         inverse-transformed, clamped predictions.
     """
     self.model.eval()
     # Fix: inference only — run under no_grad so autograd state is not
     # built; returned metric values are unchanged.
     with torch.no_grad():
         input = nn.functional.pad(input, (1, 0, 0, 0))
         output = self.model(input).transpose(1, 3)  # [batch_size,seq_length,num_nodes,1]
         real = torch.unsqueeze(real_val, dim=1)
         predict = self.scaler.inverse_transform(output)
         # Clamp to [0, 70] — presumably the valid value range of the
         # target (e.g. traffic speed); TODO confirm.
         predict = torch.clamp(predict, min=0., max=70.)
         mae, mape, rmse = [x.item() for x in util.calc_metrics(predict, real, null_val=0.0)]
     return mae, mape, rmse
Example no. 3
0
def start(params):
    """Run a pool-testing simulation and write reports to ./reports/<name>.

    Args:
        params: dict with keys "name", "runs", "N", "B", "D", "d", "f",
            "eps_fp", "eps_fn" (schema inferred from usage — TODO confirm
            against the caller).

    Side effects:
        Creates a report directory, writes params.json, per-run comparison
        arrays, and stats.json.
    """
    report_name = datetime.datetime.now().__str__() if params["name"] is None else params["name"]
    directory = os.path.join(os.getcwd(), 'reports', report_name)
    # Fix: os.mkdir fails when the 'reports' parent is missing; makedirs
    # creates intermediate directories. exist_ok stays False so an existing
    # report directory still raises, as before.
    os.makedirs(directory)
    print("Saving run to directory ", directory)
    util.write_json(params, os.path.join(directory, 'params.json'))
    simulation_statistics = {'tp': 0,
                             'tn': 0,
                             'fp': 0,
                             'fn': 0,
                             'infeasible_times': 0}
    print("Metrics")
    util.calc_metrics(params["D"], params["d"], params["B"], params["N"])
    for run in range(params["runs"]):
        print("\n\nSimulation run ", run)

        infection_samples = util.draw_samples(params["N"], params["f"])
        print("Infection samples vector", sum(infection_samples))
        print("Indices of positive samples", [i for i, x in enumerate(infection_samples) if x])

        labels = util.assign_labels(params["N"], params["B"], params["D"])
        print("Labels", len(labels), labels)

        labels_in_digits = label_conversion.convert_labels_to_digits(labels, params["B"], params["D"])
        membership_matrix_global, psd_global = get_membership_matrix(labels_in_digits, params["D"], params["d"], params["B"])
        result = perform_testing_of_pools(infection_samples, membership_matrix_global, params["eps_fp"], params["eps_fn"])

        # Recover individual samples
        solution, status = solve_mip(params["N"], membership_matrix_global.get_matrix(), result, result.shape[0],
                                     params["eps_fp"], params["eps_fn"], params["f"])

        comparison_array = np.column_stack((infection_samples, solution, labels_in_digits))
        np.savetxt(os.path.join(directory, str(run)), comparison_array, fmt='%.18e %.18e %d %d %d %d')

        # Confusion-matrix counts for this run (elementwise `== True/False`
        # so float solutions only count when exactly 0 or 1).
        simulation_statistics['tp'] += np.count_nonzero(np.logical_and(infection_samples == True, solution == True))
        simulation_statistics['tn'] += np.count_nonzero(np.logical_and(infection_samples == False, solution == False))
        simulation_statistics['fp'] += np.count_nonzero(np.logical_and(infection_samples == False, solution == True))
        simulation_statistics['fn'] += np.count_nonzero(np.logical_and(infection_samples == True, solution == False))
        if status != OptimizationStatus.OPTIMAL and status != OptimizationStatus.FEASIBLE:
            simulation_statistics['infeasible_times'] += 1
    # Print results
    simulation_statistics = util.calculate_stats(simulation_statistics)
    print("Stats: ", simulation_statistics)
    util.write_json(simulation_statistics, os.path.join(directory, 'stats.json'))
Example no. 4
0
def main():
    """Compare HR/SR image pairs and print PSNR / SSIM / SSIM_RGB."""
    scale = 2
    # The path is absolute, so os.path.join(os.getcwd(), path) discarded
    # the cwd anyway — use the path directly. The sibling "x2" directory is
    # derived per-image below by rewriting the path component.
    image1_dir = '/home/ilaopis/桌面/RCAN-master/RCAN_TestCode/SR/BI/RCAN/Set5/Hx2'
    total_psnr = []
    total_ssim = []
    total_ssim_RGB = []
    total_lpips = []

    path_list = sorted(glob.glob('{}/*'.format(image1_dir)))
    for image_path1 in path_list:
        # Build the paired path by replacing the parent dir name with "x2".
        _name = image_path1.split("/")
        _name[-2] = "x2"
        image_path2 = "/".join(_name)
        hr = pil_image.open(image_path1).convert('RGB')
        sr = pil_image.open(image_path2).convert('RGB')

        hr = np.array(hr).astype(np.float32)
        sr = np.array(sr).astype(np.float32)
        print(sr.shape)

        # calculate PSNR/SSIM metrics on Python
        psnr, ssim = util.calc_metrics(hr, sr, crop_border=scale)
        # NOTE(review): (1, 2, 0) on an HWC array yields (W, C, H) — verify
        # this is the layout pytorch_ssim.ssim expects.
        sr = np.transpose(sr, (1, 2, 0))
        hr = np.transpose(hr, (1, 2, 0))
        sr = torch.tensor(sr)
        hr = torch.tensor(hr)
        ssim_RGB = pytorch_ssim.ssim(sr / 255, hr / 255).item()

        total_psnr.append(psnr)
        total_ssim.append(ssim)
        total_ssim_RGB.append(ssim_RGB)

        print("PSNR(dB)/SSIM/SSIM_RGB: %.2f/%.4f/%.4f." %
              (psnr, ssim, ssim_RGB))

    # Fix: the original format string had two placeholders but received
    # three values, raising TypeError; also guard against an empty dir.
    if total_psnr:
        n = len(total_psnr)
        print("PSNR: %.2f      SSIM: %.4f     SSIM_RGB: %.4f" %
              (sum(total_psnr) / n, sum(total_ssim) / n,
               sum(total_ssim_RGB) / n))

    print("==================================================")
    print("===> Finished !")
Example no. 5
0
 def train(self, input, real_val):
     """One training step: forward pass, MAE loss, backward, clipped update.

     Returns:
         Tuple of Python floats: (mae, mape, rmse) for this batch.
     """
     self.model.train()
     self.optimizer.zero_grad()
     # Left-pad the last (time) dimension by one step before the forward pass.
     padded = nn.functional.pad(input, (1, 0, 0, 0))
     # After the transpose: [batch_size, 1, num_nodes, seq_length].
     output = self.model(padded).transpose(1, 3)
     predict = self.scaler.inverse_transform(output)
     assert predict.shape[1] == 1
     metrics = util.calc_metrics(predict.squeeze(1), real_val, null_val=0.0)
     mae, mape, rmse = metrics
     print('MAPE', mape.item())
     # MAE serves as the training loss.
     mae.backward()
     if self.clip is not None:
         torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)
     self.optimizer.step()
     return mae.item(), mape.item(), rmse.item()
Example no. 6
0
def main(config):
	"""Run SR testing over every configured dataset.

	Args:
		config: dict with keys 'device', 'path', 'name', 'dataset'
			(schema inferred from usage — TODO confirm).

	Side effects:
		Creates the experiment directory, saves LR/SR images, and logs
		per-set and overall PSNR/SSIM averages.
	"""
	device = torch.device(config['device'])  # also validates config['device']

	##### Setup Dirs #####
	experiment_dir = config['path']['experiments'] + config['name']
	util.mkdir_and_rename(
                experiment_dir)  # rename experiment folder if exists
	util.mkdirs((experiment_dir+'/sr_images', experiment_dir+'/lr_images'))

	##### Setup Logger #####
	logger = util.Logger('test', experiment_dir, 'test_' + config['name'])

	##### print Experiment Config
	logger.log(util.dict2str(config))

	###### Load Dataset #####
	testing_data_loader = dataset.get_test_sets(config['dataset'], logger)

	trainer = create_model(config, logger)
	trainer.print_network_params(logger)

	total_avg_psnr = 0.0
	total_avg_ssim = 0.0

	for name, test_set in testing_data_loader.items():
		logger.log('Testing Dataset {:s}'.format(name))
		valid_start_time = time.time()
		avg_psnr = 0.0
		avg_ssim = 0.0
		idx = 0
		for i, batch in enumerate(test_set):
			idx += 1
			# Derive the image name from the path stored in the batch.
			img_name = batch[2][0][batch[2][0].rindex('/')+1:]
			img_name = img_name[:img_name.index('.')]
			img_dir_sr = experiment_dir+'/sr_images'
			img_dir_lr = experiment_dir+'/lr_images'
			util.mkdir(img_dir_sr)
			infer_time = trainer.test(batch)
			visuals = trainer.get_current_visuals()
			lr_img = util.tensor2img(visuals['LR'])
			sr_img = util.tensor2img(visuals['SR'])  # uint8
			gt_img = util.tensor2img(visuals['HR'])  # uint8
			save_sr_img_path = os.path.join(img_dir_sr, '{:s}.png'.format(img_name))
			save_lr_img_path = os.path.join(img_dir_lr, '{:s}.png'.format(img_name))
			util.save_img(lr_img, save_lr_img_path)
			util.save_img(sr_img, save_sr_img_path)
			crop_size = config['dataset']['scale']
			psnr, ssim = util.calc_metrics(sr_img, gt_img, crop_size)
			avg_psnr += psnr
			avg_ssim += ssim
		# Fix: guard against an empty test set (idx == 0 would divide by zero).
		if idx:
			avg_psnr = avg_psnr / idx
			avg_ssim = avg_ssim / idx
		valid_t = time.time() - valid_start_time
		logger.log('[ Set: {:s} Time:{:.3f}] PSNR: {:.2f} SSIM {:.4f}'.format(name, valid_t, avg_psnr, avg_ssim))

		total_avg_ssim += avg_ssim
		total_avg_psnr += avg_psnr

	total_avg_ssim /= len(testing_data_loader)
	total_avg_psnr /= len(testing_data_loader)

	# Fix: close the bracket the original message left unbalanced.
	logger.log('[ Total Average of Sets: PSNR: {:.2f} SSIM {:.4f}]'.format(total_avg_psnr, total_avg_ssim))