import os

import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader

# NOTE: module paths for the project-local imports are assumed; adjust them to
# this repo's layout. `validate` is the evaluation routine defined alongside main().
from dataset import DatasetFromHdf5
from resblock import resblock, conv_relu_res_relu_block
from utils import initialize_logger, record_loss, rrmse_loss


def main():
    cudnn.benchmark = True
    base_path = '/NSL/data/images/HyperspectralImages/ICVL/'

    # Dataset
    val_data = DatasetFromHdf5(base_path + '/testclean_si50_st80.h5')
    print(len(val_data))

    # Data Loader (Input Pipeline)
    val_loader = DataLoader(dataset=val_data, num_workers=1, batch_size=1,
                            shuffle=False, pin_memory=True)

    # Model: 16 residual blocks, 3 input (RGB) channels, 31 output bands
    model_path = base_path + 'hscnn_5layer_dim10_93.pkl'
    result_path = base_path + '/test_results/'
    var_name = 'rad'

    save_point = torch.load(model_path)
    model_param = save_point['state_dict']
    model = resblock(conv_relu_res_relu_block, 16, 3, 31)
    model = nn.DataParallel(model)
    model.load_state_dict(model_param)
    model = model.cuda()
    model.eval()

    # Loss CSV and log file go to the dataset root
    model_path = base_path
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    loss_csv = open(os.path.join(model_path, 'loss.csv'), 'w+')
    log_dir = os.path.join(model_path, 'train.log')
    logger = initialize_logger(log_dir)

    test_loss = validate(val_loader, model, rrmse_loss)
    print("Test Loss: %.9f" % (test_loss))

    # save loss
    record_loss(loss_csv, test_loss)
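# --- Hedged sketch ---------------------------------------------------------
# `rrmse_loss` is imported from this repo's utils and its exact definition is
# not shown here; a plausible relative-error criterion consistent with how it
# is used above (outputs vs. ground-truth label tensors) would be:
def rrmse_loss(outputs, label):
    """Mean relative absolute error between reconstruction and ground truth."""
    error = torch.abs(outputs - label) / (torch.abs(label) + 1e-8)
    return torch.mean(error.reshape(-1))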
import os

import numpy as np
import torch  # missing in the original; needed for torch.load and .cuda()
from imageio import imread

from resblock import resblock, conv_relu_res_relu_block
from utils import save_matv73, reconstruction

model_path = './models/res_jpg_n14.pkl'
img_path = './test_imgs/'
result_path = './test_results3/'
var_name = 'rad'

save_point = torch.load(model_path)
model_param = save_point['state_dict']
model = resblock(conv_relu_res_relu_block, 14, 3, 31)
model.load_state_dict(model_param)
model = model.cuda()
model.eval()

for img_name in sorted(os.listdir(img_path)):
    print(img_name)
    img_path_name = os.path.join(img_path, img_name)
    rgb = imread(img_path_name)
    rgb = rgb / 255  # scale 8-bit RGB to [0, 1]
    # HWC -> CWH, then add a batch dimension: (1, 3, W, H)
    rgb = np.expand_dims(np.transpose(rgb, [2, 1, 0]), axis=0).copy()
    img_res1 = reconstruction(rgb, model)
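# --- Hedged sketch ---------------------------------------------------------
# `reconstruction` comes from utils; assuming it is a no-grad forward pass on
# the GPU that returns the spectral cube as a numpy array (the exact output
# layout and any rescaling in the real helper are assumptions):
def reconstruction(rgb, model):
    with torch.no_grad():
        out = model(torch.from_numpy(rgb).float().cuda())
    return out.cpu().numpy().squeeze(0)  # (bands, W, H), matching the input layout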
import os
import time

import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader

import pytorch_msssim

# NOTE: project-local import paths are assumed; adjust to this repo's layout.
# `train` and `validate` are the routines defined alongside main().
from dataset import DatasetFromHdf5
from resblock import resblock, conv_bn_relu_res_block
from utils import (initialize_logger, record_loss, rrmse_loss, Angle_Loss,
                   save_checkpoint_material)


def main():
    cudnn.benchmark = True

    # Dataset
    train_data = DatasetFromHdf5('./Data/train_Material_.h5')
    print(len(train_data))
    val_data = DatasetFromHdf5('./Data/valid_Material_.h5')
    print(len(val_data))

    # Data Loader (Input Pipeline)
    train_data_loader = DataLoader(dataset=train_data, num_workers=1,
                                   batch_size=64, shuffle=True, pin_memory=True)
    val_loader = DataLoader(dataset=val_data, num_workers=1, batch_size=1,
                            shuffle=False, pin_memory=True)

    # Model: 10 residual blocks, 25 input bands, 25 output bands
    model = resblock(conv_bn_relu_res_block, 10, 25, 25)
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)
    if torch.cuda.is_available():
        model.cuda()

    # Parameters, Loss and Optimizer
    start_epoch = 0
    end_epoch = 100
    init_lr = 0.0001
    iteration = 0
    record_test_loss = 1000
    # criterion_RRMSE = torch.nn.L1Loss()
    criterion_RRMSE = rrmse_loss
    criterion_Angle = Angle_Loss
    criterion_MSE = torch.nn.MSELoss()
    criterion_SSIM = pytorch_msssim.SSIM()
    # criterion_Div = Divergence_Loss
    criterion_Div = torch.nn.KLDivLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=init_lr,
                                 betas=(0.9, 0.999), eps=1e-08, weight_decay=0.01)

    model_path = './models/'
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    loss_csv = open(os.path.join(model_path, 'loss_material.csv'), 'w+')
    log_dir = os.path.join(model_path, 'train_material.log')
    logger = initialize_logger(log_dir)

    # Resume
    resume_file = ''
    if resume_file:
        if os.path.isfile(resume_file):
            print("=> loading checkpoint '{}'".format(resume_file))
            checkpoint = torch.load(resume_file)
            start_epoch = checkpoint['epoch']
            iteration = checkpoint['iter']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])

    for epoch in range(start_epoch + 1, end_epoch):
        start_time = time.time()
        train_loss, iteration, lr = train(train_data_loader, model,
                                          criterion_MSE, criterion_RRMSE,
                                          criterion_Angle, criterion_SSIM,
                                          criterion_Div, optimizer, iteration,
                                          init_lr, end_epoch, epoch)
        test_loss, loss_angle, loss_reconstruct, loss_SSIM, loss_Div = validate(
            val_loader, model, criterion_MSE, criterion_RRMSE, criterion_Angle,
            criterion_SSIM, criterion_Div)
        # xxx_loss = validate_save(val_loader, model, criterion_MSE, criterion_RRMSE, epoch)

        save_checkpoint_material(model_path, epoch, iteration, model, optimizer)

        # print loss
        end_time = time.time()
        epoch_time = end_time - start_time
        print("Epoch [%d], Iter[%d], Time:%.9f, learning rate : %.9f, "
              "Train Loss: %.9f, Test Loss: %.9f, Angle Loss: %.9f, "
              "Recon Loss: %.9f, SSIM Loss: %.9f, Div Loss: %.9f"
              % (epoch, iteration, epoch_time, lr, train_loss, test_loss,
                 loss_angle, loss_reconstruct, loss_SSIM, loss_Div))

        # save loss
        record_loss(loss_csv, epoch, iteration, epoch_time, lr, train_loss, test_loss)
        logger.info("Epoch [%d], Iter[%d], Time:%.9f, learning rate : %.9f, "
                    "Train Loss: %.9f, Test Loss: %.9f, Angle Loss: %.9f, "
                    "Recon Loss: %.9f, SSIM Loss: %.9f, Div Loss: %.9f"
                    % (epoch, iteration, epoch_time, lr, train_loss, test_loss,
                       loss_angle, loss_reconstruct, loss_SSIM, loss_Div))
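# --- Hedged sketch ---------------------------------------------------------
# `Angle_Loss` is assumed to be a spectral-angle (SAM) criterion over the band
# dimension (dim=1 for NCHW tensors), as its pairing with the RRMSE/SSIM/KL
# terms above suggests; the repo's actual version may differ:
def Angle_Loss(outputs, label, eps=1e-8):
    dot = (outputs * label).sum(dim=1)
    norms = outputs.norm(dim=1) * label.norm(dim=1)
    cos = torch.clamp(dot / (norms + eps), -1.0 + eps, 1.0 - eps)
    return torch.mean(torch.acos(cos))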
import os
import time

import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader

# NOTE: project-local import paths are assumed; adjust to this repo's layout.
# `train` and `validate` are the routines defined alongside main(); a sketch of
# save_checkpoint follows this script.
from dataset import DatasetFromHdf5
from resblock import resblock, conv_batch_relu_res_block
from utils import initialize_logger, record_loss, rrmse_loss


def main():
    cudnn.benchmark = True
    base_path = '/NSL/data/images/HyperspectralImages/ICVL/'

    # Dataset
    train_data = DatasetFromHdf5(base_path + '/train.h5')
    print(len(train_data))
    val_data = DatasetFromHdf5(base_path + '/valid.h5')
    print(len(val_data))

    # Data Loader (Input Pipeline)
    train_data_loader = DataLoader(dataset=train_data, num_workers=1,
                                   batch_size=64, shuffle=True, pin_memory=True)
    val_loader = DataLoader(dataset=val_data, num_workers=1, batch_size=1,
                            shuffle=False, pin_memory=True)

    # Model: 16 residual blocks, 3 input (RGB) channels, 31 output bands
    model = resblock(conv_batch_relu_res_block, 16, 3, 31)
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)
    if torch.cuda.is_available():
        model.cuda()

    # Parameters, Loss and Optimizer
    start_epoch = 0
    end_epoch = 1000
    init_lr = 0.0002
    iteration = 0
    record_test_loss = 1000
    criterion = rrmse_loss
    optimizer = torch.optim.Adam(model.parameters(), lr=init_lr,
                                 betas=(0.9, 0.999), eps=1e-08, weight_decay=0)

    model_path = base_path + '/models/'
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    loss_csv = open(os.path.join(model_path, 'loss.csv'), 'w+')
    log_dir = os.path.join(model_path, 'train.log')
    logger = initialize_logger(log_dir)

    # Resume
    resume_file = ''
    if resume_file:
        if os.path.isfile(resume_file):
            print("=> loading checkpoint '{}'".format(resume_file))
            checkpoint = torch.load(resume_file)
            start_epoch = checkpoint['epoch']
            iteration = checkpoint['iter']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])

    for epoch in range(start_epoch + 1, end_epoch):
        print("epoch [%d]" % (epoch))
        start_time = time.time()
        train_loss, iteration, lr = train(train_data_loader, model, criterion,
                                          optimizer, iteration, init_lr, end_epoch)
        print("train done! epoch [%d]" % (epoch))
        test_loss = validate(val_loader, model, criterion)
        print("test done! epoch [%d]" % (epoch))

        # Save model only when the validation loss improves
        if test_loss < record_test_loss:
            record_test_loss = test_loss
            save_checkpoint(model_path, epoch, iteration, model, optimizer)

        # print loss
        end_time = time.time()
        epoch_time = end_time - start_time
        print("Epoch [%d], Iter[%d], Time:%.9f, learning rate : %.9f, "
              "Train Loss: %.9f, Test Loss: %.9f"
              % (epoch, iteration, epoch_time, lr, train_loss, test_loss))

        # save loss
        record_loss(loss_csv, epoch, iteration, epoch_time, lr, train_loss, test_loss)
        logger.info("Epoch [%d], Iter[%d], Time:%.9f, learning rate : %.9f, "
                    "Train Loss: %.9f, Test Loss: %.9f"
                    % (epoch, iteration, epoch_time, lr, train_loss, test_loss))
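# --- Hedged sketch ---------------------------------------------------------
# Checkpoint helper assumed by the loop above; the checkpoint keys mirror the
# resume logic, and the filename pattern is an assumption based on the
# 'hscnn_5layer_dim10_*.pkl' names seen elsewhere in this repo:
def save_checkpoint(model_path, epoch, iteration, model, optimizer):
    state = {'epoch': epoch,
             'iter': iteration,
             'state_dict': model.state_dict(),
             'optimizer': optimizer.state_dict()}
    torch.save(state, os.path.join(model_path, 'hscnn_5layer_dim10_%d.pkl' % epoch))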
import os

import numpy as np
import torch  # missing in the original; needed for torch.load and .cuda()
from imageio import imread
import PIL.Image

from resblock import resblock, conv_bn_relu_res_block
from utils import save_matv73, reconstruction, load_mat, mrae, rmse

model_path = './models/HS_veins_34to1band.pkl'
img_path = '../dataset/veins_t34bands/test_data/rgb/'
result_path = '../dataset/veins_t34bands/test_data/inference/'
gt_path = '../dataset/veins_t34bands/test_data/mat/'
var_name = 'rad'

save_point = torch.load(model_path)
model_param = save_point['state_dict']
model = resblock(conv_bn_relu_res_block, 10, 3, 1)
model.load_state_dict(model_param)
model = model.cuda()
model.eval()

for img_name in sorted(os.listdir(img_path)):
    img_path_name = os.path.join(img_path, img_name)
    rgb = imread(img_path_name)
    rgb = rgb / 255  # scale 8-bit RGB to [0, 1]
    rgb = np.expand_dims(np.transpose(rgb, [2, 1, 0]), axis=0).copy()
    # Self-ensemble: average the prediction with a flipped-input pass
    img_res1 = reconstruction(rgb, model)
    img_res2 = np.flip(reconstruction(np.flip(rgb, 2).copy(), model), 1)
    img_res3 = (img_res1 + img_res2) / 2
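# --- Hedged sketch ---------------------------------------------------------
# The imported `mrae`/`rmse` metrics are assumed to have these standard forms;
# the repo's versions may mask zero-valued pixels or rescale differently:
def mrae(pred, gt):
    return np.mean(np.abs(pred - gt) / (np.abs(gt) + 1e-8))  # mean relative absolute error

def rmse(pred, gt):
    return np.sqrt(np.mean((pred - gt) ** 2))  # root mean squared error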
import os

import torch

from resblock import resblock, conv_bn_relu_res_block
from utils import save_matv73, reconstruction, load_mat, mrae, rmse, Angle_Loss

model_path = './models/HS_Material_Model.pkl'
img_path = './Data/Material/test_inputs/'
result_path = './Data/Material/test_results/'
if not os.path.exists(result_path):
    os.makedirs(result_path)
gt_path = './Data/Material/test_labels/'
var_name = 'CompData'

save_point = torch.load(model_path)
model_param = save_point['state_dict']
model = resblock(conv_bn_relu_res_block, 10, 25, 25)
model.load_state_dict(model_param)
model = model.cuda()
model.eval()

criterion_Angle = Angle_Loss
# criterion_MSE = torch.nn.MSELoss()
# criterion_SSIM = pytorch_msssim.SSIM()

# Running totals accumulated over the test set
Loss_SID = 0
ANG_loss = 0
Total_MRAE = 0
Total_RRMSE = 0
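# --- Hedged sketch ---------------------------------------------------------
# `load_mat` is assumed to read a MATLAB v7.3 label file via hdf5storage and
# return the array stored under var_name (e.g. 'CompData'); the real helper in
# utils may use a different backend or return shape:
import hdf5storage

def load_mat(mat_path, var_name):
    data = hdf5storage.loadmat(mat_path, variable_names=[var_name])
    return data[var_name]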
import os
import gc
import time

import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.utils.data as data
from torch.utils.data import DataLoader

# NOTE: project-local import paths are assumed; adjust to this repo's layout.
# `train` and `validate` are the routines defined alongside main().
from dataset import DatasetFromHdf5
from resblock import resblock, conv_relu_res_relu_block
from utils import initialize_logger, record_loss, rrmse_loss, save_checkpoint


def main():
    # Dataset downloads:
    # https://drive.google.com/file/d/1QxQxf2dzfSbvCgWlI9VuxyBgfmQyCmfE/view?usp=sharing - train data
    # https://drive.google.com/file/d/11INkjd_ajT-RSCSFqfB7reLI6_m1jCAC/view?usp=sharing - val data
    # https://drive.google.com/file/d/1m0EZaRjla2o_eL3hOd7UMkSwoME5mF4A/view?usp=sharing - extra val data
    cudnn.benchmark = True

    # Dataset
    # train_data = DatasetFromHdf5('C:/Users/alawy/Desktop/Training/Training-shadesofgrey/train_tbands.h5')
    train_data = DatasetFromHdf5('/storage/train_cropped14.h5')
    print(len(train_data))
    val_data_extra = DatasetFromHdf5('/storage/valid_extra99.h5')
    val_data = DatasetFromHdf5('/storage/valid_cropped89.h5')

    # Concatenate the two validation sets into one dataset
    new_val = [val_data, val_data_extra]
    print(len(new_val))
    val_new = data.ConcatDataset(new_val)
    print(len(val_new))

    # Data Loader (Input Pipeline)
    train_data_loader = DataLoader(dataset=train_data, num_workers=4,
                                   batch_size=512, shuffle=True, pin_memory=True)
    val_loader = DataLoader(dataset=val_new, num_workers=1, batch_size=1,
                            shuffle=False, pin_memory=True)
    # torch.set_num_threads(12)

    # Model: 16 residual blocks, 3 input (RGB) channels, 25 output bands
    model = resblock(conv_relu_res_relu_block, 16, 3, 25)
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)
    if torch.cuda.is_available():
        model = model.to('cuda')

    # Parameters, Loss and Optimizer
    start_epoch = 0
    end_epoch = 1000
    init_lr = 0.0002
    iteration = 0
    record_test_loss = 1000
    criterion = rrmse_loss
    # optimizer = torch.optim.AdamW(model.parameters(), lr=init_lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
    optimizer = torch.optim.Adam(model.parameters(), lr=init_lr,
                                 betas=(0.9, 0.999), eps=1e-08, weight_decay=0.01)

    # model_path = '/storage/models-crop/'
    model_path = './models-crop/'
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    loss_csv = open(os.path.join(model_path, 'loss.csv'), 'w+')
    log_dir = os.path.join(model_path, 'train.log')
    logger = initialize_logger(log_dir)

    # Resume
    resume_file = ''
    # resume_file = '/storage/notebooks/r9h1kyhq8oth90j/models/hscnn_5layer_dim10_69.pkl'
    # resume_file = '/storage/notebooks/r9h1kyhq8oth90j/models-crop/hscnn_5layer_dim10_95.pkl'
    if resume_file:
        if os.path.isfile(resume_file):
            print("=> loading checkpoint '{}'".format(resume_file))
            checkpoint = torch.load(resume_file)
            start_epoch = checkpoint['epoch']
            iteration = checkpoint['iter']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])

    for epoch in range(start_epoch + 1, end_epoch):
        start_time = time.time()
        train_loss, iteration, lr = train(train_data_loader, model, criterion,
                                          optimizer, iteration, init_lr, end_epoch)
        test_loss = validate(val_loader, model, criterion)

        # Track the best validation loss; a checkpoint is saved every epoch
        # regardless (the original saved in both branches of an if/else)
        if test_loss < record_test_loss:
            record_test_loss = test_loss
        save_checkpoint(model_path, epoch, iteration, model, optimizer)

        # print loss
        end_time = time.time()
        epoch_time = end_time - start_time
        print("Epoch [%d], Iter[%d], Time:%.9f, learning rate : %.9f, "
              "Train Loss: %.9f, Test Loss: %.9f"
              % (epoch, iteration, epoch_time, lr, train_loss, test_loss))

        # save loss
        record_loss(loss_csv, epoch, iteration, epoch_time, lr, train_loss, test_loss)
        logger.info("Epoch [%d], Iter[%d], Time:%.9f, learning rate : %.9f, "
                    "Train Loss: %.9f, Test Loss: %.9f"
                    % (epoch, iteration, epoch_time, lr, train_loss, test_loss))
        gc.collect()
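# --- Hedged sketch ---------------------------------------------------------
# train() returns the current learning rate each epoch, which suggests a
# per-iteration decay inside the training loop. A polynomial schedule of this
# form is a plausible fit (max_iter and power are assumed values):
def poly_lr_scheduler(optimizer, init_lr, iteration, max_iter=1e8, power=1.5):
    lr = init_lr * (1 - iteration / max_iter) ** power
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr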