Example no. 1
import os
import numpy as np
import h5py
from utils import save_matv73

mat_path1 = './test_results1'
mat_path2 = './test_results2'
mat_path3 = './test_results3'
save_path = './final_results'

for mat_name in sorted(os.listdir(mat_path1)):
    print(mat_name)
    # Read each model's 'rad' cube from its MATLAB v7.3 (HDF5) file, open
    # read-only, and reorder the axes back to the original MATLAB layout.
    mat_path_name1 = os.path.join(mat_path1, mat_name)
    with h5py.File(mat_path_name1, 'r') as hf1:
        res1 = np.transpose(np.array(hf1.get('rad')), [2, 1, 0])

    mat_path_name2 = os.path.join(mat_path2, mat_name)
    with h5py.File(mat_path_name2, 'r') as hf2:
        res2 = np.transpose(np.array(hf2.get('rad')), [2, 1, 0])

    mat_path_name3 = os.path.join(mat_path3, mat_name)
    with h5py.File(mat_path_name3, 'r') as hf3:
        res3 = np.transpose(np.array(hf3.get('rad')), [2, 1, 0])

    # Weighted average of the three model outputs.
    res = 0.4 * res1 + 0.3 * res2 + 0.3 * res3
    mat_dir = os.path.join(save_path, mat_name)
    save_matv73(mat_dir, 'rad', res)
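The save_matv73 helper imported from utils is not shown above. A minimal sketch of what it could look like, assuming it simply wraps hdf5storage to write a single variable into a MATLAB v7.3 (HDF5-based) .mat file; the repo's actual implementation may differ:

import hdf5storage

def save_matv73(mat_path, var_name, var):
    # Write one variable into a MATLAB v7.3 (HDF5) .mat file.
    hdf5storage.savemat(mat_path, {var_name: var}, format='7.3',
                        store_python_metadata=True)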
Example no. 2
import os
import numpy as np
import torch
from imageio import imread  # assumed image reader; the original import is not shown
from resblock import resblock, conv_relu_res_relu_block  # assumed location of the model classes
from utils import save_matv73, reconstruction

model_path = './models/res_jpg_n16_64.pkl'
img_path = './test_imgs/'
result_path = './test_results1/'
var_name = 'rad'

# Load the trained residual network weights (RGB in, 31 spectral bands out)
# and put the model on the GPU in evaluation mode.
save_point = torch.load(model_path)
model_param = save_point['state_dict']
model = resblock(conv_relu_res_relu_block, 16, 3, 31)
model.load_state_dict(model_param)

model = model.cuda()
model.eval()

for img_name in sorted(os.listdir(img_path)):
    print(img_name)
    img_path_name = os.path.join(img_path, img_name)
    rgb = imread(img_path_name)
    # Scale to [0, 1], reorder to (C, W, H) and add a batch dimension.
    rgb = rgb / 255.0
    rgb = np.expand_dims(np.transpose(rgb, [2, 1, 0]), axis=0).copy()

    # Self-ensemble: average the reconstruction of the image and of a
    # flipped copy (flipped back after reconstruction).
    img_res1 = reconstruction(rgb, model)
    img_res2 = np.flip(reconstruction(np.flip(rgb, 2).copy(), model), 1)
    img_res3 = (img_res1 + img_res2) / 2

    # Drop the last 11 characters of the image file name and save as .mat.
    mat_name = img_name[:-11] + '.mat'
    mat_dir = os.path.join(result_path, mat_name)

    save_matv73(mat_dir, var_name, img_res3)
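The reconstruction helper is imported from utils and not listed here. A minimal sketch of what it might do, assuming it just forwards the (1, 3, W, H) array through the network on the GPU and returns the spectral cube as a NumPy array; the actual helper in the repo may differ:

def reconstruction(rgb, model):
    # Forward pass without gradients; squeeze the batch dimension and
    # transpose so the result matches the layout expected by save_matv73.
    with torch.no_grad():
        out = model(torch.from_numpy(rgb).float().cuda())
    return np.transpose(np.squeeze(out.cpu().numpy()))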
Example no. 3
    # Excerpt from the evaluation loop: output, gt and inputt are tensors
    # produced earlier in the loop; criterion_Angle and the running totals
    # (ANG_loss, Loss_SID, Total_MRAE) are defined before the loop.
    # Symmetric KL divergence between the scaled prediction and ground truth:
    # KLDivLoss expects log-probabilities as input and probabilities as target.
    criterion_Div = torch.nn.KLDivLoss()
    a = torch.log_softmax(output * 6.00, dim=1)
    b = torch.softmax(gt * 6.00, dim=1)

    a2 = torch.log_softmax(gt * 6.00, dim=1)
    b2 = torch.softmax(output * 6.00, dim=1)

    loss_Div = criterion_Div(a, b) + criterion_Div(a2, b2)

    # Accumulate the spectral-angle loss, normalised per pixel (512 x 512 images).
    ANG_loss = ANG_loss + criterion_Angle(output * 6.00,
                                          gt * 6.00).item() / (512 * 512)

    # Move the tensors to NumPy for saving and metric computation.
    output = np.transpose(np.squeeze(output.cpu().detach().numpy()))
    inputt = np.transpose(np.squeeze(inputt.cpu().detach().numpy()))
    gt = np.transpose(np.squeeze(gt.cpu().detach().numpy()))
    save_matv73(mat_dir, 'rad', output)

    # Accumulate the divergence loss and the MRAE error for this image.
    Loss_SID = Loss_SID + loss_Div.item()
    mrae_error = mrae(output * 6.00, gt * 6.00)
    # rrmse_error = rmse(inputt, gt)

    Total_MRAE = Total_MRAE + mrae_error
    # Total_RRMSE = Total_RRMSE + rrmse_error

    print("[%s]" % (img_name))

print("#############################################")

print("Average SAM Loss is:")
print(ANG_loss / images_number)
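The mrae metric used above is not defined in this excerpt. A minimal sketch of a mean relative absolute error implementation with the same call signature, mrae(pred, gt); the eps safeguard against division by zero is an addition and may not be in the original helper:

def mrae(pred, gt, eps=1e-8):
    # Mean over all pixels and bands of |pred - gt| / gt.
    return np.mean(np.abs(pred - gt) / (gt + eps))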