def testImage(net,
              img,
              brdf_inNet,
              tid,
              pid,
              out_img=False,
              fixedChannal=[0] * 3):
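    """Run one forward pass of the network on a single test image.

    brdf_inNet holds the input BRDF as a (1, 3, 1, 1) array ordered
    [albedo, spec, roughness]. When test_params['Ratio'] is set, the net is
    fed the two-channel encoding [log(albedo / spec), roughness] instead.
    Channels flagged with 1 in fixedChannal are reset to their input values
    after prediction. Returns (brdf_predict, loss_brdf, loss_mse, loss_ssim,
    img_predict), where img_predict is -1 unless out_img is True.
    """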
    if (test_params['Ratio']):
        brdf_in = np.zeros((1, 2, 1, 1))
        brdf_in[0, 0, 0,
                0] = np.log(brdf_inNet[0, 0, 0, 0] / brdf_inNet[0, 1, 0, 0])
        brdf_in[0, 1, 0, 0] = brdf_inNet[0, 2, 0, 0]
    else:
        brdf_in = brdf_inNet

    net.blobs['Data_Image'].data[...] = img
    net.blobs['Data_BRDF'].data[...] = brdf_in
    net.forward()

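    # Nets with an 'Out_Ratio' blob predict log(albedo / spec) and log(roughness);
    # otherwise 'Out_LossFeature' carries the three BRDF channels directly
    # (roughness in log space).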
    if ('Out_Ratio' in net.blobs):
        brdf_ratio_predict = np.exp(net.blobs['Out_Ratio'].data.flatten()[0])
        brdf_roughness_predict = np.exp(
            net.blobs['Out_Roughness_Fix'].data.flatten()[0])
        brdf_predict = np.array([
            brdf_ratio_predict * brdf_inNet[0, 1, 0, 0],
            brdf_inNet[0, 1, 0, 0], brdf_roughness_predict
        ])
    else:
        brdf_predict = net.blobs['Out_LossFeature'].data.flatten()
        brdf_predict[2] = np.exp(brdf_predict[2])
        brdf_predict = np.maximum(0, brdf_predict)

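    # Pin any channel flagged as fixed back to its input value.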
    for cid, ch in enumerate(fixedChannal):
        if (ch == 1):
            brdf_predict[cid] = brdf_inNet.flatten()[cid]

    if ('Out_Ratio' in net.blobs):
        loss_brdf = [
            net.blobs['RatioLoss'].data.flatten()[0],
            (0.5 * (brdf_predict[0] - brdf_inNet[0, 0, 0, 0]))**2,
            net.blobs['RoughnessLoss'].data.flatten()[0],
            net.blobs['RatioLoss'].data.flatten()[0] +
            net.blobs['RoughnessLoss'].data.flatten()[0]
        ]
    else:
        loss_brdf = [
            0.5 * net.blobs['DiffuseLoss'].data.flatten()[0],
            0.5 * net.blobs['SpecLoss'].data.flatten()[0],
            0.5 * net.blobs['RoughnessLoss'].data.flatten()[0],
            0.5 * net.blobs['MSELoss'].data.flatten()[0]
        ]

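    # Re-render with the predicted BRDF under the same lighting and compare
    # against the input image for the image-space losses.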
    if (test_params['envLighting']):
        OnlineRender.SetEnvLightByID(lightID[tid] + 1)
        OnlineRender.SetAlbedoValue(
            [brdf_predict[0], brdf_predict[0], brdf_predict[0]])
        OnlineRender.SetSpecValue(
            [brdf_predict[1], brdf_predict[1], brdf_predict[1]])
        OnlineRender.SetRoughnessValue(brdf_predict[2])
        img_predict = OnlineRender.Render()[:, :, 0]

        save_pfm('test_p.pfm', img_predict)
        save_pfm('test_gt.pfm', img[0, :, :])

        loss_mse = 0.5 * np.mean((img[0, :, :] - img_predict)**2)
        loss_ssim = 0.5 * (1.0 - ssim(img[0, :, :], img_predict, win_size=5))
    else:
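        # Point light placed on the unit sphere at (thetaList[tid], phiList[pid]).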
        px = np.sin(thetaList[tid]) * np.cos(phiList[pid])
        py = np.sin(thetaList[tid]) * np.sin(phiList[pid])
        pz = np.cos(thetaList[tid])

        OnlineRender.SetPointLight(0, px, py, pz, 0, 1, 1, 1)
        OnlineRender.SetAlbedoValue(
            [brdf_predict[0], brdf_predict[0], brdf_predict[0]])
        OnlineRender.SetSpecValue(
            [brdf_predict[1], brdf_predict[1], brdf_predict[1]])
        OnlineRender.SetRoughnessValue(brdf_predict[2])
        img_predict = OnlineRender.Render()[:, :, 0]

        loss_mse = np.mean((img[0, :, :] - img_predict)**2)
        loss_ssim = 1.0 - ssim(img[0, :, :], img_predict, win_size=5)

    if (out_img):
        return brdf_predict, loss_brdf, loss_mse, loss_ssim, img_predict
    else:
        return brdf_predict, loss_brdf, loss_mse, loss_ssim, -1
            brdfbatch[0,6,:,:] = roughnessvalue
            brdfbatch[0,7:10,:,:] = normal.transpose(2,0,1)

            lids = [l]
            rotX = lightPool[m][lightIDToEnumerateID[l], v, 0]
            rotY = lightPool[m][lightIDToEnumerateID[l], v, 1]
            lxforms = [[rotX],[rotY]]

            imgbatch, normValue = renderOnlineEnvlight(brdfbatch, AugmentRender, lids, lxforms)
            outfolder = rendered_labeled_out + r'/m_{}'.format(m)
            make_dir(outfolder)

            # renderType: 0 = HDR (.pfm) only, 1 = LDR (.jpg) only, 2 = both
            if(renderType == 0 or renderType == 2):
                save_pfm(outfolder + r'/{}_{}_{}_{}_image.pfm'.format(m, l, v, o), imgbatch[0,:,:,:].transpose((1,2,0)))
            if(renderType == 1 or renderType == 2):
                cv2.imwrite(outfolder + r'/{}_{}_{}_{}_image.jpg'.format(m, l, v, o), toLDR(imgbatch[0,:,:,:].transpose((1,2,0))))

    # render test data
    if(renderTag == 'test' or renderTag == 'all'):
        make_dir(rendered_test_out)
        path, file = os.path.split(test_file_in)
        dataset = DataLoaderSVBRDF(path, file, 384, 384, False)
        for k in range(0, dataset.dataSize):
            if(k % 1000 == 0):
                print('{}/{}'.format(k, dataset.dataSize))
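            # dataList entries are underscore-separated integer indices: m_l_v_o.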
            name = map(int, dataset.dataList[k].split('_'))
            m, l, v, o = name
            brdfbatch = np.ones((1, 10, 384, 384))
                loss_v = []
                for tid in lRange0:
                    for pid in lRange1:
                        img, brdf = testSet.GetItemByID(
                            aid, sid, rid, tid, pid,
                            False)  #test_params['envLighting'])
                        brdf_predict, loss_brdf, loss_mse, loss_ssim, img_predict = testImage(
                            testnet, img, brdf, tid, pid, True, fixChannal)
                        l_ratio = np.log(brdf[0, 0, 0, 0] / brdf[0, 1, 0, 0])
                        l_roughness = brdf[0, 2, 0, 0]
                        loss_v.append(loss_mse)
                        ratio_id = np.abs(l_ratio - logratioAxis).argmin()

                loss_ratio_roughness[ratio_id, rid] = np.mean(loss_v)

    save_pfm(outputFolder + r'/test_slice_ratio_R_visual_all.pfm',
             loss_ratio_roughness)
    draw2DHeatMap(outputFolder + r'/test_slice_ratio_R_visual_all.png',
                  loss_ratio_roughness, 'Roughness', 'Ratio',
                  np.max(loss_ratio_roughness))

    for aid in test_params['albedoRange']:
        for sid in test_params['specRange']:
            for rid in test_params['roughnessRange']:
                loss_list = [[], [], [], [], [], []]
                for tid in lRange0:
                    for pid in lRange1:
                        if (test_params['resample']):
                            brdf_gt = np.array([
                                testAlbedo[aid], testSpec[sid],
                                testRoughness[rid]
                            ])
                trainCube[aid, sid, rid] = [a, s, r]
                brdfFolder = out_root + r'/train_envlight/{}_{}_{}'.format(
                    aid, sid, rid)
                make_dir(brdfFolder)

                OnlineRender.SetAlbedoValue([a, a, a])
                OnlineRender.SetSpecValue([s, s, s])
                OnlineRender.SetRoughnessValue(r)

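                # Render this material under every environment light with the
                # light-frame rotations from getLightTransList(3, 3).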
                for lid, l in enumerate(lightID):
                    OnlineRender.SetEnvLightByID(l + 1)
                    lightView, lightX, lightY = getLightTransList(3, 3)
                    for vid, v in enumerate(lightView):
                        OnlineRender.SetLightXform(lightX[vid], lightY[vid])
                        img = OnlineRender.Render()
                        save_pfm(brdfFolder + r'/{}_{}.pfm'.format(lid, vid),
                                 img)
                        ftrain.write('{}_{}_{}_{}_{}\n'.format(
                            aid, sid, rid, lid, vid))
                        lightMatrix[lid, vid, 0] = lightX[vid]
                        lightMatrix[lid, vid, 1] = lightY[vid]

                np.savetxt(
                    out_root +
                    r'/train_envlight/lightMatrix_{}_{}_{}.txt'.format(
                        aid, sid, rid), lightMatrix.flatten())

    ftrain.close()
    np.savetxt(out_root + r'/train_envlight/brdfcube.txt', trainCube.flatten())
    print('Done.\n')

    print('Rendering Test data...\n')