roughBatch = roughBatch.cuda(opt.gpuId)
    depthBatch = depthBatch.cuda(opt.gpuId)
    segBatch = segBatch.cuda(opt.gpuId)

    imP1Batch = imP1Batch.cuda(opt.gpuId)
    imP2Batch = imP2Batch.cuda(opt.gpuId)
    imP3Batch = imP3Batch.cuda(opt.gpuId)
    imPBatch  = imPBatch.cuda(opt.gpuId)

    globIllu1to2 = globIllu1to2.cuda(opt.gpuId)
    globIllu2to3 = globIllu2to3.cuda(opt.gpuId)
####################################


####################################
brdfDataset = dataLoader.BatchLoader(opt.dataRoot, imSize = opt.imageSize)
brdfLoader = DataLoader(brdfDataset, batch_size = opt.batchSize, num_workers = 8, shuffle = False)

j = 0
directErrsNpList = np.ones( [1, 1 + opt.cascadeLevel], dtype = np.float32 )
globalIllu1ErrsNpList = np.ones( [1, 1 + opt.cascadeLevel], dtype = np.float32 )
globalIllu2ErrsNpList = np.ones( [1, 1 + opt.cascadeLevel], dtype = np.float32 )
globalIllu3ErrsNpList = np.ones( [1, 1 + opt.cascadeLevel], dtype = np.float32 )
renderErrsNpList = np.ones( [1, 1 + opt.cascadeLevel], dtype = np.float32 )
renderGtErrsNpList = np.ones( [1, 1 + opt.cascadeLevel], dtype = np.float32 )

epoch = opt.epochId
testingLog = open('{0}/testingLog_{1}.txt'.format(opt.experiment, epoch), 'w')
for i, dataBatch in enumerate(brdfLoader):
    j += 1
    # Load data from cpu to gpu
    labelIndexBatch = labelIndexBatch.cuda(opt.gpuId)
    maskBatch = maskBatch.cuda(opt.gpuId)

    encoder_normal = encoder_normal.cuda(opt.gpuId)
    decoder_normal = decoder_normal.cuda(opt.gpuId)

    encoder_dilation = encoder_dilation.cuda(opt.gpuId)
    decoder_dilation = decoder_dilation.cuda(opt.gpuId)

    encoder_spp = encoder_spp.cuda(opt.gpuId)
    decoder_spp = decoder_spp.cuda(opt.gpuId)

# Initialize dataLoader
segDataset = dataLoader.BatchLoader(imageRoot=opt.imageRoot,
                                    labelRoot=opt.labelRoot,
                                    fileList=opt.fileList,
                                    imHeight=320,
                                    imWidth=320)
segLoader = DataLoader(segDataset,
                       batch_size=opt.batchSize,
                       num_workers=0,
                       shuffle=False)

lossArr = []
iteration = 0
epoch = opt.epochId
confcounts = np.zeros((opt.numClasses, opt.numClasses), dtype=np.int64)
accuracy = np.zeros(opt.numClasses, dtype=np.float32)
testingLog = open('{0}/testingLog_{1}.txt'.format(opt.experiment, epoch), 'w')
for i, dataBatch in enumerate(segLoader):
    iteration += 1
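The snippet above allocates a numClasses-by-numClasses confcounts matrix and a per-class accuracy array before entering the test loop. A minimal sketch of turning such confusion counts into per-class intersection-over-union, assuming rows index ground-truth labels and columns index predictions (the helper name is illustrative, not from the original script):

import numpy as np

def per_class_iou(confcounts):
    # confcounts[gt, pred] accumulates pixel counts; the diagonal holds true positives.
    numClasses = confcounts.shape[0]
    iou = np.zeros(numClasses, dtype=np.float32)
    for c in range(numClasses):
        tp = confcounts[c, c]
        union = confcounts[c, :].sum() + confcounts[:, c].sum() - tp
        iou[c] = float(tp) / max(union, 1)
    return iou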
Example #3
os.system('mkdir -p  {0}'.format(opt.experimentAtoB))
os.system('mkdir -p  {0}'.format(opt.experimentBtoA))

opt.manualSeed = random.randint(1, 10000)  # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)

if torch.cuda.is_available() and not opt.cuda:
    print(
        "WARNING: You have a CUDA device, so you should probably run with --cuda"
    )

dset = dataLoader.BatchLoader(dataroot1=opt.dataroot1,
                              dataroot2=opt.dataroot2,
                              batch_size=opt.batchSize,
                              im_size=[opt.imageSize, opt.imageSize],
                              isRandom=False,
                              phase='TEST')

imgsDomA = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)
imgsDomB = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)
netG_AtoB = models.Generator(opt.isInstanceNorm)
netG_BtoA = models.Generator(opt.isInstanceNorm)
modelName_AtoB = select_model('{0}/netG_AtoB_iter_*.pth'.format(
    opt.experiment))
modelName_BtoA = select_model('{0}/netG_BtoA_iter_*.pth'.format(
    opt.experiment))
netG_AtoB.load_state_dict(torch.load(modelName_AtoB))
netG_BtoA.load_state_dict(torch.load(modelName_BtoA))
print(modelName_AtoB)
print(modelName_BtoA)
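Here select_model resolves the glob pattern 'netG_AtoB_iter_*.pth' to a single checkpoint path before the generator weights are loaded. Its implementation is not shown in this snippet; a plausible sketch, assuming the iteration count is encoded at the end of each filename, picks the checkpoint with the largest iteration index:

import glob
import re

def select_model(pattern):
    # Hypothetical helper: return the matching checkpoint with the largest iteration number.
    paths = glob.glob(pattern)
    assert len(paths) > 0, 'no checkpoint matches {0}'.format(pattern)
    def iter_id(path):
        m = re.search(r'iter_(\d+)\.pth$', path)
        return int(m.group(1)) if m else -1
    return max(paths, key=iter_id)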
Example #4
File: test.py  Project: Abhisek97/hw3
os.system('mkdir {0}'.format(opt.testRoot))

# Hyper parameter
imSize = 28

# data batch
imBatch = Variable(torch.FloatTensor(opt.batchSize, 1, imSize, imSize))
labelBatch = Variable(torch.LongTensor(opt.batchSize, 1))

# Network
classifier = models.classifier(isDropOut=opt.isDropOut).eval()
classifier.load_state_dict(
    torch.load('{0}/classifier_{1}.pth'.format(opt.trainRoot, opt.nepoch - 1)))

# DataLoader
mnistDataset = dataLoader.BatchLoader(dataRoot=opt.dataRoot, phase='TEST')
mnistLoader = DataLoader(mnistDataset,
                         batch_size=opt.batchSize,
                         num_workers=4,
                         shuffle=False)

# Move data and network to gpu
if opt.cuda:
    imBatch = imBatch.cuda(opt.gpuId)
    labelBatch = labelBatch.cuda(opt.gpuId)
    classifier = classifier.cuda(opt.gpuId)

# train
j = 0
errorList = []
testingLog = open(
print("Random Seed: ", opt.seed)
random.seed(opt.seed)
torch.manual_seed(opt.seed)

if torch.cuda.is_available() and not opt.cuda:
    print(
        "WARNING: You have a CUDA device, so you should probably run with --cuda"
    )

####################################
brdfDataset = dataLoader.BatchLoader(imWidth=opt.envCol,
                                     imHeight=opt.envRow,
                                     dataRoot=opt.dataRoot,
                                     rs=opt.rs,
                                     re=opt.re,
                                     envHeight=opt.envHeight,
                                     envWidth=opt.envWidth,
                                     envRow=opt.envRow,
                                     envCol=opt.envCol,
                                     isAllLight=True,
                                     isLight=True)
brdfLoader = DataLoader(brdfDataset,
                        batch_size=opt.batchSize,
                        num_workers=0,
                        shuffle=False)
envNum = opt.envRow * opt.envCol

for i, dataBatch in enumerate(brdfLoader):
    maskBatch = dataBatch['segObj']
    envBatch = dataBatch['envmaps']
    envIndBatch = dataBatch['envmapsInd']
    imP2PredBatch = imP2PredBatch.cuda(opt.gpuId)
    imP3PredBatch = imP3PredBatch.cuda(opt.gpuId)

    encoderRefs[0] = encoderRefs[0].cuda(opt.gpuId)
    albedoRefs[0] = albedoRefs[0].cuda(opt.gpuId)
    normalRefs[0] = normalRefs[0].cuda(opt.gpuId)
    roughRefs[0] = roughRefs[0].cuda(opt.gpuId)
    depthRefs[0] = depthRefs[0].cuda(opt.gpuId)
    envRefs[0] = envRefs[0].cuda(opt.gpuId)
    globIllu1to2 = globIllu1to2.cuda(opt.gpuId)
    globIllu2to3 = globIllu2to3.cuda(opt.gpuId)
####################################

####################################
brdfDataset = dataLoader.BatchLoader(opt.dataRoot,
                                     imSize=opt.imageSize,
                                     cascade=opt.cascadeLevel - 1)
brdfLoader = DataLoader(brdfDataset,
                        batch_size=opt.batchSize,
                        num_workers=8,
                        shuffle=False)

j = 0
albedoErrsNpList = np.ones([1, 2], dtype=np.float32)
normalErrsNpList = np.ones([1, 2], dtype=np.float32)
roughErrsNpList = np.ones([1, 2], dtype=np.float32)
depthErrsNpList = np.ones([1, 2], dtype=np.float32)

globalIllu1ErrsNpList = np.ones([1, 2], dtype=np.float32)
globalIllu2ErrsNpList = np.ones([1, 2], dtype=np.float32)
globalIllu3ErrsNpList = np.ones([1, 2], dtype=np.float32)
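The error arrays above start as a single placeholder row of ones and, in these test scripts, grow by one row per batch so that a running mean can be reported. A sketch of that accumulation pattern; the per-batch error values below are illustrative placeholders, not outputs of the original script:

# Inside the test loop: append this batch's errors as a new row, then average
# over everything after the initial placeholder row.
albedoErrs = np.array([[0.05, 0.03]], dtype=np.float32)  # assumed per-cascade errors for one batch
albedoErrsNpList = np.concatenate([albedoErrsNpList, albedoErrs], axis=0)
meanAlbedoErr = np.mean(albedoErrsNpList[1:, :], axis=0)
print('Albedo error per cascade level: {0}'.format(meanAlbedoErr))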
Example #7
for p in [opt.outputPath, image_directory, summary_directory, model_directory]:
    if not os.path.exists(p):
        os.makedirs(p)

restore = opt.isRefine

if __name__ == "__main__":

    assert opt.model in ['AE', 'RAE']

    # Load Data
    if opt.phase == 'train' or opt.phase == 'test':
        cropSize = (opt.cropH, opt.cropW)
    else:
        cropSize = (416, 800)
    data_loader = dataLoader.BatchLoader(
        dataRoot=opt.dataPath, inpChannel=opt.inpChannel, batchSize=opt.batchSize,
        seqLen=opt.seqLen, scaleSize=(opt.scaleH, opt.scaleW), cropSize=cropSize,
        rseed=opt.manualSeed, phase=opt.phase)

    print('Data Loaded')

    start_time = time.strftime("%Y%m%d%H%M%S", time.localtime())

    # file directory
    model_name = opt.model
    model_dir = os.path.join(model_directory, model_name)
    log_dir = os.path.join(summary_directory, (model_name + '_' + start_time))
    img_dir = os.path.join(image_directory, (model_name + '_' + start_time))

    enc_model_dir = os.path.join(model_dir, 'Enc.ckpt')
    dec_model_dir = os.path.join(model_dir, 'Dec.ckpt')

    # initialize model
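    The snippet stops at the model-initialization comment. A minimal sketch of how the opt.model switch ('AE' vs 'RAE') and the checkpoint paths above might be consumed; the class names models.Encoder / models.Decoder and their arguments are assumptions, not taken from the original project:

    # Hypothetical model construction and optional restore from the .ckpt paths above.
    encoder = models.Encoder(inpChannel=opt.inpChannel)
    decoder = models.Decoder(isRecurrent=(opt.model == 'RAE'))
    if restore and os.path.exists(enc_model_dir):
        encoder.load_state_dict(torch.load(enc_model_dir))
        decoder.load_state_dict(torch.load(dec_model_dir))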
Example #8
                                             imWidth=opt.imageWidth,
                                             imHeight=opt.imageHeight,
                                             envWidth=opt.envWidth,
                                             envHeight=opt.envHeight,
                                             sampleNum=opt.sampleNum)
else:
    buildCostVolume = None

brdfDataset = dataLoader.BatchLoader(opt.dataRoot,
                                     shapeRoot=opt.shapeRoot,
                                     imHeight=opt.imageHeight,
                                     imWidth=opt.imageWidth,
                                     envHeight=opt.envHeight,
                                     envWidth=opt.envWidth,
                                     isRandom=False,
                                     phase='TEST',
                                     rseed=1,
                                     isLoadVH=True,
                                     isLoadEnvmap=True,
                                     isLoadCam=True,
                                     shapeRs=opt.shapeStart,
                                     shapeRe=opt.shapeEnd,
                                     camNum=opt.camNumReal,
                                     batchSize=opt.batchSize)
brdfLoader = DataLoader(brdfDataset,
                        batch_size=1,
                        num_workers=0,
                        shuffle=False)

j = 0
normal1ErrsNpList = np.ones( [1, 2], dtype = np.float32 )
Example #9
if not opt.noCuda:
    device = 'cuda'
else:
    device = 'cpu'

imBatch = imBatch.to(device)
labelBatch = labelBatch.to(device)
labelIndexBatch = labelIndexBatch.to(device)
maskBatch = maskBatch.to(device)
encoder = encoder.to(device)
decoder = decoder.to(device)

# Initialize dataLoader
segDataset = dataLoader.BatchLoader(
    imageRoot=opt.imageRoot,
    labelRoot=opt.labelRoot,
    fileList=opt.fileList
)
segLoader = DataLoader(segDataset, batch_size=opt.batchSize, num_workers=0, shuffle=True)

lossArr = []
iteration = 0
epoch = opt.epochId
confcounts = np.zeros((opt.numClasses, opt.numClasses), dtype=np.int64)
accuracy = np.zeros(opt.numClasses, dtype=np.float32)
testingLog = open('{0}/testingLog_{1}.txt'.format(opt.experiment, epoch), 'w')
for i, dataBatch in enumerate(segLoader):
    iteration += 1

    # Read data
    imBatch = Variable(dataBatch['im']).to(device)
Example #10
####################################


####################################
# Optimizer
opEncoder = optim.Adam(encoder1.parameters(), lr=1e-4 * lr_scale, betas=(0.5, 0.999) )
opAlbedo = optim.Adam(albedoDecoder1.parameters(), lr=1e-4 * lr_scale, betas=(0.5, 0.999) )
opNormal = optim.Adam(normalDecoder1.parameters(), lr=1e-4 * lr_scale, betas=(0.5, 0.999) )
opRough = optim.Adam(roughDecoder1.parameters(), lr=1e-4 * lr_scale, betas=(0.5, 0.999) )
opDepth = optim.Adam(depthDecoder1.parameters(), lr=1e-4 * lr_scale, betas=(0.5, 0.999) )
#####################################


####################################
brdfDataset = dataLoader_ours.BatchLoader( opt.dataRoot,
        imWidth = opt.imWidth1, imHeight = opt.imHeight1,
        cascadeLevel = 0, isLight = False, phase = 'TRAIN' )
IIWDataset = dataLoader_iiw.IIWLoader(
        dataRoot = opt.IIWRoot,
        imHeight = opt.imHeight1,
        imWidth = opt.imWidth1,
        phase = 'TRAIN' )
trainDataset = dataLoader_iiw.ConcatDataset(brdfDataset, IIWDataset )
brdfLoader = DataLoader(trainDataset, batch_size = opt.batchSize,
        num_workers = 8, shuffle = False )

j = 0
# BRDFLost
albedoErrsNpList = np.ones( [1, 1], dtype = np.float32 )
normalErrsNpList = np.ones( [1, 1], dtype = np.float32 )
roughErrsNpList = np.ones( [1, 1], dtype = np.float32 )
                      betas=(0.5, 0.999))
opNormal = optim.Adam(normalDecoder.parameters(),
                      lr=1e-4 * lr_scale,
                      betas=(0.5, 0.999))
opRough = optim.Adam(roughDecoder.parameters(),
                     lr=1e-4 * lr_scale,
                     betas=(0.5, 0.999))
opDepth = optim.Adam(depthDecoder.parameters(),
                     lr=1e-4 * lr_scale,
                     betas=(0.5, 0.999))
#####################################

####################################
brdfDataset = dataLoader_ours.BatchLoader(opt.dataRoot,
                                          imWidth=opt.imWidth,
                                          imHeight=opt.imHeight,
                                          cascadeLevel=0,
                                          isLight=False)
IIWDataset = dataLoader_iiw.IIWLoader(dataRoot=opt.IIWRoot,
                                      imHeight=opt.imHeight,
                                      imWidth=opt.imWidth,
                                      phase='TRAIN')
trainDataset = dataLoader_iiw.ConcatDataset(brdfDataset, IIWDataset)
brdfLoader = DataLoader(trainDataset,
                        batch_size=opt.batchSize,
                        num_workers=6,
                        shuffle=True)

j = 0
# BRDFLost
albedoErrsNpList = np.ones([1, 1], dtype=np.float32)
Example #12
opt.experiment += ('_' + opt.dataroot1.split('/')[0])
os.system('mkdir -p  {0}'.format(opt.experiment))

opt.manualSeed = random.randint(1, 10000)  # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)

if torch.cuda.is_available() and not opt.cuda:
    print(
        "WARNING: You have a CUDA device, so you should probably run with --cuda"
    )

dset = dataLoader.BatchLoader(
    dataroot1=opt.dataroot1,
    dataroot2=opt.dataroot2,
    batch_size=opt.batchSize,
    im_size=[opt.imageSize, opt.imageSize],
)

imgsDomA = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)
imgsDomB = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)
reallabel = torch.FloatTensor(opt.batchSize)
fakelabel = torch.FloatTensor(opt.batchSize)
labelBtoA = torch.FloatTensor(opt.batchSize)
labelAtoB = torch.FloatTensor(opt.batchSize)

netG_AtoB = models.Generator(opt.isInstanceNorm)
netD_B = models.Discriminator70()
netG_BtoA = models.Generator(opt.isInstanceNorm)
netD_A = models.Discriminator70()
Example #13
####################################


####################################
# Optimizer
lr_scale = 1
opLightEncoder = optim.Adam(lightEncoder.parameters(), lr=1e-4 * lr_scale, betas=(0.5, 0.999) )
opAxisDecoder = optim.Adam(axisDecoder.parameters(), lr=1e-4 * lr_scale, betas=(0.5, 0.999) )
opLambDecoder = optim.Adam(lambDecoder.parameters(), lr=1e-4 * lr_scale, betas=(0.5, 0.999) )
opWeightDecoder = optim.Adam(weightDecoder.parameters(), lr=1e-4 * lr_scale, betas=(0.5, 0.999) )
#####################################


####################################
brdfDataset = dataLoader.BatchLoader( opt.dataRoot, isAllLight = True,
        imWidth = opt.imWidth, imHeight = opt.imHeight, isLight = True,
        cascadeLevel = opt.cascadeLevel, SGNum = opt.SGNum )
brdfLoader = DataLoader(brdfDataset, batch_size = opt.batchSize,
        num_workers = 16, shuffle = True )

j = 0
# BRDFLost
albedoErrsNpList = np.ones( [1, 1], dtype = np.float32 )
normalErrsNpList = np.ones( [1, 1], dtype = np.float32 )
roughErrsNpList = np.ones( [1, 1], dtype = np.float32 )
depthErrsNpList = np.ones( [1, 1], dtype = np.float32 )

reconstErrsNpList = np.ones( [1, 1], dtype = np.float32 )
renderErrsNpList = np.ones( [1, 1], dtype = np.float32 )
for epoch in list(range(0, opt.nepoch) ):
    trainingLog = open('{0}/trainingLog_{1}.txt'.format(opt.experiment, epoch), 'w')
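    Each epoch opens its own trainingLog file. The per-iteration pattern in these training scripts is to append the batch errors to the numpy lists above and write a one-line summary to both the console and the log; a sketch of that step, where albedoErr and normalErr stand in for the loss values computed in the (omitted) forward pass:

    for j, dataBatch in enumerate(brdfLoader):
        # ... forward pass and loss computation omitted ...
        # Logging sketch; albedoErr / normalErr are placeholders for the real loss values.
        logLine = 'Epoch {0} iter {1}: albedo {2:.4f} normal {3:.4f}\n'.format(
            epoch, j, albedoErr, normalErr)
        print(logLine.strip())
        trainingLog.write(logLine)
    trainingLog.close()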
Example #14
encoder = encoder.eval()
decoder = decoder.eval()

# Move network and containers to gpu
if not opt.noCuda:
    imBatch = imBatch.cuda(opt.gpuId )
    labelBatch = labelBatch.cuda(opt.gpuId )
    labelIndexBatch = labelIndexBatch.cuda(opt.gpuId )
    maskBatch = maskBatch.cuda(opt.gpuId )
    encoder = encoder.cuda(opt.gpuId )
    decoder = decoder.cuda(opt.gpuId )

# Initialize dataLoader
segDataset = dataLoader.BatchLoader(
        imageRoot = opt.imageRoot,
        labelRoot = opt.labelRoot,
        fileList = opt.fileList,
        randomShuffle=False
        )
segLoader = DataLoader(segDataset, batch_size=opt.batchSize, num_workers=0, shuffle=False )

lossArr = []
iteration = 0
epoch = opt.epochId
confcounts = np.zeros( (opt.numClasses, opt.numClasses), dtype=np.int64 )
accuracy = np.zeros(opt.numClasses, dtype=np.float32 )
testingLog = open('{0}/testingLog_{1}.txt'.format(opt.experiment, epoch), 'w')
for i, dataBatch in enumerate(segLoader ):
    iteration += 1

    # Read data
    image_cpu = dataBatch['im']
Example #15
                      lr=1e-4 * lr_scale,
                      betas=(0.5, 0.999))
opNormal = optim.Adam(normalDecoder.parameters(),
                      lr=1e-4 * lr_scale,
                      betas=(0.5, 0.999))
opRough = optim.Adam(roughDecoder.parameters(),
                     lr=1e-4 * lr_scale,
                     betas=(0.5, 0.999))
opDepth = optim.Adam(depthDecoder.parameters(),
                     lr=1e-4 * lr_scale,
                     betas=(0.5, 0.999))
#####################################

####################################
brdfDataset = dataLoader.BatchLoader(opt.dataRoot,
                                     imWidth=opt.imWidth,
                                     imHeight=opt.imHeight,
                                     cascadeLevel=opt.cascadeLevel)
brdfLoader = DataLoader(brdfDataset,
                        batch_size=opt.batchSize,
                        num_workers=8,
                        shuffle=True)

j = 0
albedoErrsNpList = np.ones([1, 1], dtype=np.float32)
normalErrsNpList = np.ones([1, 1], dtype=np.float32)
roughErrsNpList = np.ones([1, 1], dtype=np.float32)
depthErrsNpList = np.ones([1, 1], dtype=np.float32)

for epoch in list(range(opt.epochIdFineTune + 1, opt.nepoch)):
    trainingLog = open('{0}/trainingLog_{1}.txt'.format(opt.experiment, epoch),
                       'w')
                           batchSize=opt.batchSize,
                           fov=opt.fov,
                           imWidth=opt.imageWidth,
                           imHeight=opt.imageHeight,
                           envWidth=opt.envWidth,
                           envHeight=opt.envHeight)

brdfDataset = dataLoader.BatchLoader(opt.dataRoot,
                                     shapeRoot=opt.shapeRoot,
                                     imHeight=opt.imageHeight,
                                     imWidth=opt.imageWidth,
                                     envHeight=opt.envHeight,
                                     envWidth=opt.envWidth,
                                     isRandom=True,
                                     phase='TRAIN',
                                     rseed=1,
                                     isLoadOptim=True,
                                     isLoadEnvmap=True,
                                     isLoadCam=True,
                                     isLoadVH=True,
                                     isLoadPoints=True,
                                     shapeRs=opt.shapeStart,
                                     shapeRe=opt.shapeEnd,
                                     camNum=opt.camNum)
brdfLoader = DataLoader(brdfDataset,
                        batch_size=1,
                        num_workers=12,
                        shuffle=True)

sampler = models.groundtruthSampler(camNum=opt.camNum,
                                    fov=opt.fov,
Example #17
    encoder = encoder.cuda(opt.gpuId)
    albedoDecoder = albedoDecoder.cuda(opt.gpuId)
    normalDecoder = normalDecoder.cuda(opt.gpuId)
    roughDecoder = roughDecoder.cuda(opt.gpuId)
    depthDecoder = depthDecoder.cuda(opt.gpuId)

    albedoBs = albedoBs.cuda(opt.gpuId)
    normalBs = normalBs.cuda(opt.gpuId)
    roughBs = roughBs.cuda(opt.gpuId)
    depthBs = depthBs.cuda(opt.gpuId)
####################################

####################################
brdfDataset = dataLoader.BatchLoader(opt.dataRoot,
                                     imWidth=opt.imWidth,
                                     imHeight=opt.imHeight,
                                     rseed=opt.seed,
                                     cascadeLevel=opt.cascadeLevel,
                                     phase='TEST')
brdfLoader = DataLoader(brdfDataset,
                        batch_size=opt.batchSize,
                        num_workers=6,
                        shuffle=False)

j = 0
albedoErrsNpList = np.ones([1, 2], dtype=np.float32)
normalErrsNpList = np.ones([1, 2], dtype=np.float32)
roughErrsNpList = np.ones([1, 2], dtype=np.float32)
depthErrsNpList = np.ones([1, 2], dtype=np.float32)

epoch = opt.nepoch
testingLog = open('{0}/testingLog_{1}.txt'.format(opt.testRoot, epoch), 'w')
Example #18
net = faceNet.faceNet(m=opt.marginFactor, feature=False)
lossLayer = faceNet.CustomLoss(s=opt.scaleFactor)

# Move network and containers to gpu
if not opt.noCuda:
    net = net.cuda(opt.gpuId)

# Initialize optimizer
optimizer = optim.SGD(net.parameters(),
                      lr=opt.initLR,
                      momentum=0.9,
                      weight_decay=5e-4)

# Initialize dataLoader
faceDataset = dataLoader.BatchLoader(imageRoot=opt.imageRoot,
                                     alignmentRoot=opt.alignmentRoot,
                                     cropSize=(opt.imWidth, opt.imHeight))
faceLoader = DataLoader(faceDataset,
                        batch_size=opt.batchSize,
                        num_workers=16,
                        shuffle=False)

lossArr = []
accuracyArr = []
iteration = 0
for epoch in range(0, opt.nepoch):
    trainingLog = open('{0}/trainingLog_{1}.txt'.format(opt.experiment, epoch),
                       'w')
    for i, dataBatch in enumerate(faceLoader):
        iteration += 1
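        The training snippet stops right after opening the inner loop. A typical step for this margin-based face classifier, sketched under the assumption that each dataBatch carries 'img' and 'label' entries and that the network's forward pass also takes the labels (as margin-based heads usually do); none of these names are confirmed by the original file:

        # One training-step sketch (field names and the forward signature are assumptions).
        imBatch = dataBatch['img']
        labelBatch = dataBatch['label']
        if not opt.noCuda:
            imBatch = imBatch.cuda(opt.gpuId)
            labelBatch = labelBatch.cuda(opt.gpuId)

        optimizer.zero_grad()
        pred = net(imBatch, labelBatch)
        loss = lossLayer(pred, labelBatch)
        loss.backward()
        optimizer.step()

        lossArr.append(loss.item())
        _, predLabel = torch.max(pred, dim=1)
        accuracyArr.append((predLabel == labelBatch).float().mean().item())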
Example #19
    normalDecoder = normalDecoder.cuda(opt.gpuId)
    roughDecoder = roughDecoder.cuda(opt.gpuId)
    depthDecoder = depthDecoder.cuda(opt.gpuId)

    lightEncoder = lightEncoder.cuda(opt.gpuId)
    axisDecoder = axisDecoder.cuda(opt.gpuId)
    lambDecoder = lambDecoder.cuda(opt.gpuId)
    weightDecoder = weightDecoder.cuda(opt.gpuId)
####################################

####################################

####################################
brdfDataset = dataLoader.BatchLoader(opt.dataRoot,
                                     phase=opt.mode,
                                     imWidth=opt.imWidth,
                                     imHeight=opt.imHeight,
                                     isLight=True,
                                     cascadeLevel=opt.cascadeLevel)
brdfLoader = DataLoader(brdfDataset,
                        batch_size=opt.batchSize,
                        num_workers=16,
                        shuffle=False)

j = 0
# BRDFLost
albedoErrsNpList = np.ones([1, 1], dtype=np.float32)
normalErrsNpList = np.ones([1, 1], dtype=np.float32)
roughErrsNpList = np.ones([1, 1], dtype=np.float32)
depthErrsNpList = np.ones([1, 1], dtype=np.float32)

reconstErrsNpList = np.ones([1, 1], dtype=np.float32)