# --- Prepare the (CPU) input image and move it onto the GPU as a batch of 1 ---
# Pick the interpolation mode by whether we are up- or down-scaling:
# INTER_AREA is the conventional choice for shrinking, INTER_LINEAR for enlarging.
# NOTE(review): condition compares nh (presumably the source height) against the
# target newH — confirm nh is set from im_cpu's shape by the caller.
if nh < newH:
    im = cv2.resize(im_cpu, (newW, newH), interpolation=cv2.INTER_AREA)
else:
    im = cv2.resize(im_cpu, (newW, newH), interpolation=cv2.INTER_LINEAR)

# The resized image resolution is reused below as the environment-map grid size.
newEnvWidth = newW
newEnvHeight = newH

# HWC uint8 -> 1xCxHxW float32 in [0, 1]...
im = (np.transpose(im, [2, 0, 1]).astype(np.float32) / 255.0)[np.newaxis, :, :, :]
# ...then rescale so the brightest value is exactly 1 (max-normalization).
im = im / im.max()
# Undo display gamma (x**2.2 approximates sRGB -> linear) before feeding the
# network; wrapped in the legacy torch.autograd.Variable API and moved to GPU.
imBatchSmall = Variable(torch.from_numpy(im**(2.2))).cuda()

# Differentiable rendering layer and SG-parameters -> environment-map decoder,
# sized to the resized image grid and the per-pixel env map resolution from opt.
renderLayer = models.renderingLayer(isCuda=opt.cuda,
        imWidth=newEnvWidth, imHeight=newEnvHeight, fov=fov,
        envWidth=opt.envWidth, envHeight=opt.envHeight)
output2env = models.output2env(isCuda=opt.cuda,
        envWidth=opt.envWidth, envHeight=opt.envHeight, SGNum=opt.SGNum)

########################################################
# Build the cascade network architecture:
# accumulators for per-cascade predictions (filled by the unseen loop below).
########################################################
albedoPreds, normalPreds, roughPreds, depthPreds = [], [], [], []   # BRDF/geometry predictions per cascade
albedoBSPreds, roughBSPreds, depthBSPreds = [], [], []              # bilateral-solver (BS) refined versions — TODO confirm
envmapsPreds, envmapsPredImages, renderedPreds = [], [], []         # lighting (SG params, env images) and re-rendered images
cAlbedos = []
cLights = []
# (continuation) Load pretrained refinement-network weights for cascade level
# opt.cascadeLevel at epoch opt.epochId. The first torch.load below is the
# argument of a normalRefs[0].load_state_dict( opened before this chunk.
# NOTE(review): torch.load unpickles arbitrary objects — only safe on trusted
# checkpoint files.
torch.load('{0}/normalRefs{1}_{2}.pth'.format(opt.modelRoot, opt.cascadeLevel, opt.epochId)))
roughRefs[0].load_state_dict(
    torch.load('{0}/roughRefs{1}_{2}.pth'.format(opt.modelRoot, opt.cascadeLevel, opt.epochId)))
depthRefs[0].load_state_dict(
    torch.load('{0}/depthRefs{1}_{2}.pth'.format(opt.modelRoot, opt.cascadeLevel, opt.epochId)))
envRefs[0].load_state_dict(
    torch.load('{0}/envRefs{1}_{2}.pth'.format(opt.modelRoot, opt.cascadeLevel, opt.epochId)))

# Differentiable rendering layer (this variant takes a gpuId, unlike the
# constructor used elsewhere in the project — presumably a different models.py).
renderLayer = models.renderingLayer(gpuId=opt.gpuId, isCuda=opt.cuda)

# Global-illumination cascade weights come from a separate root/epoch
# (modelRootGlob / epochIdGlob), i.e. they were trained independently.
globIllu1to2.load_state_dict(
    torch.load('{0}/globIllu1to2_{1}.pth'.format(opt.modelRootGlob, opt.epochIdGlob)))
globIllu2to3.load_state_dict(
    torch.load('{0}/globIllu2to3_{1}.pth'.format(opt.modelRootGlob, opt.epochIdGlob)))

# Freeze the loaded refinement networks: no gradients flow into them, so only
# whatever remains trainable elsewhere is updated.
for param in encoderRefs[0].parameters():
    param.requires_grad = False
for param in albedoRefs[0].parameters():
    param.requires_grad = False
for param in normalRefs[0].parameters():
    param.requires_grad = False
# (loop body continues past the end of this chunk)
for param in roughRefs[0].parameters():
# --- Build the two-cascade BRDF estimation networks ---
# decoder0 mode codes (as used consistently in this chunk):
# 0 = albedo, 1 = normal, 2 = roughness, 4 = depth — TODO confirm against models.py.
roughDecoder = models.decoder0(mode=2)
depthDecoder = models.decoder0(mode=4)

# Second cascade: its own encoder (cascadeLevel=1) plus a full decoder set.
encoder1 = models.encoder0(cascadeLevel=1)
albedoDecoder1 = models.decoder0(mode=0)
normalDecoder1 = models.decoder0(mode=1)
roughDecoder1 = models.decoder0(mode=2)
depthDecoder1 = models.decoder0(mode=4)

# Lighting branch: one encoder and three decoders producing the spherical
# Gaussian parameters (axis / lambda / weight), SGNum lobes per pixel.
lightEncoder = models.encoderLight(cascadeLevel=0, SGNum=opt.SGNum)
axisDecoder = models.decoderLight(mode=0, SGNum=opt.SGNum)
lambDecoder = models.decoderLight(mode=1, SGNum=opt.SGNum)
weightDecoder = models.decoderLight(mode=2, SGNum=opt.SGNum)

# Differentiable renderer over the envRow x envCol lighting grid, and the
# SG-parameters -> environment-map converter.
renderLayer = models.renderingLayer(isCuda=opt.cuda,
        imWidth=opt.envCol, imHeight=opt.envRow,
        envWidth=opt.envWidth, envHeight=opt.envHeight)
output2env = models.output2env(isCuda=opt.cuda,
        envWidth=opt.envWidth, envHeight=opt.envHeight, SGNum=opt.SGNum)

####################################################################
# Restore the pretrained cascade-0 BRDF weights (experimentBRDF0, last epoch).
# NOTE(review): these checkpoints store whole module objects — hence
# torch.load(...).state_dict() — which requires the model classes to be
# importable at load time, and is unsafe on untrusted files (pickle).
#########################################
encoder.load_state_dict(
    torch.load('{0}/encoder{1}_{2}.pth'.format(opt.experimentBRDF0, 0, opt.nepochBRDF0-1)).state_dict())
albedoDecoder.load_state_dict(
    torch.load('{0}/albedo{1}_{2}.pth'.format(opt.experimentBRDF0, 0, opt.nepochBRDF0-1)).state_dict())
# (call continues past the end of this chunk)
normalDecoder.load_state_dict(