        param.requires_grad = False
    for param in albedoDecoders[n].parameters():
        param.requires_grad = False
    for param in normalDecoders[n].parameters():
        param.requires_grad = False
    for param in roughDecoders[n].parameters():
        param.requires_grad = False
    for param in depthDecoders[n].parameters():
        param.requires_grad = False
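    # The loops above are the standard PyTorch recipe for freezing pretrained
    # modules so no gradients are computed or applied to them. A minimal
    # sanity check (illustrative only, not part of the original script):
    #
    #     assert not any(p.requires_grad
    #                    for p in albedoDecoders[n].parameters())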

    if opt.isLight or (opt.level == 2 and n == 0):
        # Light network
        lightEncoders.append(
            models.encoderLight(cascadeLevel=n, SGNum=opt.SGNum).eval())
        axisDecoders.append(
            models.decoderLight(mode=0, SGNum=opt.SGNum).eval())
        lambDecoders.append(
            models.decoderLight(mode=1, SGNum=opt.SGNum).eval())
        weightDecoders.append(
            models.decoderLight(mode=2, SGNum=opt.SGNum).eval())

        lightEncoders[n].load_state_dict(
            torch.load('{0}/lightEncoder{1}_{2}.pth'.format(
                experimentsLight[n], n, nepochsLight[n] - 1)).state_dict())
        axisDecoders[n].load_state_dict(
            torch.load('{0}/axisDecoder{1}_{2}.pth'.format(
                experimentsLight[n], n, nepochsLight[n] - 1)).state_dict())
        lambDecoders[n].load_state_dict(
            torch.load('{0}/lambDecoder{1}_{2}.pth'.format(
                experimentsLight[n], n, nepochsLight[n] - 1)).state_dict())
        weightDecoders[n].load_state_dict(
            torch.load('{0}/weightDecoder{1}_{2}.pth'.format(
                experimentsLight[n], n, nepochsLight[n] - 1)).state_dict())
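        # Observation (comment added, not original code): each checkpoint name
        # combines the experiment directory, network name, cascade level and
        # final epoch index, and the saved file holds the full nn.Module,
        # which is why torch.load(...) is followed by .state_dict() here.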
Example #2
# Initial network (cascade level 0)
encoder = models.encoder0(cascadeLevel=0)
albedoDecoder = models.decoder0(mode=0)
normalDecoder = models.decoder0(mode=1)
roughDecoder = models.decoder0(mode=2)
depthDecoder = models.decoder0(mode=4)

# Second network (cascade level 1)
encoder1 = models.encoder0(cascadeLevel=1)
albedoDecoder1 = models.decoder0(mode=0)
normalDecoder1 = models.decoder0(mode=1)
roughDecoder1 = models.decoder0(mode=2)
depthDecoder1 = models.decoder0(mode=4)
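# The decoder0 'mode' argument selects the predicted quantity: 0 = albedo,
# 1 = normal, 2 = roughness, 4 = depth (as the variable names above indicate).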

# Lighting networks: SG axis, lambda, and weight predictors
lightEncoder = models.encoderLight(cascadeLevel=0, SGNum=opt.SGNum)
axisDecoder = models.decoderLight(mode=0, SGNum=opt.SGNum)
lambDecoder = models.decoderLight(mode=1, SGNum=opt.SGNum)
weightDecoder = models.decoderLight(mode=2, SGNum=opt.SGNum)

# Rendering layer and SG-to-environment-map conversion modules
renderLayer = models.renderingLayer(isCuda=opt.cuda,
        imWidth=opt.envCol, imHeight=opt.envRow,
        envWidth=opt.envWidth, envHeight=opt.envHeight)

output2env = models.output2env(isCuda=opt.cuda,
        envWidth=opt.envWidth, envHeight=opt.envHeight, SGNum=opt.SGNum)
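# Hypothetical companion step (an assumption, not shown in this excerpt):
# with opt.cuda set, the networks would typically be moved to the GPU, e.g.
#
#     if opt.cuda:
#         encoder = encoder.cuda()
#         albedoDecoder = albedoDecoder.cuda()
#         # ...and likewise for the other decoders and lighting networks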
####################################################################


#########################################
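# Load pretrained weights for the cascade-0 networks.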
encoder.load_state_dict(
        torch.load('{0}/encoder{1}_{2}.pth'.format(opt.experimentBRDF0, 0,