Example #1
0
    def build_model():
        """Build a Glow-style normalizing flow and move it to ``device``.

        Uses enclosing-scope variables: ``num_inputs``, ``num_hidden``,
        ``num_cond_inputs``, ``args.num_blocks`` and ``device``.

        Returns:
            fnn.FlowSequential: the assembled, weight-initialized flow.
        """
        modules = []

        # Alternating binary mask (0, 1, 0, 1, ...) for the coupling layers.
        # (Removed dead commented-out alternative half/half mask.)
        mask = torch.arange(0, num_inputs) % 2
        mask = mask.to(device).float()

        # Each block: ActNorm -> LU-decomposed invertible linear -> coupling.
        for _ in range(args.num_blocks):
            modules += [
                fnn.ActNorm(num_inputs),
                fnn.LUInvertibleMM(num_inputs),
                fnn.CouplingLayer(num_inputs,
                                  num_hidden,
                                  mask,
                                  num_cond_inputs,
                                  s_act='tanh',
                                  t_act='relu')
            ]
            # Flip the mask so successive couplings transform the other half.
            mask = 1 - mask

        # build model
        model = fnn.FlowSequential(*modules)

        # Orthogonal init for linear weights; zero the bias where present.
        for module in model.modules():
            if isinstance(module, nn.Linear):
                nn.init.orthogonal_(module.weight)
                if hasattr(module, 'bias') and module.bias is not None:
                    module.bias.data.fill_(0)

        model.to(device)

        return model
Example #2
0
    def testCoupling(self):
        """A lone CouplingLayer must invert exactly with zero net log-det."""
        flow = fnn.FlowSequential(fnn.CouplingLayer(NUM_INPUTS, NUM_HIDDEN))

        inputs = torch.randn(BATCH_SIZE, NUM_INPUTS)

        encoded, fwd_logdets = flow(inputs)
        recovered, bwd_logdets = flow(encoded, mode='inverse')

        # Forward and inverse log-determinants must cancel out.
        worst_logdet = (fwd_logdets + bwd_logdets).abs().max()
        self.assertTrue(worst_logdet < EPS, 'CouplingLayer Det is not zero.')
        # The inverse pass must reconstruct the original input.
        worst_err = (inputs - recovered).abs().max()
        self.assertTrue(worst_err < EPS, 'CouplingLayer is wrong')
Example #3
0
    def testSequentialBN(self):
        """A BatchNormFlow stack must invert exactly in train and eval modes.

        The original body repeated the same roundtrip check three times;
        the duplication is factored into a local helper. Assertion message
        text is preserved byte-for-byte via the ``suffix`` argument.
        """
        m1 = fnn.FlowSequential(fnn.BatchNormFlow(NUM_INPUTS),
                                fnn.InvertibleMM(NUM_INPUTS),
                                fnn.CouplingLayer(NUM_INPUTS, NUM_HIDDEN))

        def check_roundtrip(suffix):
            # Forward then inverse on fresh data: log-dets must cancel and
            # the input must be recovered.
            x = torch.randn(BATCH_SIZE, NUM_INPUTS)
            y, logdets = m1(x)
            z, inv_logdets = m1(y, mode='inverse')
            self.assertTrue((logdets + inv_logdets).abs().max() < EPS,
                            'Sequential BN Det is not zero' + suffix)
            self.assertTrue((x - z).abs().max() < EPS,
                            'Sequential BN is wrong' + suffix)

        m1.train()
        check_roundtrip('.')
        # Second run on fresh data, after the first pass has updated state.
        check_roundtrip(' for the second run.')

        # Eval run.
        m1.eval()
        check_roundtrip(' for the eval run.')
Example #4
0
    def testSequential(self):
        """An ActNorm + InvertibleMM + CouplingLayer stack must invert exactly.

        The original body repeated the same roundtrip check twice; the
        duplication is factored into a local helper. Assertion message text
        is preserved byte-for-byte via the ``suffix`` argument.

        NOTE(review): ``mask`` is not defined in this snippet — presumably a
        module-level global of the test file; confirm against the full file.
        """
        m1 = fnn.FlowSequential(
            fnn.ActNorm(NUM_INPUTS), fnn.InvertibleMM(NUM_INPUTS),
            fnn.CouplingLayer(NUM_INPUTS, NUM_HIDDEN, mask))

        def check_roundtrip(suffix):
            # Forward then inverse on fresh data: log-dets must cancel and
            # the input must be recovered.
            x = torch.randn(BATCH_SIZE, NUM_INPUTS)
            y, logdets = m1(x)
            z, inv_logdets = m1(y, mode='inverse')
            self.assertTrue((logdets + inv_logdets).abs().max() < EPS,
                            'Sequential Det is not zero' + suffix)
            self.assertTrue((x - z).abs().max() < EPS,
                            'Sequential is wrong' + suffix)

        check_roundtrip('.')
        # Second run on fresh data, after the first pass has updated state.
        check_roundtrip(' for the second run.')
Example #5
0
# Container for the flow's layers, assembled below according to args.flow.
modules = []

# Only these architectures are handled by this script.
assert args.flow in ['maf', 'maf-split', 'maf-split-glow', 'realnvp', 'glow']
if args.flow == 'glow':
    # Alternating binary mask (0, 1, 0, 1, ...) for the coupling layers.
    mask = torch.arange(0, num_inputs) % 2
    mask = mask.to(device).float()

    print("Warning: Results for GLOW are not as good as for MAF yet.")
    # Each Glow block: batch-norm flow -> LU invertible linear -> coupling.
    for _ in range(args.num_blocks):
        modules += [
            fnn.BatchNormFlow(num_inputs),
            fnn.LUInvertibleMM(num_inputs),
            fnn.CouplingLayer(num_inputs,
                              num_hidden,
                              mask,
                              num_cond_inputs,
                              s_act='tanh',
                              t_act='relu')
        ]
        # Flip the mask so successive couplings transform the other half.
        mask = 1 - mask
elif args.flow == 'realnvp':
    # RealNVP uses the same alternating coupling mask.
    mask = torch.arange(0, num_inputs) % 2
    mask = mask.to(device).float()

    for _ in range(args.num_blocks):
        modules += [
            fnn.CouplingLayer(num_inputs,
                              num_hidden,
                              mask,
                              num_cond_inputs,
                              s_act='tanh',
                              # NOTE(review): snippet is truncated here in the source.
Example #6
0
    'MOONS': 64
}[args.dataset]

# Fix: the original used `is 'GAS'`, an identity comparison against a string
# literal that only works by accident of CPython interning (and raises a
# SyntaxWarning on modern Python); value equality is what is meant.
act = 'tanh' if args.dataset == 'GAS' else 'relu'

# Container for the flow's layers, assembled per block below.
modules = []

# Only MAF and Glow are supported by this script.
assert args.flow in ['maf', 'glow']
for _ in range(args.num_blocks):
    if args.flow == 'glow':
        print("Warning: Results for GLOW are not as good as for MAF yet.")
        # Glow block: batch-norm flow -> invertible linear -> coupling layer.
        modules += [
            fnn.BatchNormFlow(num_inputs),
            fnn.InvertibleMM(num_inputs),
            fnn.CouplingLayer(num_inputs,
                              num_hidden,
                              s_act='tanh',
                              t_act='relu')
        ]
    elif args.flow == 'maf':
        # MAF block: masked autoregressive layer -> batch-norm -> reverse.
        modules += [
            fnn.MADE(num_inputs, num_hidden, act=act),
            fnn.BatchNormFlow(num_inputs),
            fnn.Reverse(num_inputs)
        ]

model = fnn.FlowSequential(*modules)

# Orthogonal init for linear weights; zero biases where present.
# Guard against bias=None (nn.Linear(..., bias=False)), matching the
# guarded initialization used by the other builders in this file.
for module in model.modules():
    if isinstance(module, nn.Linear):
        nn.init.orthogonal_(module.weight)
        if module.bias is not None:
            module.bias.data.fill_(0)
Example #7
0
def init_model(args, num_inputs=72):
    """Construct a normalizing-flow model according to ``args.flow``.

    Args:
        args: namespace providing ``no_cuda``, ``device``, ``num_hidden``,
            ``flow`` (one of ``'maf'``, ``'realnvp'``, ``'glow'``) and
            ``num_blocks``. ``args.cuda`` is set as a side effect.
        num_inputs: dimensionality of the data the flow models.

    Returns:
        fnn.FlowSequential: the assembled flow with orthogonally
        initialized linear weights.

    NOTE(review): the coupling masks are moved to ``device`` but the model
    itself is returned without ``.to(device)`` — callers appear to handle
    the move (compare ``build_model`` above); confirm.
    """
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    if args.cuda:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.device
        device = torch.device("cuda:" + args.device)
    else:
        device = torch.device("cpu")

    # Network structure; the flow is unconditional here.
    num_hidden = args.num_hidden
    num_cond_inputs = None

    act = 'relu'
    assert act in ['relu', 'sigmoid', 'tanh']

    # Only these normalizing-flow architectures are supported.
    assert args.flow in ['maf', 'realnvp', 'glow']

    layers = []
    if args.flow in ('glow', 'realnvp'):
        # Alternating binary mask for the coupling layers, flipped per block.
        mask = (torch.arange(0, num_inputs) % 2).to(device).float()

    if args.flow == 'glow':
        print("Warning: Results for GLOW are not as good as for MAF yet.")
        for _ in range(args.num_blocks):
            # Glow block: batch-norm flow -> LU invertible linear -> coupling.
            layers.append(fnn.BatchNormFlow(num_inputs))
            layers.append(fnn.LUInvertibleMM(num_inputs))
            layers.append(fnn.CouplingLayer(num_inputs, num_hidden, mask,
                                            num_cond_inputs,
                                            s_act='tanh', t_act='relu'))
            mask = 1 - mask
    elif args.flow == 'realnvp':
        for _ in range(args.num_blocks):
            # RealNVP block: coupling layer followed by a batch-norm flow.
            layers.append(fnn.CouplingLayer(num_inputs, num_hidden, mask,
                                            num_cond_inputs,
                                            s_act='tanh', t_act='relu'))
            layers.append(fnn.BatchNormFlow(num_inputs))
            mask = 1 - mask
    elif args.flow == 'maf':
        for _ in range(args.num_blocks):
            # MAF block: masked autoregressive layer -> batch-norm -> reverse.
            layers.append(fnn.MADE(num_inputs, num_hidden, num_cond_inputs,
                                   act=act))
            layers.append(fnn.BatchNormFlow(num_inputs))
            layers.append(fnn.Reverse(num_inputs))

    model = fnn.FlowSequential(*layers)

    # Orthogonal init for linear weights; zero biases where present.
    for module in model.modules():
        if isinstance(module, nn.Linear):
            nn.init.orthogonal_(module.weight)
            if getattr(module, 'bias', None) is not None:
                module.bias.data.fill_(0)

    return model
    'HEPMASS': 512,
    'MINIBOONE': 512,
    'BSDS300': 512,
    'MOONS': 64
}[args.dataset]

# Container for the flow's layers, assembled per block below.
modules = []

# Only MAF and Glow are supported by this script.
assert args.flow in ['maf', 'glow']
for _ in range(args.num_blocks):
    if args.flow == 'glow':
        print("Warning: Results for GLOW are not as good as for MAF yet.")
        # Glow block: batch-norm flow -> invertible linear -> coupling layer.
        modules += [
            fnn.BatchNormFlow(num_inputs),
            fnn.InvertibleMM(num_inputs),
            fnn.CouplingLayer(num_inputs, num_hidden)
        ]
    elif args.flow == 'maf':
        # MAF block: masked autoregressive layer -> batch-norm -> reverse.
        modules += [
            fnn.MADE(num_inputs, num_hidden),
            fnn.BatchNormFlow(num_inputs),
            fnn.Reverse(num_inputs)
        ]

model = fnn.FlowSequential(*modules)

# Orthogonal init for linear weights; zero biases where present.
# Guard against bias=None (nn.Linear(..., bias=False)), matching the
# guarded initialization used by the other builders in this file.
for module in model.modules():
    if isinstance(module, nn.Linear):
        nn.init.orthogonal_(module.weight)
        if module.bias is not None:
            module.bias.data.fill_(0)