Example #1
def make_nf(n_blocks, affine_class, base_dist):
    # Stack n_blocks of (affine, PReLU, batch-norm) triples, then a final affine layer.
    blocks = []
    for _ in range(n_blocks):
        blocks += [affine_class(2), PReLUFlow(2), BatchNormFlow(2)]
    blocks += [affine_class(2)]

    return NormalizingFlow(
        *blocks,
        base_dist=base_dist,
    )
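A hypothetical call, assuming StructuredAffineFlow and a 2-D standard-normal base distribution (both names taken from the other examples on this page):

nf = make_nf(3, StructuredAffineFlow,
             distrib.Normal(loc=torch.zeros(2), scale=torch.ones(2)))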
Example #2
def make_nf(n_blocks, base_dist, affine_flow=StructuredAffineFlow):
    blocks = []
    for _ in range(n_blocks):
        blocks += [affine_flow(2), PReLUFlow(2), BatchNormFlow(2)]
    blocks += [affine_flow(2)]

    return NormalizingFlow(
        *blocks,
        base_dist=base_dist,
    )
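With the keyword default, the affine class can be omitted at the call site; AffineLUFlow below is a hypothetical alternative borrowed from the later examples:

nf = make_nf(3, base_dist)                            # uses StructuredAffineFlow
nf = make_nf(3, base_dist, affine_flow=AffineLUFlow)  # hypothetical alternative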
Example #3
    def __init__(self, obsdim, outdim, **kwargs):
        super(AddNormalizingFlow, self).__init__(obsdim, outdim, **kwargs)

        self.K = 12  # number of flow steps
        # A single linear layer predicts all flow parameters from the encoder
        # output: 2 * latent_dim + 1 parameters per step.
        self.flow_params = nn.Linear(self.encoder_dims,
                                     self.K * (self.latent_dim * 2 + 1))

        torch.nn.init.xavier_uniform_(self.flow_params.weight)

        self.flow = NormalizingFlow(K=self.K, D=self.latent_dim)
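The output size self.K * (self.latent_dim * 2 + 1) is consistent with planar flows (Rezende and Mohamed, 2015), where each of the K steps needs w and u in R^D plus a scalar b, i.e. 2D + 1 parameters per step. A standalone sketch of splitting such an output per step, under that assumption:

import torch

K, D, batch = 12, 8, 4                        # hypothetical sizes
params = torch.randn(batch, K * (2 * D + 1))  # stand-in for flow_params(h)
params = params.view(batch, K, 2 * D + 1)
w = params[..., :D]                           # (batch, K, D)
u = params[..., D:2 * D]                      # (batch, K, D)
b = params[..., -1]                           # (batch, K)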
Example #4
def make_nf(shared_blocks, n_blocks, base_dist):
    blocks = list(shared_blocks)  # copy so the caller's list is not mutated
    for _ in range(n_blocks):
        blocks += [StructuredAffineFlow(2), PReLUFlow(2), BatchNormFlow(2)]
    blocks += [StructuredAffineFlow(2)]
    # Alternative using AffineLUFlow in place of StructuredAffineFlow:
    #     blocks += [AffineLUFlow(2), PReLUFlow(2), BatchNormFlow(2)]
    # blocks += [AffineLUFlow(2)]

    return NormalizingFlow(
        *blocks,
        base_dist=base_dist,
    )
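A hypothetical call that reuses the same block instances across two flows via shared_blocks:

shared = [BatchNormFlow(2)]            # hypothetical shared block
nf_a = make_nf(shared, 3, base_dist)
nf_b = make_nf(shared, 3, base_dist)   # shares the BatchNormFlow parameters with nf_a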
Example #5
idx_2 = np.logical_and(X0[:, 0] >= 0, X0[:, 1] >= 0)
colors[idx_2] = 2
idx_3 = np.logical_and(X0[:, 0] < 0, X0[:, 1] >= 0)
colors[idx_3] = 3

# %%
plt.scatter(X0[:, 0], X0[:, 1], s=5, c=colors)

# %%
# Flatten: five (affine, PReLU) pairs followed by a final affine layer.
blocks = sum(
    [[StructuredAffineFlow(2), PReLUFlow(2)] for _ in range(5)]
    + [[StructuredAffineFlow(2)]],
    [])

# %%
flow = NormalizingFlow(
    *blocks,
    base_dist=base_dist,
)

opt = optim.Adam(flow.parameters(), lr=5e-4)

# %%
count_parameters(flow)

# %%
writer = SummaryWriter(f"/workspace/sandbox/tensorboard_logs/{now_str()}")

best_loss = torch.Tensor([float("+inf")])

attempts = 0

for it in trange(int(1e5)):
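The loop body is cut off in the excerpt. A minimal iteration, assuming the flow exposes a log_prob method (an assumption; the API is not shown here) and that X is a hypothetical tensor of training data:

    x = X[torch.randint(len(X), (512,))]    # hypothetical minibatch
    loss = -flow.log_prob(x).mean()         # maximize likelihood under the flow
    opt.zero_grad()
    loss.backward()
    opt.step()
    writer.add_scalar("loss", loss.item(), it)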
Example #6
# %%
from normalizing_flows import NormalizingFlow
from normalizing_flows.flows import CouplingLayerFlow, BatchNormFlow

from thesis_utils import now_str, count_parameters, figure2tensor

# %%
base_dist = distrib.Normal(loc=torch.zeros(2), scale=torch.ones(2))

# %%
mask = torch.arange(2) % 2
blocks = [CouplingLayerFlow(dim=2, hidden_size=3, n_hidden=2, mask=mask)]

true_flow = NormalizingFlow(
    *blocks,
    base_dist=base_dist,
)

# %%
# Inspect the raw parameters of the first linear layer of the scale network `s`.
true_flow[0].s[0]._parameters

# %%
# Pin the scale (s) and translation (t) networks to hand-picked weights.
true_flow[0].s[0]._parameters["weight"].data = torch.Tensor([[1], [1], [1]])
true_flow[0].s[2]._parameters["weight"].data = torch.Tensor([[1, 0, 0],
                                                             [0, 1, 0],
                                                             [0, 0, 1]])
true_flow[0].s[0]._parameters["bias"].data = torch.Tensor([0, 0, 0])
true_flow[0].s[2]._parameters["bias"].data = torch.Tensor([0, 0, 0])

true_flow[0].t[0]._parameters["weight"].data = torch.Tensor([[-1], [1], [0.5]])
true_flow[0].t[2]._parameters["weight"].data = torch.Tensor([[100, 0, 0],
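(The last assignment is truncated in the source.) Assigning through ._parameters[...].data works, but a more conventional equivalent uses torch.no_grad() with copy_, e.g.:

with torch.no_grad():
    true_flow[0].s[0].weight.copy_(torch.tensor([[1.0], [1.0], [1.0]]))
    true_flow[0].s[0].bias.copy_(torch.zeros(3))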
Example #7
# %%
f1(X)  # call the module directly (rather than f1.forward(X)) so hooks run

# %%
# Alternative stack using AffineLUFlow:
# blocks = sum(
#     [[AffineLUFlow(2), PReLUFlow(2), BatchNormFlow(2)] for _ in range(5)] +
#     [[AffineLUFlow(2)]],
# [])

# Five (affine, PReLU, batch-norm) triples followed by a final affine layer.
blocks = sum(
    [[StructuredAffineFlow(2), PReLUFlow(2), BatchNormFlow(2)]
     for _ in range(5)] + [[StructuredAffineFlow(2)]], [])

flow = NormalizingFlow(
    *blocks,
    base_dist=base_dist,
)

opt = optim.Adam(flow.parameters(), lr=1e-2)

# %%
count_parameters(flow)

# %%
n_epochs = 2000
bs = 512
clip_grad = 1e6

# %%
writer = SummaryWriter(f"../tensorboard_logs/{now_str()}")
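clip_grad is defined above but applied outside the excerpt; a typical use inside the training loop relies on PyTorch's standard utility:

# after computing `loss` in the training loop:
loss.backward()
torch.nn.utils.clip_grad_norm_(flow.parameters(), clip_grad)
opt.step()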
Example #8
colors[idx_1] = 1
idx_2 = np.logical_and(X0[:, 0] >= 0, X0[:, 1] >= 0)
colors[idx_2] = 2
idx_3 = np.logical_and(X0[:, 0] < 0, X0[:, 1] >= 0)
colors[idx_3] = 3

# %%
plt.scatter(X0[:, 0], X0[:, 1], s=5, c=colors)

# %%
blocks = sum(
    [[StructuredAffineFlow(2), PReLUFlow(2)] for _ in range(5)]
    + [[StructuredAffineFlow(2)]],
    [])

flow = NormalizingFlow(
    *blocks,
    base_dist=base_dist,
)

opt = optim.Adam(flow.parameters(), lr=2e-3)

# %%
count_parameters(flow)

# %%
n_epochs = 50000
bs = 512

# %%
writer = SummaryWriter(f"./tensorboard_logs/{now_str()}")

best_loss = torch.Tensor([float("+inf")])
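best_loss suggests a checkpoint-on-improvement pattern; the loop itself is outside the excerpt, but a typical epoch-end check (with a hypothetical save path) looks like:

# after computing `loss` for the epoch:
if loss.item() < best_loss.item():
    best_loss = loss.detach().cpu()
    torch.save(flow.state_dict(), f"./checkpoints/best_{now_str()}.pt")  # hypothetical path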
Example #9
    target_density = pot_1
elif args.POTENTIAL == 'POT_2':
    target_density = pot_2
elif args.POTENTIAL == 'POT_3':
    target_density = pot_3
elif args.POTENTIAL == 'POT_4':
    target_density = pot_4
else:
    raise ValueError("Invalid potential function option passed")

plot_pot_func(target_density)
plt.savefig(pjoin(OUT_DIR, 'target_density.png'))
plt.close()

model = NormalizingFlow(2, args.N_FLOWS)

# RMSprop is what they used in Rezende et al.
opt = torch.optim.RMSprop(params=model.parameters(),
                          lr=args.LR,
                          momentum=args.MOMENTUM)

scheduler = ReduceLROnPlateau(opt, 'min', patience=1000)
losses = []

for iter_ in range(args.N_ITERS):
    if iter_ % 100 == 0:
        print("Iteration {}".format(iter_))

    samples = Variable(random_normal_samples(args.BATCH_SIZE))
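torch.autograd.Variable has been a no-op wrapper since PyTorch 0.4, so on modern versions the line can be simplified:

    samples = random_normal_samples(args.BATCH_SIZE)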
Example #10
# %%
X0 = base_dist.sample((1000, )).numpy()

# %%
colors = np.zeros(len(X0))

colors[X0 <= 0] = 0
colors[X0 > 0] = 1

# %%
plt.scatter(X0, torch.rand((1000, )), s=5, c=colors)  # random y-jitter to spread out 1-D samples

# %%
flow = NormalizingFlow(dim=1,
                       blocks=([StructuredAffineFlow, PReLUFlow] * 5 +
                               [StructuredAffineFlow]),
                       base_density=base_dist,
                       flow_length=1)

opt = optim.Adam(flow.parameters(), lr=1e-3)

# %%
count_parameters(flow)

# %%
n_epochs = 10000
bs = 512

# %%
xx = torch.Tensor(np.arange(0.01, 1.0, 0.01))
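xx is an evaluation grid on (0, 1); assuming base_dist is a one-dimensional torch.distributions object (it is sampled from at the top of the example), its density can be checked on the grid:

with torch.no_grad():
    plt.plot(xx.numpy(), base_dist.log_prob(xx).exp().numpy())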
Example #11
# %%
def get_meshes(cur_z, density, grid_side=1000, dim=2):
    """Reshape a flat grid of points and their densities into 2-D plotting meshes."""
    mesh = cur_z.reshape([grid_side, grid_side, dim]).transpose(2, 0, 1)
    xx = mesh[0]
    yy = mesh[1]
    zz = density.numpy().reshape([grid_side, grid_side])

    return xx, yy, zz
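
A hypothetical usage of get_meshes, assuming cur_z is a (grid_side**2, dim) array of grid points and density a tensor of matching probability values:

xx, yy, zz = get_meshes(cur_z, density)
plt.pcolormesh(xx, yy, zz)
plt.colorbar()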


# %%
blocks = [PReLUFlow(2)]

flow = NormalizingFlow(
    *blocks,
    base_dist=base_dist,
)

# %%
from mpl_toolkits.axes_grid1 import make_axes_locatable

# %%
x = np.linspace(-5, 5, 1000)
z = np.array(np.meshgrid(x, x)).transpose(1, 2, 0)
z = np.reshape(z, [z.shape[0] * z.shape[1], -1])

with torch.no_grad():
    densities = flow.base_dist.log_prob(torch.Tensor(z)).sum(dim=1).exp().numpy()

mesh = z.reshape([1000, 1000, 2]).transpose(2, 0, 1)
xx = mesh[0]