def __init__(self, flowtype=0, dim=2, dimh=64, n=500, num_hid_layers=2,
             act=nn.ELU(), num_flow_layers=2, num_ds_dim=16, num_ds_layers=1,
             lr=0.005, betas=(0.9, 0.999)):
    nn.Module.__init__(self)

    # Select the flow family: 0 = affine IAF, 1 = IAF with a deep sigmoidal
    # flow (DSF) transformer, 2 = IAF with a deep dense sigmoidal flow (DDSF).
    if flowtype == 0:
        flow = flows.IAF
    elif flowtype == 1:
        flow = lambda **kwargs: flows.IAF_DSF(num_ds_dim=num_ds_dim,
                                              num_ds_layers=num_ds_layers,
                                              **kwargs)
    elif flowtype == 2:
        flow = lambda **kwargs: flows.IAF_DDSF(num_ds_dim=num_ds_dim,
                                               num_ds_layers=num_ds_layers,
                                               **kwargs)

    # Stack num_flow_layers blocks of (flow, dimension flip) and end with a
    # linear flow; the flip reverses the autoregressive ordering between blocks.
    sequels = [nn_.SequentialFlow(
        flow(dim=dim,
             hid_dim=dimh,
             context_dim=1,
             num_layers=num_hid_layers + 1,
             activation=act,
             fixed_order=True),
        flows.FlipFlow(1)) for i in range(num_flow_layers)] + \
        [flows.LinearFlow(dim, 1)]

    self.mdl = nn.Sequential(*sequels).cuda()

    # Note: lr and betas are accepted here but unused in this snippet; the
    # optimizer is presumably constructed elsewhere.
    self.n = n
    self.context = Variable(torch.FloatTensor(n, 1).zero_()) + 2.0
    self.lgd = Variable(torch.FloatTensor(n).zero_())
    self.zeros = Variable(torch.FloatTensor(n, 2).zero_())
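# The following is a minimal self-contained sketch (not repo code) of the
# pattern the constructor above configures: alternating invertible blocks
# with a dimension flip, while accumulating log|det J| per sample. All class
# and variable names below are illustrative assumptions, not the repo's API.
import torch
import torch.nn as nn

class ToyAffineFlowStep(nn.Module):
    """One learnable elementwise affine bijection z = x * exp(s) + t."""
    def __init__(self, dim):
        super().__init__()
        self.log_scale = nn.Parameter(torch.zeros(dim))
        self.shift = nn.Parameter(torch.zeros(dim))

    def forward(self, x, logdet):
        z = x * torch.exp(self.log_scale) + self.shift
        return z, logdet + self.log_scale.sum()

class Flip(nn.Module):
    """Reverses the variable ordering between blocks, in the spirit of FlipFlow(1)."""
    def forward(self, x, logdet):
        return torch.flip(x, dims=[1]), logdet

steps = []
for _ in range(2):                        # num_flow_layers
    steps += [ToyAffineFlowStep(2), Flip()]

x = torch.randn(500, 2)                   # n particles, dim = 2
logdet = torch.zeros(500)                 # per-sample log|det J|
for step in steps:
    x, logdet = step(x, logdet)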
def __init__(self, args):
    flowtype = args.flowtype
    dimh = args.dimh
    num_hid_layers = args.num_hid_layers
    act = nn.ELU()
    num_flow_layers = args.num_flow_layers
    num_ds_dim = args.num_ds_dim
    num_ds_layers = args.num_ds_layers
    lr = args.lr
    betas = (args.beta1, args.beta2)

    self.n = args.num_particle
    self.dim = args.dim
    self.lamb = args.lamb
    self.meta_sample_path = args.meta_sample_path
    self.meta_data_path = args.meta_data_path
    self.adapt_sample_path = args.adapt_sample_path
    self.adapt_data_path = args.adapt_data_path

    # Two types of neural inverse autoregressive flow: DSF (0) and DDSF (1).
    if flowtype == 0:
        flow = lambda **kwargs: flows.IAF_DSF(num_ds_dim=num_ds_dim,
                                              num_ds_layers=num_ds_layers,
                                              **kwargs)
    elif flowtype == 1:
        flow = lambda **kwargs: flows.IAF_DDSF(num_ds_dim=num_ds_dim,
                                               num_ds_layers=num_ds_layers,
                                               **kwargs)

    sequels = [nn_.SequentialFlow(
        flow(dim=self.dim,
             hid_dim=dimh,
             context_dim=1,
             num_layers=num_hid_layers + 1,
             activation=act,
             fixed_order=True),
        flows.FlipFlow(1)) for i in range(num_flow_layers)] + \
        [flows.LinearFlow(self.dim, 1)]

    self.mdl = nn.Sequential(*sequels)
    self.optim = optim.Adam(self.mdl.parameters(), lr=lr, betas=betas)

    self.context = Variable(torch.FloatTensor(self.n, 1).zero_()) + 2.0
    self.lgd = Variable(torch.FloatTensor(self.n).zero_())
    # Note: the second dimension is hard-coded to 2; presumably it should
    # match self.dim when self.dim != 2.
    self.zeros = Variable(torch.FloatTensor(self.n, 2).zero_())
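# Self-contained illustration (assumed, not repo code) of the role the buffers
# above typically play in flow-based sampling: `zeros` is the base-sample
# placeholder, `lgd` accumulates log|det J|, and Adam updates the flow to
# minimize a reverse-KL-style objective against a target log-density. The
# single affine transform below is a hypothetical stand-in for the flow.
import torch
import torch.nn as nn
import torch.optim as optim

dim, n = 2, 500
log_scale = nn.Parameter(torch.zeros(dim))   # stand-in for the flow's parameters
shift = nn.Parameter(torch.zeros(dim))
opt = optim.Adam([log_scale, shift], lr=5e-3, betas=(0.9, 0.999))

def target_logp(z):                          # e.g. a standard normal target
    return -0.5 * (z ** 2).sum(1) - dim * 0.5 * torch.log(torch.tensor(2 * torch.pi))

eps = torch.randn(n, dim)                    # base noise
z = eps * torch.exp(log_scale) + shift       # flow transform
lgd = log_scale.sum().expand(n)              # per-sample log|det J|
# log q(z) = log N(eps; 0, I) - log|det J|
log_q = (-0.5 * (eps ** 2).sum(1)
         - dim * 0.5 * torch.log(torch.tensor(2 * torch.pi))
         - lgd)
loss = (log_q - target_logp(z)).mean()       # reverse KL estimate
opt.zero_grad()
loss.backward()
opt.step()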
def __init__(self, args, p):
    self.args = args
    self.__dict__.update(args.__dict__)

    self.p = p
    dim = p
    dimc = 1
    flowtype = args.flow_type
    dimh = args.dimh
    num_flow_layers = args.num_flow_layers
    num_ds_dim = args.num_ds_dim
    num_ds_layers = args.num_ds_layers
    fixed_order = args.fixed_order
    act = nn.ELU()

    # Select the flow family: plain affine IAF, deep sigmoidal flow (DSF),
    # or deep dense sigmoidal flow (DDSF).
    if flowtype == 'affine':
        flow = flows.IAF
    elif flowtype == 'dsf':
        flow = lambda **kwargs: flows.IAF_DSF(num_ds_dim=num_ds_dim,
                                              num_ds_layers=num_ds_layers,
                                              **kwargs)
    elif flowtype == 'ddsf':
        flow = lambda **kwargs: flows.IAF_DDSF(num_ds_dim=num_ds_dim,
                                               num_ds_layers=num_ds_layers,
                                               **kwargs)

    sequels = [nn_.SequentialFlow(
        flow(dim=dim,
             hid_dim=dimh,
             context_dim=dimc,
             num_layers=args.num_hid_layers + 1,
             activation=act,
             fixed_order=fixed_order),
        flows.FlipFlow(1)) for i in range(num_flow_layers)] + \
        [flows.LinearFlow(dim, dimc)]

    self.flow = nn.Sequential(*sequels)
    if self.cuda:
        self.flow = self.flow.cuda()
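# Self-contained sketch (assumed, not repo code) of how a density-estimation
# flow like the one above is typically trained: maximize log p(x) via the
# change-of-variables formula. The elementwise affine map below is an
# illustrative stand-in for the full autoregressive flow stack.
import torch
import torch.nn as nn
import torch.optim as optim

p = 5                                        # data dimensionality, matching `dim = p`
log_scale = nn.Parameter(torch.zeros(p))
shift = nn.Parameter(torch.zeros(p))
opt = optim.Adam([log_scale, shift], lr=1e-3)

x = torch.randn(128, p) * 3.0 + 1.0          # a batch of data
u = (x - shift) * torch.exp(-log_scale)      # map data to base noise
log_det = -log_scale.sum()                   # log|det du/dx|
log_px = (-0.5 * (u ** 2).sum(1)
          - p * 0.5 * torch.log(torch.tensor(2 * torch.pi))
          + log_det)
loss = -log_px.mean()                        # negative log-likelihood
opt.zero_grad()
loss.backward()
opt.step()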
def __init__(self, args, p):
    self.args = args
    self.__dict__.update(args.__dict__)

    self.p = p
    dim = p
    dimc = 1
    dimh = p
    flowtype = args.flow_family
    num_flow_layers = args.n_flows
    num_ds_dim = p
    num_ds_layers = 2
    fixed_order = False
    act = nn.ELU()

    if flowtype == 'iaf':
        flow = flows.IAF
    elif flowtype == 'dsf':
        flow = lambda **kwargs: flows.IAF_DSF(num_ds_dim=num_ds_dim,
                                              num_ds_layers=num_ds_layers,
                                              **kwargs)
    elif flowtype == 'ddsf':
        flow = lambda **kwargs: flows.IAF_DDSF(num_ds_dim=num_ds_dim,
                                               num_ds_layers=num_ds_layers,
                                               **kwargs)

    sequels = [nn_.SequentialFlow(
        flow(dim=dim,
             hid_dim=dimh,
             context_dim=dimc,
             num_layers=2 + 1,
             activation=act,
             fixed_order=fixed_order),
        flows.FlipFlow(1)) for i in range(num_flow_layers)] + \
        [flows.LinearFlow(dim, dimc)]

    self.flow = nn.Sequential(*sequels)
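# Self-contained illustration (assumed, not repo code) of the
# `self.__dict__.update(args.__dict__)` idiom used in the constructors above:
# every attribute of the args namespace becomes an attribute of the instance.
# The Holder class and attribute names are hypothetical.
from argparse import Namespace

class Holder:
    def __init__(self, args):
        self.args = args
        self.__dict__.update(args.__dict__)

h = Holder(Namespace(flow_family='dsf', n_flows=4, cuda=False))
print(h.flow_family, h.n_flows, h.cuda)   # dsf 4 False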
def __init__(self, args):
    self.args = args
    self.__dict__.update(args.__dict__)

    dimz = args.dimz
    dimc = args.dimc
    dimh = args.dimh
    flowtype = args.flowtype
    num_flow_layers = args.num_flow_layers
    num_ds_dim = args.num_ds_dim
    num_ds_layers = args.num_ds_layers
    act = nn.ELU()

    if flowtype == 'affine':
        flow = flows.IAF
    elif flowtype == 'dsf':
        flow = lambda **kwargs: flows.IAF_DSF(num_ds_dim=num_ds_dim,
                                              num_ds_layers=num_ds_layers,
                                              **kwargs)
    elif flowtype == 'ddsf':
        flow = lambda **kwargs: flows.IAF_DDSF(num_ds_dim=num_ds_dim,
                                               num_ds_layers=num_ds_layers,
                                               **kwargs)

    # Convolutional encoder: maps the input image to a dimc-dimensional
    # context vector that conditions the flow posterior.
    self.enc = nn.Sequential(
        nn_.ResConv2d(1, 16, 3, 2, padding=1, activation=act),
        act,
        nn_.ResConv2d(16, 16, 3, 1, padding=1, activation=act),
        act,
        nn_.ResConv2d(16, 32, 3, 2, padding=1, activation=act),
        act,
        nn_.ResConv2d(32, 32, 3, 1, padding=1, activation=act),
        act,
        nn_.ResConv2d(32, 32, 3, 2, padding=1, activation=act),
        act,
        nn_.Reshape((-1, 32 * 4 * 4)),
        nn_.ResLinear(32 * 4 * 4, dimc),
        act)

    # Inference network: a linear flow followed by num_flow_layers
    # (flow, flip) blocks, all conditioned on the encoder context.
    self.inf = nn.Sequential(
        flows.LinearFlow(dimz, dimc),
        *[nn_.SequentialFlow(
            flow(dim=dimz,
                 hid_dim=dimh,
                 context_dim=dimc,
                 num_layers=2,
                 activation=act),
            flows.FlipFlow(1)) for i in range(num_flow_layers)])

    # Decoder: maps a latent sample back to image space via upsampling
    # and residual convolutions.
    self.dec = nn.Sequential(
        nn_.ResLinear(dimz, dimc),
        act,
        nn_.ResLinear(dimc, 32 * 4 * 4),
        act,
        nn_.Reshape((-1, 32, 4, 4)),
        nn.Upsample(scale_factor=2, mode='bilinear'),
        nn_.ResConv2d(32, 32, 3, 1, padding=1, activation=act),
        act,
        nn_.ResConv2d(32, 32, 3, 1, padding=1, activation=act),
        act,
        nn_.slicer[:, :, :-1, :-1],
        nn.Upsample(scale_factor=2, mode='bilinear'),
        nn_.ResConv2d(32, 16, 3, 1, padding=1, activation=act),
        act,
        nn_.ResConv2d(16, 16, 3, 1, padding=1, activation=act),
        act,
        nn.Upsample(scale_factor=2, mode='bilinear'),
        nn_.ResConv2d(16, 1, 3, 1, padding=1, activation=act),
    )

    # Start the output layer with a strongly negative bias (low initial
    # pre-activations for the reconstructed pixels).
    self.dec[-1].conv_01.bias.data.normal_(-3, 0.0001)

    if self.cuda:
        self.enc = self.enc.cuda()
        self.inf = self.inf.cuda()
        self.dec = self.dec.cuda()

    amsgrad = bool(args.amsgrad)
    polyak = args.polyak
    # Note: torch.optim.Adam has no `polyak` argument, so `optim` here
    # presumably refers to a custom Adam variant with Polyak averaging.
    self.optim = optim.Adam(
        chain(self.enc.parameters(),
              self.inf.parameters(),
              self.dec.parameters()),
        lr=args.lr,
        betas=(args.beta1, args.beta2),
        amsgrad=amsgrad,
        polyak=polyak)
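# Self-contained sketch (assumed, not repo code) of the VAE-with-flow-posterior
# pattern the constructor above sets up: an encoder produces a context, the
# context conditions an invertible transform of base noise, and the ELBO is
# log p(x|z) + log p(z) - log q(z|x). Everything below is a toy stand-in with
# hypothetical names; a single conditioned affine step replaces the IAF stack.
import torch
import torch.nn as nn
import torch.nn.functional as F

dimz, dimc = 8, 16
enc = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, dimc), nn.ELU())
ctx_to_affine = nn.Linear(dimc, 2 * dimz)            # context -> (shift, log_scale)
dec = nn.Linear(dimz, 28 * 28)                       # Bernoulli logits decoder

x = torch.rand(32, 1, 28, 28)
ctx = enc(x)
shift, log_scale = ctx_to_affine(ctx).chunk(2, dim=1)

eps = torch.randn(32, dimz)                          # base noise
z = eps * torch.exp(log_scale) + shift               # context-conditioned flow step
log_q = (-0.5 * (eps ** 2).sum(1)
         - dimz * 0.5 * torch.log(torch.tensor(2 * torch.pi))
         - log_scale.sum(1))                         # log q(z|x) via change of variables
log_p_z = (-0.5 * (z ** 2).sum(1)
           - dimz * 0.5 * torch.log(torch.tensor(2 * torch.pi)))
logits = dec(z)
log_p_x_given_z = -F.binary_cross_entropy_with_logits(
    logits, x.flatten(1), reduction='none').sum(1)
elbo = (log_p_x_given_z + log_p_z - log_q).mean()
loss = -elbo                                         # minimize the negative ELBO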