def __init__(self, params, logger=None):
    """Initialize the model: merge user params into defaults and build the network.

    Args:
        params: configuration overrides applied on top of the default hparams.
        logger: optional logger handed to the BaseModel initializer.
    """
    # Both bases are initialized explicitly (multiple inheritance);
    # ProbabilisticModel takes no extra arguments.
    BaseModel.__init__(self, logger)
    ProbabilisticModel.__init__(self)
    self._hp = self._default_hparams()
    self._hp.overwrite(params)  # override defaults with config file
    self._hp.builder = LayerBuilderParams(self._hp.use_convs, self._hp.normalization)
    self.device = self._hp.device
    # Network construction must come last: it reads the fully merged hparams.
    self.build_network()
def _updated_encoder_params(self):
    """Return a deep copy of the hparams, adjusted for the image encoder."""
    enc_hp = copy.deepcopy(self._hp)
    overrides = AttrDict(
        use_convs=True,
        use_skips=False,             # no skip connections needed since we are not reconstructing
        img_sz=self._hp.input_res,   # image resolution
        input_nc=self._hp.input_nc,  # number of input feature maps
        builder=LayerBuilderParams(use_convs=True, normalization=self._hp.normalization),
    )
    return enc_hp.overwrite(overrides)
def _updated_encoder_params(self):
    """Return a deep copy of the hparams, adjusted for the prior's image encoder."""
    enc_hp = copy.deepcopy(self._hp)
    overrides = AttrDict(
        use_convs=True,
        use_skips=False,                      # no skip connections needed since we are not reconstructing
        img_sz=self._hp.prior_input_res,      # image resolution
        input_nc=3*self._hp.n_input_frames,   # number of input feature maps (RGB per frame)
        ngf=self._hp.encoder_ngf,             # number of feature maps in shallowest level
        nz_enc=self.prior_input_size,         # size of image encoder output feature
        builder=LayerBuilderParams(use_convs=True, normalization=self._hp.normalization),
    )
    return enc_hp.overwrite(overrides)
def __init__(self, hp):
    """Set up the two-branch encoder network (vector branch + image branch).

    Args:
        hp: configuration overrides merged into the default hparams.
    """
    super().__init__()
    cfg = self._default_hparams().overwrite(hp)
    cfg.builder = LayerBuilderParams(use_convs=False, normalization=cfg.normalization)
    self._hp = cfg
    # fully-connected encoder for the vector-valued input
    self._vector_enc = Predictor(cfg,
                                 input_size=cfg.input_dim,
                                 output_size=cfg.nz_enc,
                                 mid_size=cfg.nz_mid,
                                 num_layers=cfg.n_layers,
                                 final_activation=None,
                                 spatial=False)
    # convolutional encoder for the image input (uses adjusted hparams)
    self._image_enc = Encoder(self._updated_encoder_params())
    # head maps the concatenated [vector, image] encodings to the output
    self._head = Predictor(cfg,
                           input_size=2 * cfg.nz_enc,
                           output_size=cfg.output_dim,
                           mid_size=cfg.nz_mid,
                           num_layers=2,
                           final_activation=None,
                           spatial=False)
def __init__(self, config):
    """Merge the user config into the default hparams, then run base-class setup."""
    hp = self._default_hparams().overwrite(config)  # user config takes precedence
    hp.builder = LayerBuilderParams(use_convs=False, normalization=hp.normalization)
    self._hp = hp
    # base-class init runs last so it can rely on self._hp being fully populated
    super().__init__()
# Toy experiment: fit an MDN (mixture density network) to samples from a
# fixed 4-component 2D GMM.
# Target mixture: 4 components with weights [0.7, 0.1, 0.1, 0.1], batch of 256.
pi = torch.tensor([0.7, 0.1, 0.1, 0.1])[None].repeat(256, 1)
# Component means: transpose yields shape (256, 4, 2) — one 2D mean per component.
mu = torch.tensor([[1.0, -1.0, 0.0, 0.0], [0.0, 0.0, 1.0, -1.0]])[None].repeat(256, 1, 1).transpose(-1, -2)
# All components share an isotropic std of 0.1 (stored as log-sigma).
log_sigma = torch.zeros_like(mu) + torch.tensor(np.log(0.1))
data_dist = GMM(pi=pi, mu=mu, log_sigma=log_sigma)
data = data_dist.sample().data.numpy()
# set up flow model
# The model input is a trainable tensor rather than data — presumably the
# optimization is over this input as well; TODO(review) confirm intent.
trainable_input = torch.zeros((256, 2), requires_grad=True)
hp = AttrDict({
    'nz_mid': 32,
    'n_processing_layers': 3,
})
hp.builder = LayerBuilderParams(False, 'batch')
# MLP trunk followed by an MDN head emitting a 4-component 2D mixture.
model = torch.nn.Sequential(
    Predictor(hp, input_size=2, output_size=hp.nz_mid),
    MDN(input_size=hp.nz_mid, output_size=2, num_gaussians=4))
# NOTE(review): `pydata` is never used below in the visible code.
pydata = torch.tensor(data, dtype=torch.float32)
optimizer = torch.optim.Adam(model.parameters(), lr=0.005)
# train flow model
# NOTE(review): the loop body appears truncated — `loss_samples` is filled but
# never reduced to a loss, and no backward()/step() is visible here.
for i in range(6000):
    optimizer.zero_grad()
    gmm_dist = GMM(model(trainable_input))
    loss_samples = []
    for _ in range(10):
        data_sample = data_dist.sample()
        gmm_sample = gmm_dist.rsample()  # rsample keeps gradients flowing to the model
def __init__(self, config):
    """Merge the user config into the default hparams, then run base-class setup."""
    # TODO automate the setup by getting params from the environment
    hp = self._default_hparams().overwrite(config)  # user config takes precedence
    hp.builder = LayerBuilderParams(
        use_convs=False, normalization=hp.normalization)
    self._hp = hp
    # base-class init runs last so it can rely on self._hp being fully populated
    super().__init__()