def train_mog_made(n_hiddens, act_fun, n_comps, mode):
    """Train a mixture-of-Gaussians MADE on the currently loaded dataset and save it.

    :param n_hiddens: list with number of hidden units for each hidden layer
    :param act_fun: name of the activation function
    :param n_comps: number of gaussian components per conditional
    :param mode: strategy for assigning degrees to hidden nodes ('random' or 'sequential')
    """
    # is_data_loaded / data / mades / train / save_model are module-level
    # helpers defined elsewhere in this file.
    assert is_data_loaded(), 'Dataset hasn\'t been loaded'
    model = mades.MixtureOfGaussiansMade(data.n_dims, n_hiddens, act_fun, n_comps, mode=mode)
    # BUG FIX: the original called train(model, a_made), but `a_made` is not
    # defined anywhere in this function (nor is it a parameter), so the call
    # raised NameError before any training happened. Train the freshly built
    # model, as the sibling train_* helpers do.
    train(model)
    # NOTE(review): saved under tag 'made' — presumably n_comps in the saved
    # identity distinguishes this from plain gaussian mades; confirm against
    # save_model's filename scheme.
    save_model(model, 'made', mode, n_hiddens, act_fun, n_comps, False)
def __init__(self, n_inputs, n_hiddens, act_fun, n_layers, n_comps, batch_norm=True, input_order='sequential', mode='sequential', input=None, rng=np.random): """ Constructor. :param n_inputs: number of inputs :param n_hiddens: list with number of hidden units for each hidden layer :param act_fun: name of activation function :param n_layers: number of layers in the flow :param n_comps: number of gaussians per conditional for the target made :param batch_norm: whether to use batch normalization between layers :param input_order: order of inputs of last made :param mode: strategy for assigning degrees to hidden nodes: can be 'random' or 'sequential' :param input: theano variable to serve as input; if None, a new variable is created """ # save input arguments self.n_inputs = n_inputs self.n_hiddens = n_hiddens self.act_fun = act_fun self.n_layers = n_layers self.n_comps = n_comps self.batch_norm = batch_norm self.mode = mode self.input = tt.matrix('x', dtype=dtype) if input is None else input self.parms = [] # maf self.maf = MaskedAutoregressiveFlow(n_inputs, n_hiddens, act_fun, n_layers, batch_norm, input_order, mode, self.input, rng) self.bns = self.maf.bns self.parms += self.maf.parms self.input_order = self.maf.input_order # mog made input_order = input_order if input_order == 'random' else self.maf.mades[-1].input_order[::-1] self.made = mades.MixtureOfGaussiansMade(n_inputs, n_hiddens, act_fun, n_comps, input_order, mode, self.maf.u, rng) self.parms += self.made.parms # log likelihoods self.L = self.made.L + self.maf.logdet_dudx self.L.name = 'L' # train objective self.trn_loss = -tt.mean(self.L) self.trn_loss.name = 'trn_loss' # theano evaluation functions, will be compiled when first needed self.eval_lprob_f = None self.eval_grad_f = None self.eval_us_f = None