Example #1
def test_trainer_updates():
    n_components = 1
    n_params = 2
    seed = 42
    svi = True

    m = Gauss(dim=n_params)
    p = dd.Gaussian(m=np.zeros((n_params, )), S=np.eye(n_params))
    s = ds.Identity()
    g = dg.Default(model=m, prior=p, summary=s)

    nn = NeuralNet(
        n_components=n_components,
        n_hiddens=[10],
        n_inputs=n_params,
        n_outputs=n_params,
        seed=seed,
        svi=svi)
    loss = -tt.mean(nn.lprobs)  # tt is theano.tensor; minimize negative mean log-likelihood

    trn_inputs = [nn.params, nn.stats]
    trn_data = g.gen(100)  # params, stats
    trn_data = tuple(x.astype(dtype) for x in trn_data)  # dtype: module-level float type (e.g. theano's floatX)

    t = Trainer(network=nn, loss=loss, trn_data=trn_data, trn_inputs=trn_inputs)

    # single update
    outputs = t.make_update(*trn_data)

    # training
    outputs = t.train(100, 50)  # epochs=100, minibatch=50
Example #2
def test_IndependentJoint_uniform_rejection():
    # Check that proposed samples are correctly rejected when using an
    # IndependentJoint prior with some uniform child distributions. We use a
    # Gaussian proposal to generate samples that need to be rejected.
    N = 1000
    B1 = [-1.0, 1.0]
    B2 = [-2.0, 2.0]
    u1 = dd.Uniform(B1[0], B1[1])
    u2 = dd.Uniform(B2[0], B2[1])
    prior = dd.IndependentJoint([u1, u2])

    m = [0., 0.]
    S = [[2., 0.], [0., 2.]]
    proposal = dd.Gaussian(m=m, S=S)

    model = Gauss(dim=2)

    s = ds.Identity()

    g = dg.Default(model=model, prior=prior, summary=s)
    g.proposal = proposal

    params, stats = g.gen(N, verbose=False)
    assert (params.min(axis=0) >= np.array([B1[0], B2[0]])).all() and \
        (params.max(axis=0) <= np.array([B1[1], B2[1]])).all(), \
        "rejection failed"
Example #3
def prior(true_params,
          prior_uniform=True,
          prior_extent=False,
          prior_log=False,
          seed=None):
    """Prior"""
    if not prior_extent:
        range_lower = param_transform(prior_log, 0.5 * true_params)
        range_upper = param_transform(prior_log, 1.5 * true_params)
    else:
        range_lower = param_transform(
            prior_log, np.array([.5, 1e-4, 1e-4, 1e-4, 50., 40., 1e-4, 35.]))
        range_upper = param_transform(
            prior_log, np.array([80., 15., .6, .6, 3000., 90., .15, 100.]))

        range_lower = range_lower[0:len(true_params)]
        range_upper = range_upper[0:len(true_params)]

    if prior_uniform:
        prior_min = range_lower
        prior_max = range_upper
        return dd.Uniform(lower=prior_min, upper=prior_max, seed=seed)
    else:
        prior_mn = param_transform(prior_log, true_params)
        prior_cov = np.diag((range_upper - range_lower)**2) / 12
        return dd.Gaussian(m=prior_mn, S=prior_cov, seed=seed)
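A note on the Gaussian branch above: the diagonal covariance (range_upper - range_lower)**2 / 12 is exactly the variance of a uniform distribution on [range_lower, range_upper], so the Gaussian prior matches the per-parameter spread of the uniform one. A quick standalone check (illustrative, numpy only):

lo, hi = 0.5, 1.5
draws = np.random.uniform(lo, hi, size=1_000_000)
# Var(U(a, b)) = (b - a)**2 / 12
assert np.isclose(draws.var(), (hi - lo)**2 / 12, atol=1e-3)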
Example #4
    def gen_single(self, param):
        # See BaseSimulator for docstring
        param = np.asarray(param).reshape(-1)
        assert param.ndim == 1
        assert param.shape[0] == self.dim_param

        sample = dd.Gaussian(m=param,
                             S=self.noise_cov,
                             seed=self.gen_newseed()).gen(1)

        return {'data': sample.reshape(-1)}
Example #5
def test_gaussian_3d():
    N = 50000
    m = [1., 3., 0.]
    S = [[8., 2., 1.], [2., 3., 2.], [1., 2., 3.]]
    dist = dd.Gaussian(m=m, S=S, seed=seed)  # seed is module-level in the original test file
    samples = dist.gen(N)
    logprobs = dist.eval(samples)

    assert samples.shape == (N, 3)
    assert logprobs.shape == (N, )
    assert np.allclose(np.mean(samples, axis=0), m, atol=0.1)
    assert np.allclose(np.cov(samples, rowvar=False), S, atol=0.1)
Example #6
def test_gaussian_1d():
    N = 50000
    m = [1.]
    S = [[3.]]
    dist = dd.Gaussian(m=m, S=S, seed=seed)
    samples = dist.gen(N)
    logprobs = dist.eval(samples)

    assert samples.shape == (N, 1)
    assert logprobs.shape == (N,)
    assert np.isclose(np.mean(samples).reshape(-1), m, atol=0.1)
    assert np.isclose(
        np.cov(samples, rowvar=False).reshape(-1, 1), S, atol=0.1)
Example #7
def test_mpgen(n_samples=1000, n_params=2, n_cores=4, seed=500):
    p = dd.Gaussian(m=np.zeros((n_params, )), S=np.eye(n_params), seed=seed)
    s = ds.Identity(seed=seed + 1)

    mlist = [Gauss(dim=n_params, seed=seed + 2 + i) for i in range(n_cores)]
    g = dg.MPGenerator(models=mlist,
                       prior=p,
                       summary=s,
                       seed=seed + 2 + n_cores)
    params, stats = g.gen(n_samples, verbose=False)

    # make sure the different models are providing different outputs
    assert np.unique(params).size == params.size
    assert np.unique(stats).size == stats.size
Example #8
def test_gauss_shape():
    for n_params in range(1, 3):
        m = Gauss(dim=n_params)
        p = dd.Gaussian(m=np.zeros((n_params, )), S=np.eye(n_params))
        s = ds.Identity()

        g = dg.Default(model=m, prior=p, summary=s)

        n_samples = 100
        params, stats = g.gen(n_samples)

        n_summary = n_params
        assert params.shape == (n_samples, n_params)
        assert stats.shape == (n_samples, n_summary)
Example #9
def get_maprf_prior_01(params_ls, seed=None, no_transform=False):
    ## prior over simulation parameters
    prior = collections.OrderedDict()

    if no_transform:
        lims = np.array([[-1.5, -1.1, .001,        0.01,         .001, 0.01, 0.01, -.999, -.999],
                         [ 1.5,  1.1, .999*np.pi,  2.49,  1.999*np.pi, 1.99, 3.99,  .999,  .999]]).T
        p = dd.Uniform(lower=lims[:, 0], upper=lims[:, 1])
        return p, prior

    ## prior over simulation parameters
    if 'bias' in params_ls['glm']:
        prior['bias'] = {'mu': np.array([-0.57]), 'sigma': np.array([np.sqrt(1.63)])}
    if 'gain' in params_ls['kernel']['s']:
        # prior['A'] = {'mu': np.zeros(1), 'sigma': 2 * np.ones(1)}
        prior['log_A'] = {'mu': np.zeros(1), 'sigma': np.ones(1) / 2}
    if 'phase' in params_ls['kernel']['s']:
        prior['logit_φ'] = {'mu': np.array([0]), 'sigma': np.array([1.9])}
    if 'freq' in params_ls['kernel']['s']:
        prior['log_f'] = {'mu': np.zeros(1), 'sigma': np.ones(1) / 2}
    if 'angle' in params_ls['kernel']['s']:
        prior['logit_θ'] = {'mu': np.zeros(1), 'sigma': np.array([1.78])}
    if 'ratio' in params_ls['kernel']['s']:
        prior['log_γ'] = {'mu': np.zeros(1), 'sigma': np.ones(1) / 2}
    if 'width' in params_ls['kernel']['s']:
        prior['log_b'] = {'mu': np.zeros(1), 'sigma': np.ones(1) / 2}
    if 'xo' in params_ls['kernel']['l']:
        prior['logit_xo'] = {'mu': np.array([0.]), 'sigma': np.array([1.78])}
        # prior['xo'] = {'mu': np.array([0.]), 'sigma': np.array([1. / np.sqrt(4)])}
    if 'yo' in params_ls['kernel']['l']:
        prior['logit_yo'] = {'mu': np.array([0.]), 'sigma': np.array([1.78])}
        # prior['yo'] = {'mu': np.array([0.]), 'sigma': np.array([1. / np.sqrt(4)])}
    L = np.diag(np.concatenate([prior[i]['sigma'] for i in prior.keys()]))
    if 'value' in params_ls['kernel']['t']:
        # m (the simulator) and len_kt are module-level in the original source
        ax_t = m.dt * np.arange(1, len_kt + 1)
        Λ = np.diag(ax_t / 0.075 * np.exp(1 - ax_t / 0.075))
        D = np.eye(ax_t.shape[0]) - np.eye(ax_t.shape[0], k=-1)
        F = np.dot(D, D.T)
        Σ = np.dot(Λ, np.linalg.inv(F).dot(Λ))  # full time-kernel covariance (unused below)
        prior['kt'] = {'mu': np.zeros_like(ax_t), 'sigma': np.linalg.inv(D).dot(Λ)}
        L = np.block([[L, np.zeros((L.shape[0], ax_t.size))],
                      [np.zeros((ax_t.size, L.shape[1])), prior['kt']['sigma']]])
    mu = np.concatenate([prior[i]['mu'] for i in prior.keys()])
    p = dd.Gaussian(m=mu, S=L.T.dot(L), seed=seed)

    return p, prior
Example #10
def init_all_gaussian(n_params=2,
                      seed=42,
                      inferenceobj=None,
                      **inf_setup_opts):
    model = Gauss(dim=n_params, seed=seed)
    prior = dd.Gaussian(m=np.zeros((n_params, )),
                        S=np.eye(n_params),
                        seed=seed + 1)
    s = ds.Identity(seed=seed + 2)
    g = dg.Default(model=model, prior=prior, summary=s, seed=seed + 3)
    obs = np.zeros((1, n_params))  # reseed generator etc. (?)

    res = inferenceobj(g, obs=obs, seed=seed + 4, **inf_setup_opts)
    res.reset(seed=seed + 4)

    m_true, S_true = simplegaussprod(obs, model.noise_cov, prior.m, prior.S)
    return res, m_true, S_true
Example #11
    def predict(self, *args, **kwargs):
        p = super().predict(*args, **kwargs)

        if self.round > 0 and self.proposal_used[-1] in ['gaussian', 'mog']:
            assert self.network.density == 'mog' and isinstance(p, dd.MoG)
            P_offset = np.eye(p.ndim) * self.Ptol
            # add the prior precision to each posterior component if needed
            if self.add_prior_precision and isinstance(self.generator.prior,
                                                       dd.Gaussian):
                P_offset += self.generator.prior.P
            p = dd.MoG(a=p.a,
                       xs=[
                           dd.Gaussian(m=x.m, P=x.P + P_offset, seed=x.seed)
                           for x in p.xs
                       ])

        return p
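The precision update in predict above uses the fact that a product of Gaussian densities is again Gaussian with precision equal to the sum of the precisions. A quick 1-d sanity check with scipy (illustrative; scipy is not used by the example itself):

from scipy import stats
P1, P2 = 2.0, 3.0  # precisions of two zero-mean 1-d Gaussians
x = np.linspace(-3., 3., 7)
log_prod = stats.norm(0, P1**-0.5).logpdf(x) + stats.norm(0, P2**-0.5).logpdf(x)
log_sum = stats.norm(0, (P1 + P2)**-0.5).logpdf(x)
# the product is N(0, 1/(P1 + P2)) up to a constant, so the gap is flat in x
assert np.allclose(log_prod - log_sum, (log_prod - log_sum)[0])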
Example #12
def smoothing_prior(n_params=10, seed=None):
    """Prior"""
    M = n_params-1

    # Smoothing prior on h; N(0, 1) on b0. Smoothness is encouraged by
    # penalizing 2nd-order differences of filter elements
    D = np.diag(np.ones(M)) - np.diag(np.ones(M-1), -1)
    F = np.dot(D, D) + np.diag(1.0 * np.arange(M) / M)**0.5
    Binv = np.zeros(shape=(M+1, M+1))  # Binv is block diagonal
    Binv[0, 0] = 0.5  # offset (b0)
    Binv[1:, 1:] = np.dot(F.T, F)  # filter (h)

    # Prior params
    prior_mn = np.zeros((n_params, ))
    prior_prec = Binv

    return dd.Gaussian(m=prior_mn, P=prior_prec, seed=seed)
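To see the smoothing effect, compare the normalized second-difference energy of draws from this prior against white noise; draws from smoothing_prior should be much smoother. A quick check (illustrative, assumes numpy's global RNG; exact numbers vary):

p_smooth = smoothing_prior(n_params=10, seed=1)
h = p_smooth.gen(1000)[:, 1:]  # drop the b0 offset, keep the filter h
white = np.random.randn(1000, 9)
# second-difference energy relative to total energy, averaged over draws
rough = lambda x: np.mean(np.sum(np.diff(x, n=2, axis=1)**2, axis=1) /
                          np.sum(x**2, axis=1))
assert rough(h) < rough(white)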
Example #13
def test_basic_inference(n_params=2, seed=42):
    m = Gauss(dim=n_params, seed=seed)
    p = dd.Gaussian(m=np.zeros((n_params, )), S=np.eye(n_params), seed=seed)
    s = ds.Identity()
    g = dg.Default(model=m, prior=p, summary=s)

    # set up inference
    res = infer.Basic(g, seed=seed)

    # run with N samples
    out = res.run(1000)

    # check result
    posterior = res.predict(np.array([0., 0.]).reshape(1, -1))
    assert np.allclose(posterior.xs[0].S,
                       np.array([[0.1, 0.0], [0.0, 0.1]]),
                       atol=0.05)
    assert np.allclose(posterior.xs[0].m, np.array([0.0, 0.0]), atol=0.05)
Example #14
def test_rng_repeatability():
    mu = np.atleast_1d([0.0])
    S = np.atleast_2d(1.0)

    # distributions
    pG = dd.Gaussian(m=mu, S=S)
    check_repeatability_dist(pG)

    pMoG = dd.MoG(a=np.array([0.25, 0.75]), ms=[mu, mu], Ss=[S, S])
    check_repeatability_dist(pMoG)

    # simulators
    mG = sims.Gauss()
    check_repeatability_sim(mG, np.zeros(mG.dim_param).reshape(-1, 1))

    mMoG = sims.GaussMixture()
    check_repeatability_sim(mMoG, np.zeros(mMoG.dim_param).reshape(-1, 1))

    # generators
    g = gen.Default(model=mMoG, prior=pMoG, summary=Identity())
    check_repeatability_gen(g)

    # inference methods
    # we're going to create each one with a different deepcopy of g to make
    # sure there are no side effects, e.g. changes to the proposal
    x0 = g.gen(1, verbose=False)[1]
    inf_opts = dict(obs=x0,
                    n_components=2,
                    n_hiddens=[5, 5],
                    verbose=False,
                    pilot_samples=0)

    yB_nosvi = inf.Basic(deepcopy(g), svi=False, **inf_opts)
    check_repeatability_infer(yB_nosvi)

    yB_svi = inf.Basic(deepcopy(g), svi=True, **inf_opts)
    check_repeatability_infer(yB_svi)

    # skip CDELFI for now since it might crash if we don't use the prior
    #yC = inf.CDELFI(deepcopy(g), **inf_opts)
    #check_repeatability_infer(yC)

    yS = inf.SNPE(deepcopy(g), prior_mixin=0.5, **inf_opts)
    check_repeatability_infer(yS)
Example #15
def prior(true_params, seed=None, prior_log=False, prior_uniform=False):
    """Prior"""
    range_lower = param_transform(prior_log, 0.5 * true_params)
    range_upper = param_transform(prior_log, 1.5 * true_params)

    range_lower = range_lower[0:len(true_params)]
    range_upper = range_upper[0:len(true_params)]

    if prior_uniform:
        prior_min = range_lower
        prior_max = range_upper

        return dd.Uniform(lower=prior_min, upper=prior_max, seed=seed)
    else:
        prior_mn = param_transform(prior_log, true_params)
        prior_cov = np.diag((range_upper - range_lower)**2) / 12

        return dd.Gaussian(m=prior_mn, S=prior_cov, seed=seed)
Example #16
def dont_test_remotegen_slurm(n_samples=500,
                              n_params=2,
                              seed=66,
                              save_every=200,
                              hostname=None,
                              username=None,
                              clusters=None,
                              remote_python_executable=None,
                              remote_work_path=None):
    '''This test is currently disabled because we don't have SLURM running on
    Travis right now. It works fine when run locally on a machine with
    key-based SSH access to a SLURM cluster, as of 17.09.2019.'''
    assert type(hostname) is str and type(username) is str, \
        "hostname and username must be provided"
    assert type(clusters) is str, \
        "cluster(s) must be specified as a (comma-delimited) string"

    p = dd.Gaussian(m=np.zeros((n_params, )), S=np.eye(n_params), seed=seed)

    simulator_kwargs = dict(dim=2)

    slurm_options = {
        'clusters': clusters,
        'time': '0:10:00',
        'ntasks-per-node': 2,
        'nodes': 2
    }

    g = dg.RemoteGenerator(simulator_class=Gauss,
                           prior=p,
                           summary_class=ds.Identity,
                           hostname=hostname,
                           username=username,
                           simulator_kwargs=simulator_kwargs,
                           use_slurm=True,
                           remote_python_executable=remote_python_executable,
                           remote_work_path=remote_work_path,
                           slurm_options=slurm_options,
                           save_every=save_every,
                           seed=seed + 2)
    params, stats = g.gen(n_samples, verbose=False)
    return params, stats
Example #17
def test_basic_inference_inputsamples(n_params=2, seed=42, n_pilot=1000):
    model = Gauss(dim=n_params, seed=seed)
    prior = dd.Gaussian(m=np.zeros((n_params, )),
                        S=np.eye(n_params),
                        seed=seed + 1)
    s = ds.Identity(seed=seed + 2)
    g = dg.Default(model=model, prior=prior, summary=s, seed=seed + 3)
    obs = np.zeros((1, n_params))  # reseed generator etc. (?)
    m_true, S_true = simplegaussprod(obs, model.noise_cov, prior.m, prior.S)

    params, stats = g.gen(n_pilot)
    pilot_samples = (params, stats)

    res = infer.Basic(g, obs=obs, seed=seed + 4, pilot_samples=pilot_samples)
    res.reset(seed=seed + 4)

    out = res.run(n_train=1000)
    posterior = res.predict(res.obs.reshape(1, -1))

    check_gaussian_posterior(posterior, m_true, S_true)
Example #18
def test_IndependentJoint_marginals():
    N = 1000
    m = np.array([1., 3., 0.])
    S = [[8., 2., 1.], [2., 3., 2.], [1., 2., 3.]]
    gs = [dd.Gaussian(m=m + i, S=S) for i in [-1, 0, 1]]
    dist = dd.IndependentJoint(gs)
    samples = dist.gen(N)
    log_probs = dist.eval(samples, log=True)
    jjs = [np.arange(3 * i, 3 * (i + 1)) for i in range(3)]
    log_marginals = [dist.eval(samples[:, jj], ii=jj) for jj in jjs]
    assert np.isclose(log_probs, np.vstack(log_marginals).sum(axis=0)).all()

    log_submarginal_1 = dist.dists[0].eval(samples[:, [1]], ii=[1])
    log_submarginal_4 = dist.dists[1].eval(samples[:, [4]], ii=[1])
    log_submarginal_6_7_8 = dist.dists[2].eval(samples[:, [6, 7, 8]])
    log_submarginal_1_4_6_7_8 = dist.eval(samples[:, [1, 4, 6, 7, 8]],
                                          ii=[1, 4, 6, 7, 8])
    assert np.isclose(
        log_submarginal_1_4_6_7_8,
        log_submarginal_1 + log_submarginal_4 + log_submarginal_6_7_8).all()
Example #19
def test_snpe_inference(n_params=2, seed=42):
    m = Gauss(dim=n_params, seed=seed)
    p = dd.Gaussian(m=np.zeros((n_params, )), S=np.eye(n_params), seed=seed)
    s = ds.Identity()
    g = dg.Default(model=m, prior=p, summary=s)

    # observation
    _, obs = g.gen(1)

    # set up inference
    res = infer.SNPE(g, obs=obs)

    # run with N samples
    out = res.run(n_train=1000, n_rounds=1)

    # check result
    posterior = res.predict(np.array([0., 0.]).reshape(1, -1))
    assert np.allclose(posterior.xs[0].S,
                       np.array([[0.1, 0.0], [0.0, 0.1]]),
                       atol=0.05)
    assert np.allclose(posterior.xs[0].m, np.array([0.0, 0.0]), atol=0.05)
Example #20
    def gen_single(self, param):
        # See BaseSimulator for docstring
        param = np.asarray(param).reshape(-1)
        assert param.ndim == 1
        assert param.shape[0] == self.dim_param

        q_moving = dd.Gaussian(m=param,
                               S=self.noise_cov,
                               seed=self.gen_newseed())
        q_distractors = dd.MoG(a=self.a,
                               ms=self.ms,
                               Ss=self.Ss,
                               seed=self.gen_newseed())

        samples = []
        for _ in range(self.n_samples):
            if np.random.rand() < self.p_true:  # note: global RNG, not the seeded self.rng
                samples.append(q_moving.gen(1))
            else:
                samples.append(q_distractors.gen(1))

        return {'data': np.concatenate(samples, axis=0)}
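The per-sample Python loop above is easy to read but slow for large n_samples; a statistically equivalent vectorized sketch is below (standalone and illustrative, not the simulator's actual code; q_moving, q_distractors, n_samples and p_true are as in gen_single):

def sample_mixture(q_moving, q_distractors, n_samples, p_true, rng=np.random):
    # draw the number of moving-target samples once, then batch-generate
    n_true = rng.binomial(n_samples, p_true)
    parts = []
    if n_true > 0:
        parts.append(q_moving.gen(n_true))
    if n_samples - n_true > 0:
        parts.append(q_distractors.gen(n_samples - n_true))
    samples = np.concatenate(parts, axis=0)
    rng.shuffle(samples)  # the iid mixture is exchangeable, so shuffling is harmless
    return samples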
Example #21
def test_remotegen(n_samples=1000, n_params=2, seed=66, run_diagnostics=False):
    """
    test the RemoteGenerator by using the local machine to ssh into itself.
    For this test to succeed, an ssh private key will need to be added to the
    ssh agent, and the corresponding public key added to authorized_keys

    NOTE: This test will fail on travis unless eval $(ssh-agent -s) is run in the main script. Running it here using
    os.system seems to have no effect.
    """
    p = dd.Gaussian(m=np.zeros((n_params, )), S=np.eye(n_params), seed=seed)

    simulator_kwargs = dict(dim=2)

    hostname = '127.0.0.1'
    username = getpass.getuser()

    # in a real-world scenario, we would have already manually authenticated
    # the host. what we're doing here is a big security risk, but for localhost
    # it's (probably?) ok
    os.system('cp ~/.ssh/known_hosts ~/.ssh/known_hosts_backup')
    os.system('cp ~/.ssh/authorized_keys ~/.ssh/authorized_keys_backup')
    os.system('ssh-keyscan -H {0} >> ~/.ssh/known_hosts'.format(hostname))
    # generate a key-pair to use on localhost
    os.system('ssh-keygen -b 2048 -t rsa -f ~/.ssh/test_remotegen -q -N ""')
    os.system(
        'ssh-add ~/.ssh/test_remotegen')  # add private key for client side
    os.system('cat ~/.ssh/test_remotegen.pub >> ~/.ssh/authorized_keys')

    if run_diagnostics:
        # run some diagnostics and print results to stderr
        sshdir = os.path.expanduser('~/.ssh/')
        sys.stderr.write(
            subprocess.run(['ssh-add', '-l'],
                           stdout=subprocess.PIPE).stdout.decode() + '\n\n')
        sys.stderr.write(
            subprocess.run(['ls', sshdir],
                           stdout=subprocess.PIPE).stdout.decode() + '\n\n')
        sys.stderr.write(
            subprocess.run(
                ['cat', os.path.join(sshdir, 'authorized_keys')],
                stdout=subprocess.PIPE).stdout.decode() + '\n\n')
        sys.stderr.write(
            subprocess.run(['cat', os.path.join(sshdir, 'known_hosts')],
                           stdout=subprocess.PIPE).stdout.decode() + '\n\n')
        sys.stderr.write(
            subprocess.run(['ls', '-ld', sshdir],
                           stdout=subprocess.PIPE).stdout.decode() + '\n\n')
        sys.stderr.write(
            subprocess.run(['ls', '-l', sshdir],
                           stdout=subprocess.PIPE).stdout.decode() + '\n\n')

    try:
        g = dg.RemoteGenerator(simulator_class=Gauss,
                               prior=p,
                               summary_class=ds.Identity,
                               hostname=hostname,
                               username=username,
                               simulator_kwargs=simulator_kwargs,
                               use_slurm=False,
                               remote_python_executable=sys.executable,
                               seed=seed + 2)
        params, stats = g.gen(n_samples, verbose=False)
        success = True
    except Exception as e:
        success = False
        err = e

    # restore ssh to previous state etc.
    os.system('ssh-add -d ~/.ssh/test_remotegen')
    os.system('mv ~/.ssh/known_hosts_backup ~/.ssh/known_hosts')
    os.system('mv ~/.ssh/authorized_keys_backup ~/.ssh/authorized_keys')
    os.system('rm ~/.ssh/test_remotegen*')

    if not success:
        raise err

    # make sure the different models are providing different outputs
    assert np.unique(params).size == params.size
    assert np.unique(stats).size == stats.size
Example #22
    def run_MoG(self,
                n_train=100,
                epochs=100,
                minibatch=50,
                n_atoms=None,
                moo=None,
                train_on_all=False,
                round_cl=1,
                stop_on_nan=False,
                monitor=None,
                verbose=False,
                print_each_epoch=False,
                reuse_prior_samples=True,
                patience=20,
                monitor_every=None,
                **kwargs):

        # simulate data
        self.set_proposal(project_to_gaussian=False)
        assert isinstance(self.generator.proposal, dd.MoG)
        prop = self.generator.proposal.ztrans(self.params_mean,
                                              self.params_std)

        trn_data, n_train_round = self.gen(n_train)
        trn_data = (*trn_data, *MoG_prop_APT_training_vars(
            prop, n_train_round, prop.n_components))

        self.trn_datasets.append(trn_data)

        if train_on_all:
            prev_datasets = []
            for i, d in enumerate(self.trn_datasets):
                if self.proposal_used[i] == 'mog':
                    prev_datasets.append(d)
                elif self.proposal_used[i] == 'prior' and reuse_prior_samples:
                    prior = self.generator.prior
                    if not isinstance(prior, dd.Uniform):
                        prior = prior.ztrans(self.params_mean, self.params_std)
                    d = (*d, *MoG_prop_APT_training_vars(prior, n_train_round))
                    prev_datasets.append(d)
                elif self.proposal_used[i] == 'gaussian':
                    params, stats, prop_m, prop_P = d
                    if np.diff(prop_m, axis=0).any() or \
                            np.diff(prop_P, axis=0).any():
                        # reusing samples with proposals that changed within
                        # a round is not yet supported
                        continue
                    prop = dd.Gaussian(m=prop_m[0], P=prop_P[0])
                    d = (params, stats,
                         *MoG_prop_APT_training_vars(prop, n_train_round))
                    prev_datasets.append(d)
                else:  # can't re-use samples from this proposal
                    continue

            trn_data = combine_trn_datasets(prev_datasets)
            n_train_round = trn_data[0].shape[0]

        self.loss, trn_inputs = self.define_loss(n=n_train_round,
                                                 round_cl=round_cl,
                                                 proposal='mog')

        t = Trainer(self.network,
                    self.loss,
                    trn_data=trn_data,
                    trn_inputs=trn_inputs,
                    seed=self.gen_newseed(),
                    monitor=self.monitor_dict_from_names(monitor),
                    **kwargs)

        log = t.train(epochs=self.epochs_round(epochs),
                      minibatch=minibatch,
                      verbose=verbose,
                      print_each_epoch=print_each_epoch,
                      stop_on_nan=stop_on_nan,
                      patience=patience,
                      monitor_every=monitor_every)

        return log, trn_data