Example #1
File: APT.py  Project: cunningham-lab/epi
    def run_prior(self,
                  n_train=100,
                  epochs=100,
                  minibatch=50,
                  n_atoms=None,
                  moo=None,
                  train_on_all=False,
                  round_cl=1,
                  stop_on_nan=False,
                  monitor=None,
                  verbose=False,
                  print_each_epoch=False,
                  patience=20,
                  monitor_every=None,
                  reuse_prior_samples=True,
                  **kwargs):

        # simulate data
        self.generator.proposal = self.generator.prior
        trn_data, n_train_round = self.gen(n_train)
        self.trn_datasets.append(trn_data)

        if train_on_all and reuse_prior_samples:
            prior_datasets = [
                d for i, d in enumerate(self.trn_datasets)
                if self.proposal_used[i] == 'prior'
            ]
            trn_data = combine_trn_datasets(prior_datasets)
            n_train_round = trn_data[0].shape[0]

        # train network
        self.loss, trn_inputs = self.define_loss(n=n_train_round,
                                                 round_cl=round_cl,
                                                 proposal='prior')
        t = Trainer(self.network,
                    self.loss,
                    trn_data=trn_data,
                    trn_inputs=trn_inputs,
                    seed=self.gen_newseed(),
                    monitor=self.monitor_dict_from_names(monitor),
                    **kwargs)
        log = t.train(epochs=self.epochs_round(epochs),
                      minibatch=minibatch,
                      verbose=verbose,
                      print_each_epoch=print_each_epoch,
                      stop_on_nan=stop_on_nan,
                      patience=patience,
                      monitor_every=monitor_every)

        return log, trn_data
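
A minimal usage sketch for the first round (hypothetical: the `apt` instance and how its generator was constructed are assumptions, not part of the listing above):

    # Round 1 of sequential inference: propose from the prior, simulate,
    # and fit the density network; trn_data begins with the simulated
    # parameters and summary statistics.
    log, trn_data = apt.run_prior(n_train=1000,
                                  epochs=200,
                                  minibatch=100,
                                  verbose=True)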
Example #2
File: APT.py  Project: stjordanis/apt_icml
    def run_atomic(self,
                   n_train=100,
                   epochs=100,
                   minibatch=50,
                   n_atoms=10,
                   moo='resample',
                   train_on_all=False,
                   reuse_prior_samples=True,
                   combined_loss=False,
                   round_cl=1,
                   stop_on_nan=False,
                   monitor=None,
                   verbose=False,
                   **kwargs):

        # ActiveTrainer doesn't de-normalize params before evaluating the prior
        assert np.all(self.params_mean == 0.0) \
               and np.all(self.params_std == 1.0), "prior_norm+atomic not OK"

        assert minibatch > 1, "minimum minibatch size 2 for atomic proposals"
        if n_atoms is None:
            # on GPU, use as many atoms as the minibatch allows; otherwise cap at 9
            if theano.config.device.startswith('cuda'):
                n_atoms = minibatch - 1
            else:
                n_atoms = min(minibatch - 1, 9)
        assert n_atoms < minibatch, "Minibatch too small for this many atoms"
        # simulate data
        self.set_proposal()
        trn_data, n_train_round = self.gen(n_train)
        self.trn_datasets.append(trn_data)  # don't store prior_masks

        if train_on_all:
            if reuse_prior_samples:
                trn_data = combine_trn_datasets(self.trn_datasets,
                                                max_inputs=2)
            else:
                trn_data = combine_trn_datasets([
                    td for td, pu in zip(self.trn_datasets, self.proposal_used)
                    if pu != 'prior'
                ])
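            # combined_loss: append a per-sample 0/1 mask marking which stored
            # samples came from prior rounds (pu == 'prior'), so the combined
            # loss defined below can tell prior and proposal samples apart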
            if combined_loss:
                prior_masks = \
                    [np.ones(td[0].shape[0], dtype) * (pu == 'prior')
                     for td, pu in zip(self.trn_datasets, self.proposal_used)]
                trn_data = (*trn_data, np.concatenate(prior_masks))
            n_train_round = trn_data[0].shape[0]

        # train network
        self.loss, trn_inputs = self.define_loss(n=n_train_round,
                                                 round_cl=round_cl,
                                                 proposal='atomic',
                                                 combined_loss=combined_loss)

        t = ActiveTrainer(self.network,
                          self.loss,
                          trn_data=trn_data,
                          trn_inputs=trn_inputs,
                          seed=self.gen_newseed(),
                          monitor=self.monitor_dict_from_names(monitor),
                          generator=self.generator,
                          n_atoms=n_atoms,
                          moo=moo,
                          obs=(self.obs - self.stats_mean) / self.stats_std,
                          **kwargs)

        log = t.train(epochs=self.epochs_round(epochs),
                      minibatch=minibatch,
                      stop_on_nan=stop_on_nan,
                      verbose=verbose,
                      strict_batch_size=True)

        return log, trn_data
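
A hedged usage sketch for a later atomic round (the `apt` instance is again an assumption); note the constraints enforced above, `minibatch > 1` and `n_atoms < minibatch`:

    # Atomic APT round: n_atoms contrastive parameters per minibatch
    # element, drawn according to moo ('resample' is the default above);
    # train_on_all additionally reuses stored datasets from earlier rounds.
    log, trn_data = apt.run_atomic(n_train=1000,
                                   epochs=200,
                                   minibatch=100,
                                   n_atoms=10,      # must stay below minibatch
                                   moo='resample',
                                   train_on_all=True,
                                   reuse_prior_samples=True)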
Example #3
File: APT.py  Project: stjordanis/apt_icml
    def run_gaussian(self,
                     n_train=100,
                     epochs=100,
                     minibatch=50,
                     n_atoms=None,
                     moo=None,
                     train_on_all=False,
                     round_cl=1,
                     stop_on_nan=False,
                     monitor=None,
                     verbose=False,
                     reuse_prior_samples=True,
                     **kwargs):

        # simulate data
        self.set_proposal(project_to_gaussian=True)
        prop = self.generator.proposal
        assert isinstance(prop, dd.Gaussian)
        trn_data, n_train_round = self.gen(n_train)

        # here we're just repeating the same fixed proposal, though we
        # could also introduce some variety if we wanted.
        prop_m = np.expand_dims(prop.m, 0).repeat(n_train_round, axis=0)
        prop_P = np.expand_dims(prop.P, 0).repeat(n_train_round, axis=0)
        trn_data = (*trn_data, prop_m, prop_P)
        self.trn_datasets.append(trn_data)

        if train_on_all:
            prev_datasets = []
            for i, d in enumerate(self.trn_datasets):
                if self.proposal_used[i] == 'gaussian':
                    prev_datasets.append(d)
                    continue
                elif (self.proposal_used[i] != 'prior'
                      or not reuse_prior_samples):
                    continue
                # prior samples: the Gaussian loss reduces to the prior loss
                if isinstance(self.generator.prior, dd.Gaussian):
                    prop_m = self.generator.prior.mean
                    prop_P = self.generator.prior.P
                elif isinstance(self.generator.prior, dd.Uniform):
                    # model a uniform as a zero-precision Gaussian:
                    prop_m = np.zeros(self.generator.prior.ndim, dtype)
                    prop_P = np.zeros(
                        (self.generator.prior.ndim, self.generator.prior.ndim),
                        dtype)
                else:  # can't reuse prior samples unless prior is uniform or Gaussian
                    continue
                prop_m = np.expand_dims(prop_m, 0).repeat(d[0].shape[0],
                                                          axis=0)
                prop_P = np.expand_dims(prop_P, 0).repeat(d[0].shape[0],
                                                          axis=0)
                prev_datasets.append((*d, prop_m, prop_P))

            trn_data = combine_trn_datasets(prev_datasets)
            n_train_round = trn_data[0].shape[0]

        # train network
        self.loss, trn_inputs = self.define_loss(n=n_train_round,
                                                 round_cl=round_cl,
                                                 proposal='gaussian')
        t = Trainer(self.network,
                    self.loss,
                    trn_data=trn_data,
                    trn_inputs=trn_inputs,
                    seed=self.gen_newseed(),
                    monitor=self.monitor_dict_from_names(monitor),
                    **kwargs)

        log = t.train(epochs=self.epochs_round(epochs),
                      minibatch=minibatch,
                      verbose=verbose,
                      stop_on_nan=stop_on_nan)

        return log, trn_data
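
The proposal moments are tiled into per-sample arrays before being appended to `trn_data`. A self-contained sketch of that tiling, with assumed shapes (`prop.m` a length-d mean, `prop.P` a d-by-d precision matrix):

    import numpy as np

    m = np.zeros(3)     # proposal mean, shape (3,)
    P = np.eye(3)       # proposal precision, shape (3, 3)
    n_train_round = 5

    # One copy of the fixed proposal per training sample, as in the
    # listing above; a per-sample proposal would vary these rows instead.
    prop_m = np.expand_dims(m, 0).repeat(n_train_round, axis=0)  # (5, 3)
    prop_P = np.expand_dims(P, 0).repeat(n_train_round, axis=0)  # (5, 3, 3)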
Example #4
File: APT.py  Project: cunningham-lab/epi
    def run_MoG(self,
                n_train=100,
                epochs=100,
                minibatch=50,
                n_atoms=None,
                moo=None,
                train_on_all=False,
                round_cl=1,
                stop_on_nan=False,
                monitor=None,
                verbose=False,
                print_each_epoch=False,
                reuse_prior_samples=True,
                patience=20,
                monitor_every=None,
                **kwargs):

        # simulate data
        self.set_proposal(project_to_gaussian=False)
        assert isinstance(self.generator.proposal, dd.MoG)
        prop = self.generator.proposal.ztrans(self.params_mean,
                                              self.params_std)

        trn_data, n_train_round = self.gen(n_train)
        trn_data = (*trn_data, *MoG_prop_APT_training_vars(
            prop, n_train_round, prop.n_components))

        self.trn_datasets.append(trn_data)

        if train_on_all:
            prev_datasets = []
            for i, d in enumerate(self.trn_datasets):
                if self.proposal_used[i] == 'mog':
                    prev_datasets.append(d)
                elif self.proposal_used[i] == 'prior' and reuse_prior_samples:
                    prior = self.generator.prior
                    if not isinstance(prior, dd.Uniform):
                        prior = prior.ztrans(self.params_mean, self.params_std)
                    d = (*d, *MoG_prop_APT_training_vars(prior, n_train_round))
                    prev_datasets.append(d)
                elif self.proposal_used[i] == 'gaussian':
                    params, stats, prop_m, prop_P = d
                    if np.diff(prop_m, axis=0).any() or np.diff(prop_P,
                                                                axis=0).any():
                        continue  # reusing samples with proposals that changed within a round is not yet supported
                    prop = dd.Gaussian(m=prop_m[0], P=prop_P[0])
                    d = (params, stats,
                         *MoG_prop_APT_training_vars(prop, n_train_round))
                    prev_datasets.append(d)
                else:  # can't re-use samples from this proposal
                    continue

            trn_data = combine_trn_datasets(prev_datasets)
            n_train_round = trn_data[0].shape[0]

        self.loss, trn_inputs = self.define_loss(n=n_train_round,
                                                 round_cl=round_cl,
                                                 proposal='mog')

        t = Trainer(self.network,
                    self.loss,
                    trn_data=trn_data,
                    trn_inputs=trn_inputs,
                    seed=self.gen_newseed(),
                    monitor=self.monitor_dict_from_names(monitor),
                    **kwargs)

        log = t.train(epochs=self.epochs_round(epochs),
                      minibatch=minibatch,
                      verbose=verbose,
                      print_each_epoch=print_each_epoch,
                      stop_on_nan=stop_on_nan,
                      patience=patience,
                      monitor_every=monitor_every)

        return log, trn_data
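
When reusing a round that trained with a Gaussian proposal, the code above only accepts datasets whose proposal stayed fixed within the round. A standalone sketch of that check (the array contents are illustrative assumptions):

    import numpy as np

    # Per-sample proposal moments as stored with a 'gaussian' round:
    prop_m = np.tile(np.array([0.0, 1.0]), (4, 1))  # (4, 2), identical rows
    prop_P = np.tile(np.eye(2), (4, 1, 1))          # (4, 2, 2), identical slices

    # np.diff(..., axis=0).any() is True iff consecutive entries differ,
    # i.e. the proposal changed mid-round and the dataset is skipped.
    changed = np.diff(prop_m, axis=0).any() or np.diff(prop_P, axis=0).any()
    print(changed)  # False -> safe to rebuild the MoG training variables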