def offline_learning():
    """Load the replay buffer from disk and train the deep1 model on it.

    Opens the file-backed replay buffer, (re)loads the train/eval models,
    and runs a generic training loop that periodically distills the online
    network into a new target network. The buffer is always closed, even
    on failure.
    """
    perf_path = os.path.join(SAVEDIR, 'offline_learning_perf.log')
    perf = perf_stats.LoggingPerfStats('deep1 offline learning', perf_path)

    replay = replay_buffer.FileReadableReplayBuffer(REPLAY_FOLDER, perf=perf)
    try:
        print(f'loaded {len(replay)} experiences for replay...')
        if not os.path.exists(MODELFILE):
            _init_model()

        network = Deep1ModelTrain.load(MODELFILE)
        teacher = MyTeacher(FFTeacher())

        train_pwl = MyPWL(replay, Deep1ModelEval.load(EVAL_MODELFILE), teacher)
        test_pwl = train_pwl  # evaluate on the same replay data

        def update_target(ctx: tnr.GenericTrainingContext, hint: str):
            # Persist the online network, then distill it into a fresh
            # target network (three passes of 1024 samples each) and swap
            # the new target into the train PWL.
            ctx.logger.info('swapping target network, hint=%s', hint)
            network.save(MODELFILE, exist_ok=True)

            fresh = Deep1ModelToEval(network.fc_layers)
            for _ in range(3):
                train_pwl.mark()
                for _ in range(0, 1024, ctx.batch_size):
                    train_pwl.fill(ctx.points, ctx.labels)
                    teacher.classify_many(fresh, ctx.points,
                                          ctx.labels.unsqueeze(1))
                fresh.learning_to_current()
                train_pwl.reset()

            fresh = fresh.to_evaluative()
            fresh.save(EVAL_MODELFILE, exist_ok=True)

            train_pwl.target_model = fresh

        trainer = tnr.GenericTrainer(
            train_pwl=train_pwl,
            test_pwl=test_pwl,
            teacher=teacher,
            batch_size=32,
            learning_rate=0.0001,
            optimizer=torch.optim.Adam(
                [p for p in network.parameters() if p.requires_grad],
                lr=0.0001),
            criterion=torch.nn.MSELoss())

        trainer.reg(tnr.EpochsTracker())
        trainer.reg(tnr.EpochsStopper(100))
        trainer.reg(tnr.InfOrNANDetecter())
        trainer.reg(tnr.InfOrNANStopper())
        trainer.reg(tnr.DecayTracker())
        trainer.reg(tnr.DecayStopper(1))
        # smaller cutoffs require more bootstrapping
        trainer.reg(tnr.OnEpochCaller.create_every(update_target, skip=CUTOFF))
        trainer.reg(tnr.DecayOnPlateau())

        res = trainer.train(network,
                            target_dtype=torch.float32,
                            point_dtype=torch.float32,
                            perf=perf)
        if res['inf_or_nan']:
            print('training failed! inf or nan!')
    finally:
        replay.close()
def main():
    """Meant to be invoked for this runner.

    Trains an encoder/decoder on a 250-word vocabulary, checkpointing
    every 50 epochs, then evaluates the trained model.
    """
    vocab = mwords.load_custom(
        'data/commonwords/google-10000-english-no-swears.txt').subset(250)
    sampler = ussp.UniformSSP(words=vocab.words, char_delay=64)

    model = ss1.EncoderDecoder(input_dim=menc.INPUT_DIM,
                               encoding_dim=64,
                               context_dim=32,
                               decoding_dim=64,
                               output_dim=menc.OUTPUT_DIM,
                               encoding_layers=1,
                               decoding_layers=1)

    teacher = ss1.EncoderDecoderTeacher(menc.stop_failer, 30)

    trainable = [p for p in model.parameters() if p.requires_grad]
    trainer = stnr.SSPGenericTrainer(
        train_ssp=sampler,
        test_ssp=sampler,
        teacher=teacher,
        batch_size=10,
        learning_rate=0.003,
        optimizers=[torch.optim.Adam(trainable, lr=1)],
        criterion=torch.nn.MSELoss())

    model_dir = os.path.join(SAVEDIR, 'trained_models')
    if os.path.exists(model_dir):
        shared.filetools.deldir(model_dir)  # start from a clean checkpoint dir

    trainer.reg(tnr.EpochsTracker(verbose=False))
    trainer.reg(tnr.EpochProgress(5))
    trainer.reg(tnr.DecayTracker())
    trainer.reg(tnr.DecayOnPlateau(patience=15, verbose=False))
    trainer.reg(tnr.DecayStopper(5))
    trainer.reg(tnr.LRMultiplicativeDecayer(reset_state=True))
    trainer.reg(tnr.OnEpochCaller.create_every(tnr.save_model(model_dir),
                                               skip=50,
                                               suppress_on_inf_or_nan=False))
    trainer.reg(mtnr.AccuracyTracker(5, 1000, True))

    trainer.train(model)
    print('finished')

    _eval(sampler, teacher, model)
def train_on(network, teacher, wordlist, num_words, thisdir, patience):
    """Trains a network with the given settings.

    Trains on the first `num_words` entries of `wordlist`, saving the word
    subset and periodic checkpoints under `thisdir`, then evaluates the
    network and returns the final accuracy from the training result.
    """
    subset = wordlist.first(num_words)
    subset.save(os.path.join(thisdir, 'words.txt'), True)

    sampler = ussp.UniformSSP(words=subset.words, char_delay=64)
    trainable = [p for p in network.parameters() if p.requires_grad]
    trainer = stnr.SSPGenericTrainer(
        train_ssp=sampler,
        test_ssp=sampler,
        teacher=teacher,
        batch_size=1,
        learning_rate=0.003,
        optimizers=[torch.optim.Adam(trainable, lr=1)],
        criterion=torch.nn.SmoothL1Loss())

    model_dir = os.path.join(thisdir, 'trained_models')
    if os.path.exists(model_dir):
        shared.filetools.deldir(model_dir)  # start from a clean checkpoint dir

    trainer.reg(tnr.EpochsTracker(verbose=False))
    trainer.reg(tnr.EpochProgress(5, accuracy=True))
    trainer.reg(tnr.DecayTracker())
    trainer.reg(tnr.DecayOnPlateau(patience=patience,
                                   verbose=False,
                                   initial_patience=5))
    trainer.reg(tnr.DecayStopper(5))
    trainer.reg(tnr.LRMultiplicativeDecayer(reset_state=True))
    trainer.reg(tnr.OnEpochCaller.create_every(tnr.save_model(model_dir),
                                               skip=1000,
                                               suppress_on_inf_or_nan=False))
    trainer.reg(mtnr.AccuracyTracker(5, 100, True, verbose=False))

    result = trainer.train(network)

    _eval(sampler, teacher, network)

    return result['accuracy']
# Exemple #4 (score 0) -- stray example separator from the paste source
    def realize(self, values: typing.Dict[str, typing.Any], **sensitives):
        """Build the trainer/network pair described by `values`.

        Hyperparameters come from the `values` dict (cast to the expected
        numeric types); `batch_size` and `learning_rate` come from the
        keyword-only `sensitives`. Returns (trainer, network).
        """
        train_pwl, test_pwl = self.pwl_func()

        rnn = NaturalRNN.create(
            str(values['nonlinearity']),
            test_pwl.input_dim,
            int(values['hidden_size']),
            test_pwl.output_dim,
            input_weights=wi.OrthogonalWeightInitializer(
                float(values['inp_stddev']), 0),
            input_biases=wi.ZerosWeightInitializer(),
            hidden_weights=wi.SompolinskySmoothedFixedGainWeightInitializer(
                float(values['dt']), float(values['g'])),
            hidden_biases=wi.GaussianWeightInitializer(
                mean=0, vari=float(values['hidden_bias_vari']),
                normalize_dim=0),
            output_weights=wi.GaussianWeightInitializer(
                mean=0, vari=float(values['output_weight_vari']),
                normalize_dim=0),
            output_biases=wi.ZerosWeightInitializer())

        trainer = tnr.GenericTrainer(
            train_pwl=train_pwl,
            test_pwl=test_pwl,
            teacher=RNNTeacher(
                recurrent_times=int(values['recurrent_times']),
                input_times=1),
            batch_size=int(sensitives['batch_size']),
            learning_rate=float(sensitives['learning_rate']),
            optimizer=torch.optim.RMSprop(
                [p for p in rnn.parameters() if p.requires_grad],
                lr=0.001, alpha=float(values['alpha'])),
            criterion=torch.nn.CrossEntropyLoss())

        trainer.reg(tnr.EpochsTracker())
        trainer.reg(tnr.EpochsStopper(150))
        trainer.reg(tnr.DecayTracker())
        trainer.reg(tnr.DecayStopper(8))
        trainer.reg(tnr.LRMultiplicativeDecayer(factor=values['lr_factor']))
        trainer.reg(tnr.DecayOnPlateau())
        trainer.reg(tnr.AccuracyTracker(5, 1000, True))

        return trainer, rnn
def train_with_noise(vari, rep, ignoreme):  # pylint: disable=unused-argument
    """Entry point.

    Trains a 5x90-tanh feedforward network on all ten MNIST digits while
    injecting gaussian weight noise (variance `vari`) into the last layer,
    saving diagnostics under SAVEDIR/variance_{vari}/repeat_{rep}.

    :param vari: variance of the gaussian weight noise
    :param rep: repeat index; used only to name the save directory/digestor
    :param ignoreme: unused; kept so the caller's argument shape matches
    """
    # rescale() presumably normalizes the pixel range -- TODO confirm
    train_pwl = MNISTData.load_train().to_pwl().restrict_to(set(
        range(10))).rescale()
    test_pwl = MNISTData.load_test().to_pwl().restrict_to(set(
        range(10))).rescale()

    # five hidden layers, 90 tanh units each
    layers_and_nonlins = (
        (90, 'tanh'),
        (90, 'tanh'),
        (90, 'tanh'),
        (90, 'tanh'),
        (90, 'tanh'),
    )

    layers = [lyr[0] for lyr in layers_and_nonlins]
    nonlins = [lyr[1] for lyr in layers_and_nonlins]
    nonlins.append('tanh')  # output
    #layer_names = [f'{lyr[1]} (layer {idx})' for idx, lyr in enumerate(layers_and_nonlins)]
    layer_names = [
        f'Layer {idx+1}' for idx, lyr in enumerate(layers_and_nonlins)
    ]
    layer_names.insert(0, 'Input')
    layer_names.append('Output')

    network = FeedforwardLarge.create(input_dim=train_pwl.input_dim,
                                      output_dim=train_pwl.output_dim,
                                      weights=wi.GaussianWeightInitializer(
                                          mean=0, vari=0.3, normalize_dim=0),
                                      biases=wi.ZerosWeightInitializer(),
                                      layer_sizes=layers,
                                      nonlinearity=nonlins
                                      #layer_sizes=[500, 200]
                                      )

    # plain SGD; the same _lr is handed to both trainer and optimizer
    _lr = 0.1
    trainer = tnr.GenericTrainer(
        train_pwl=train_pwl,
        test_pwl=test_pwl,
        teacher=FFTeacher(),
        batch_size=30,
        learning_rate=_lr,
        optimizer=torch.optim.SGD(
            [p for p in network.parameters() if p.requires_grad], lr=_lr
        ),  #torch.optim.Adam([p for p in network.parameters() if p.requires_grad], lr=0.003),
        criterion=mycrits.meansqerr  #torch.nn.CrossEntropyLoss()#
    )

    #pca3d_throughtrain.FRAMES_PER_TRAIN = 4
    #pca3d_throughtrain.SKIP_TRAINS = 0
    #pca3d_throughtrain.NUM_FRAME_WORKERS = 6

    # digestor runs the measurement/plot jobs out of process (8 workers)
    dig = npmp.NPDigestor(f'TRMCN_{rep}_{vari}', 8)

    savedir = os.path.join(SAVEDIR, f'variance_{vari}', f'repeat_{rep}')

    # one subdirectory per diagnostic family; most are currently disabled
    # below but still zipped on finish
    dtt_training_dir = os.path.join(savedir, 'dtt')
    pca_training_dir = os.path.join(savedir, 'pca')
    pca3d_training_dir = os.path.join(savedir, 'pca3d')
    pr_training_dir = os.path.join(savedir, 'pr')
    svm_training_dir = os.path.join(savedir, 'svm')
    satur_training_dir = os.path.join(savedir, 'saturation')
    trained_net_dir = os.path.join(savedir, 'trained_model')
    pca_throughtrain_dir = os.path.join(savedir, 'pca_throughtrain')
    logpath = os.path.join(savedir, 'log.txt')
    # WeightNoiser perturbs the last layer's weights; the decay lambda
    # rebuilds the initializer with noise.vari * 0.5, which suggests the
    # noise variance is halved on decay -- confirm WeightNoiser semantics.
    # PR snapshots run every epoch (skip=1); checkpoints every 100.
    (trainer.reg(tnr.EpochsTracker()).reg(tnr.EpochsStopper(0.2)).reg(
        tnr.DecayTracker()).reg(tnr.DecayStopper(5)).reg(
            tnr.LRMultiplicativeDecayer())
     #.reg(tnr.DecayOnPlateau())
     #.reg(tnr.DecayEvery(5))
     .reg(tnr.AccuracyTracker(1, 1000, True)).reg(
         tnr.WeightNoiser(
             wi.GaussianWeightInitializer(mean=0, vari=vari),
             (lambda ctx: ctx.model.layers[-1].weight.data.detach()), 'scale',
             (lambda noise: wi.GaussianWeightInitializer(0, noise.vari * 0.5)
              )))
     #.reg(tnr.OnEpochCaller.create_every(dtt.during_training_ff(dtt_training_dir, True, dig), skip=100))
     #.reg(tnr.OnEpochCaller.create_every(pca_3d.during_training(pca3d_training_dir, True, dig, plot_kwargs={'layer_names': layer_names}), start=500, skip=100))
     #.reg(tnr.OnEpochCaller.create_every(pca_ff.during_training(pca_training_dir, True, dig), skip=100))
     .reg(
         tnr.OnEpochCaller.create_every(pr.during_training_ff(
             pr_training_dir, True, dig),
                                        skip=1))
     #.reg(tnr.OnEpochCaller.create_every(svm.during_training_ff(svm_training_dir, True, dig), skip=100))
     #.reg(tnr.OnEpochCaller.create_every(satur.during_training(satur_training_dir, True, dig), skip=100))
     .reg(
         tnr.OnEpochCaller.create_every(tnr.save_model(trained_net_dir),
                                        skip=100))
     #.reg(pca3d_throughtrain.PCAThroughTrain(pca_throughtrain_dir, layer_names, True))
     .reg(tnr.OnFinishCaller(lambda *args, **kwargs: dig.join())).reg(
         tnr.CopyLogOnFinish(logpath)).reg(
             tnr.ZipDirOnFinish(dtt_training_dir)).reg(
                 tnr.ZipDirOnFinish(pca_training_dir)).reg(
                     tnr.ZipDirOnFinish(pca3d_training_dir)).reg(
                         tnr.ZipDirOnFinish(pr_training_dir)).reg(
                             tnr.ZipDirOnFinish(svm_training_dir)).reg(
                                 tnr.ZipDirOnFinish(satur_training_dir)).reg(
                                     tnr.ZipDirOnFinish(trained_net_dir)))

    trainer.train(network)
    dig.archive_raw_inputs(os.path.join(savedir, 'digestor_raw.zip'))
def main():
    """Entry point.

    Trains a small FeedforwardComplex net (one 90-unit isrlu hidden layer)
    on a synthetic gaussian-spheres problem, recording dtt/pr/svm/saturation
    snapshots every 1000 epochs, then saves the final state dict.
    """
    pwl = GaussianSpheresPWLP.create(epoch_size=2700,
                                     input_dim=INPUT_DIM,
                                     output_dim=OUTPUT_DIM,
                                     cube_half_side_len=2,
                                     num_clusters=10,
                                     std_dev=0.04,
                                     mean=0,
                                     min_sep=0.1)

    nets = cu.FluentShape(INPUT_DIM).verbose()
    network = FeedforwardComplex(INPUT_DIM, OUTPUT_DIM, [
        nets.linear_(90),
        nets.nonlin('isrlu'),
        nets.linear_(OUTPUT_DIM),
    ])

    trainer = tnr.GenericTrainer(
        train_pwl=pwl,
        test_pwl=pwl,  # train and test on the same synthetic producer
        teacher=FFTeacher(),
        batch_size=45,
        learning_rate=0.001,
        optimizer=torch.optim.Adam(
            [p for p in network.parameters() if p.requires_grad], lr=0.001),
        criterion=torch.nn.CrossEntropyLoss())

    # out-of-process digestor for the measurement callbacks (16 workers)
    dig = npmp.NPDigestor('train_one_complex', 16)
    #pca_3d.plot_ff(pca_ff.find_trajectory(network, pwl, 3), os.path.join(SAVEDIR, 'pca_3d_start'), True, dig3d)
    #dig3d.join()
    #exit()
    dtt_training_dir = os.path.join(SAVEDIR, 'dtt')
    pca_training_dir = os.path.join(SAVEDIR, 'pca')
    pr_training_dir = os.path.join(SAVEDIR, 'pr')
    svm_training_dir = os.path.join(SAVEDIR, 'svm')
    satur_training_dir = os.path.join(SAVEDIR, 'saturation')
    # NOTE(review): the dtt/pr/svm/satur callbacks here are constructed
    # without `dig`, unlike other entry points in this file -- confirm they
    # are meant to run in-process.
    (trainer.reg(tnr.EpochsTracker()).reg(tnr.EpochsStopper(150)).reg(
        tnr.DecayTracker()).reg(tnr.DecayStopper(3)).reg(
            tnr.LRMultiplicativeDecayer()).reg(tnr.DecayOnPlateau()).reg(
                tnr.AccuracyTracker(5, 1000, True)).reg(
                    tnr.OnEpochCaller.create_every(dtt.during_training_ff(
                        dtt_training_dir, True),
                                                   skip=1000))
     #.reg(tnr.OnEpochCaller.create_every(pca_ff.during_training(pca_training_dir, True), skip=1000))
     .reg(
         tnr.OnEpochCaller.create_every(
             pr.during_training_ff(pr_training_dir, True), skip=1000)).reg(
                 tnr.OnEpochCaller.create_every(
                     svm.during_training_ff(svm_training_dir, True),
                     skip=1000)).reg(
                         tnr.OnEpochCaller.create_every(satur.during_training(
                             satur_training_dir, True),
                                                        skip=1000)).
     reg(tnr.ZipDirOnFinish(dtt_training_dir)).reg(
         tnr.ZipDirOnFinish(pca_training_dir)).reg(
             tnr.ZipDirOnFinish(pr_training_dir)).reg(
                 tnr.ZipDirOnFinish(svm_training_dir)).reg(
                     tnr.ZipDirOnFinish(satur_training_dir)))
    trainer.train(network)
    torch.save(network.state_dict(), os.path.join(SAVEDIR,
                                                  'trained_network.pt'))
# Exemple #7 (score 0) -- stray example separator from the paste source
def main():
    """Entry point.

    Trains a single-hidden-layer (100 tanh) FeedforwardLarge network on a
    gaussian-spheres problem, taking PCA snapshots every 1000 epochs, and
    archives the digestor's raw inputs afterwards.
    """
    pwl = GaussianSpheresPWLP.create(epoch_size=2700,
                                     input_dim=INPUT_DIM,
                                     output_dim=OUTPUT_DIM,
                                     cube_half_side_len=2,
                                     num_clusters=10,
                                     std_dev=0.5,
                                     mean=0,
                                     min_sep=1,
                                     force_split=True)

    # alternative architectures kept for reference
    layers_and_nonlins = (
        (100, 'tanh'),
        #(100, 'linear'),
        #(25, 'linear'),
        #(90, 'tanh'),
        #(90, 'tanh'),
        #(90, 'linear'),
        #(25, 'linear'),
    )
    layers = [lyr[0] for lyr in layers_and_nonlins]
    nonlins = [lyr[1] for lyr in layers_and_nonlins]
    nonlins.append('tanh')  # output
    layer_names = [
        f'{lyr[1]} ({idx})' for idx, lyr in enumerate(layers_and_nonlins)
    ]
    layer_names.insert(0, 'input')
    layer_names.append('output')

    network = FeedforwardLarge.create(input_dim=INPUT_DIM,
                                      output_dim=OUTPUT_DIM,
                                      weights=wi.GaussianWeightInitializer(
                                          mean=0, vari=0.3, normalize_dim=1),
                                      biases=wi.ZerosWeightInitializer(),
                                      layer_sizes=layers,
                                      nonlinearity=nonlins)

    trainer = tnr.GenericTrainer(
        train_pwl=pwl,
        test_pwl=pwl,  # train and test on the same synthetic producer
        teacher=FFTeacher(),
        batch_size=20,
        learning_rate=0.001,
        optimizer=torch.optim.Adam(
            [p for p in network.parameters() if p.requires_grad], lr=0.001),
        criterion=mycrits.meansqerr  #torch.nn.CrossEntropyLoss()
    )

    # module-level knobs for the (currently disabled) through-train PCA video
    pca3d_throughtrain.FRAMES_PER_TRAIN = 1
    pca3d_throughtrain.SKIP_TRAINS = 4
    pca3d_throughtrain.NUM_FRAME_WORKERS = 6

    # out-of-process digestor for the measurement callbacks (35 workers)
    dig = npmp.NPDigestor('train_one', 35)
    #pca_3d.plot_ff(pca_ff.find_trajectory(network, pwl, 3), os.path.join(SAVEDIR, 'pca_3d_start'), True,
    #               digestor=dig, frame_time=FRAME_TIME, layer_names=layer_names)
    dtt_training_dir = os.path.join(SAVEDIR, 'dtt')
    pca_training_dir = os.path.join(SAVEDIR, 'pca')
    pr_training_dir = os.path.join(SAVEDIR, 'pr')
    svm_training_dir = os.path.join(SAVEDIR, 'svm')
    satur_training_dir = os.path.join(SAVEDIR, 'saturation')
    pca_throughtrain_dir = os.path.join(SAVEDIR, 'pca_throughtrain')
    # NOTE(review): InfOrNANDetecter is registered without a matching
    # InfOrNANStopper here, unlike offline_learning -- confirm intent.
    (trainer.reg(tnr.EpochsTracker()).reg(tnr.EpochsStopper(100)).reg(
        tnr.InfOrNANDetecter()).reg(tnr.DecayTracker()).reg(
            tnr.DecayStopper(8)).reg(tnr.LRMultiplicativeDecayer()).reg(
                tnr.DecayOnPlateau()).reg(tnr.AccuracyTracker(5, 1000, True))
     #.reg(tnr.WeightNoiser(
     #    wi.GaussianWeightInitializer(mean=0, vari=0.02, normalize_dim=None),
     #    lambda ctxt: ctxt.model.layers[-1].weight.data))
     #.reg(tnr.OnEpochCaller.create_every(satur.during_training(satur_training_dir, True, dig), skip=1000))
     #.reg(tnr.OnEpochCaller.create_every(dtt.during_training_ff(dtt_training_dir, True, dig), skip=1000))
     .reg(
         tnr.OnEpochCaller.create_every(pca_ff.during_training(
             pca_training_dir, True, dig),
                                        skip=1000))
     #.reg(tnr.OnEpochCaller.create_every(pr.during_training_ff(pr_training_dir, True, dig), skip=1000))
     #.reg(tnr.OnEpochCaller.create_every(svm.during_training_ff(svm_training_dir, True, dig), skip=1000))
     #.reg(pca3d_throughtrain.PCAThroughTrain(pca_throughtrain_dir, layer_names, True))
     .reg(tnr.OnFinishCaller(lambda *args, **kwargs: dig.join())).reg(
         tnr.ZipDirOnFinish(dtt_training_dir)).reg(
             tnr.ZipDirOnFinish(pca_training_dir)).reg(
                 tnr.ZipDirOnFinish(pr_training_dir)).reg(
                     tnr.ZipDirOnFinish(svm_training_dir)).reg(
                         tnr.ZipDirOnFinish(satur_training_dir)))
    trainer.train(network)
    #pca_3d.plot_ff(pca_ff.find_trajectory(network, pwl, 3), os.path.join(SAVEDIR, 'pca_3d_end'), True,
    #               digestor=dig, frame_time=FRAME_TIME, layer_names=layer_names)
    dig.archive_raw_inputs(os.path.join(SAVEDIR, 'raw_digestor.zip'))
def main():
    """Entry point.

    Trains a NaturalRNN (200 hidden units, 10 recurrent steps) on all ten
    MNIST digits; saves 2D PCA trajectory plots before and after training,
    3D PCA plots (one frame per recurrent timestep) afterwards, and finally
    the trained model itself.
    """
    train_pwl = MNISTData.load_train().to_pwl().restrict_to(set(range(10))).rescale()
    test_pwl = MNISTData.load_test().to_pwl().restrict_to(set(range(10))).rescale()
    network = NaturalRNN.create(
        'tanh', train_pwl.input_dim, 200, train_pwl.output_dim,
        input_weights=wi.OrthogonalWeightInitializer(0.03, 0),
        input_biases=wi.ZerosWeightInitializer(), #
        hidden_weights=wi.SompolinskySmoothedFixedGainWeightInitializer(0.001, 20),
        hidden_biases=wi.GaussianWeightInitializer(mean=0, vari=0.3, normalize_dim=0),
        output_weights=wi.GaussianWeightInitializer(mean=0, vari=0.3, normalize_dim=0),
        output_biases=wi.ZerosWeightInitializer()
    )

    trainer = tnr.GenericTrainer(
        train_pwl=train_pwl,
        test_pwl=test_pwl,
        teacher=RNNTeacher(recurrent_times=10, input_times=1),
        batch_size=30,
        learning_rate=0.0001,
        optimizer=torch.optim.RMSprop([p for p in network.parameters() if p.requires_grad], lr=0.0001, alpha=0.9),
        criterion=torch.nn.CrossEntropyLoss()
    )

    # FIX: InfOrNANDetecter was previously registered twice; the duplicate
    # added nothing. (Other entry points pair the detecter with
    # tnr.InfOrNANStopper to abort on bad losses -- consider doing so here.)
    (trainer
     .reg(tnr.EpochsTracker())
     .reg(tnr.EpochsStopper(150))
     .reg(tnr.InfOrNANDetecter())
     .reg(tnr.DecayTracker())
     .reg(tnr.DecayStopper(5))
     .reg(tnr.LRMultiplicativeDecayer())
     .reg(tnr.DecayOnPlateau())
     .reg(tnr.AccuracyTracker(5, 1000, True))
    )

    print('--saving pcs before training--')
    traj = pca.find_trajectory(network, train_pwl, 10, 2)
    savepath = os.path.join(SAVEDIR, 'pca_before_train')
    pca.plot_trajectory(traj, savepath, exist_ok=True)
    traj = pca.find_trajectory(network, test_pwl, 10, 2)
    savepath = os.path.join(SAVEDIR, 'pca_before_test')
    pca.plot_trajectory(traj, savepath, exist_ok=True)
    del traj  # free the trajectory before training

    # print('--saving distance through time before training--')
    # savepath = os.path.join(SAVEDIR, 'dtt_before_train')
    # dtt.measure_dtt(network, train_pwl, 10, savepath, verbose=True, exist_ok=True)
    # savepath = os.path.join(SAVEDIR, 'dtt_before_test')
    # dtt.measure_dtt(network, test_pwl, 10, savepath, verbose=True, exist_ok=True)


    print('--training--')
    result = trainer.train(network)
    print('--finished training--')
    print(result)

    print('--saving pcs after training--')
    traj = pca.find_trajectory(network, train_pwl, 10, 2)
    savepath = os.path.join(SAVEDIR, 'pca_after_train')
    pca.plot_trajectory(traj, savepath, exist_ok=True)
    traj = pca.find_trajectory(network, test_pwl, 10, 2)
    savepath = os.path.join(SAVEDIR, 'pca_after_test')
    pca.plot_trajectory(traj, savepath, exist_ok=True)
    del traj

    # print('--saving distance through time after training--')
    # savepath = os.path.join(SAVEDIR, 'dtt_after_train')
    # dtt.measure_dtt(network, train_pwl, 10, savepath, verbose=True, exist_ok=True)
    # savepath = os.path.join(SAVEDIR, 'dtt_after_test')
    # dtt.measure_dtt(network, test_pwl, 10, savepath, verbose=True, exist_ok=True)

    print('--saving 3d pca plots after training--')
    # one frame label per recurrent timestep, plus the input
    layer_names = ['Input']
    for i in range(1, trainer.teacher.recurrent_times + 1):
        layer_names.append(f'Timestep {i}')
    dig = npmp.NPDigestor('mnist_train_one_rnn', 2)
    nha = mutils.get_hidacts_rnn(network, train_pwl, trainer.teacher.recurrent_times)
    nha.torch()
    traj = pca_ff.to_trajectory(nha.sample_labels, nha.hid_acts, 3)
    pca_3d.plot_ff(traj, os.path.join(SAVEDIR, 'pca3d_after_train'), False, digestor=dig,
                   layer_names=layer_names)

    nha = mutils.get_hidacts_rnn(network, test_pwl, trainer.teacher.recurrent_times)
    nha.torch()
    traj = pca_ff.to_trajectory(nha.sample_labels, nha.hid_acts, 3)
    pca_3d.plot_ff(traj, os.path.join(SAVEDIR, 'pca3d_after_test'), False, digestor=dig,
                   layer_names=layer_names)

    print('--saving model--')
    torch.save(network, os.path.join(SAVEDIR, 'model.pt'))

    dig.join()  # wait for the plot workers before exiting
# Exemple #9 (score 0) -- stray example separator from the paste source
def train_with_noise(vari, rep, pr_repeats, ignoreme):  # pylint: disable=unused-argument
    """Entry point.

    Trains a deep leaky-relu FeedforwardComplex network on a synthetic
    gaussian-spheres problem while injecting gaussian weight noise
    (variance `vari`) into selected layers; saves activation measurements
    and checkpoints under SAVEDIR/variance_{vari}/repeat_{rep}. If training
    hits inf/NaN the save directory is deleted so the run can be repeated.

    :param vari: variance of the gaussian weight noise
    :param rep: repeat index; also gates whether PR snapshots are recorded
    :param pr_repeats: PR snapshots are only registered when rep < pr_repeats
    :param ignoreme: unused; kept so the caller's argument shape matches
    """
    train_pwl = GaussianSpheresPWLP.create(epoch_size=30000,
                                           input_dim=INPUT_DIM,
                                           output_dim=2,
                                           cube_half_side_len=2,
                                           num_clusters=10,
                                           std_dev=0.2,
                                           mean=0,
                                           min_sep=0.4,
                                           force_split=True)
    test_pwl = train_pwl  # evaluate on the same synthetic producer
    nets = cu.FluentShape(INPUT_DIM).verbose()

    # each linear layer starts as identity-like plus gaussian perturbation
    mywi = wi.WICombine([
        wi.RectangularEyeWeightInitializer(1),
        wi.GaussianWeightInitializer(mean=0, vari=0.3)
    ])

    network = FeedforwardComplex(INPUT_DIM, train_pwl.output_dim, [
        nets.linear_(DIM, weights_init=mywi),
        nets.nonlin('leakyrelu'),
        nets.linear_(DIM, weights_init=mywi),
        nets.nonlin('leakyrelu'),
        nets.linear_(DIM, weights_init=mywi),
        nets.nonlin('leakyrelu'),
        nets.linear_(DIM, weights_init=mywi),
        nets.nonlin('leakyrelu'),
        nets.linear_(train_pwl.output_dim),
        nets.nonlin('leakyrelu'),
    ])

    _lr = 0.01
    trainer = tnr.GenericTrainer(
        train_pwl=train_pwl,
        test_pwl=test_pwl,
        teacher=FFTeacher(),
        batch_size=20,
        learning_rate=_lr,
        optimizer=torch.optim.SGD(
            [p for p in network.parameters() if p.requires_grad], lr=_lr),
        criterion=mycrits.hubererr  #torch.nn.CrossEntropyLoss()#
    )

    #pca3d_throughtrain.FRAMES_PER_TRAIN = 4
    #pca3d_throughtrain.SKIP_TRAINS = 0
    #pca3d_throughtrain.NUM_FRAME_WORKERS = 6

    # digestor runs the measurement jobs out of process (4 workers)
    dig = npmp.NPDigestor(f'TRMCN_{rep}_{vari}', 4)

    savedir = os.path.join(SAVEDIR, f'variance_{vari}', f'repeat_{rep}')
    shared.filetools.deldir(savedir)  # always start from a clean directory

    # one subdirectory per diagnostic family; most are currently disabled
    dtt_training_dir = os.path.join(savedir, 'dtt')
    pca_training_dir = os.path.join(savedir, 'pca')
    pca3d_training_dir = os.path.join(savedir, 'pca3d')
    pr_training_dir = os.path.join(savedir, 'pr')
    svm_training_dir = os.path.join(savedir, 'svm')
    satur_training_dir = os.path.join(savedir, 'saturation')
    trained_net_dir = os.path.join(savedir, 'trained_model')
    pca_throughtrain_dir = os.path.join(savedir, 'pca_throughtrain')
    acts_training_dir = os.path.join(savedir, 'acts')
    logpath = os.path.join(savedir, 'log.txt')
    (trainer.reg(tnr.EpochsTracker()).reg(tnr.EpochsStopper(300)).reg(
        tnr.EpochProgress(5, hint_end_epoch=10)).reg(tnr.DecayTracker()).reg(
            tnr.DecayStopper(10)).reg(tnr.InfOrNANDetecter()).reg(
                tnr.InfOrNANStopper()).reg(
                    tnr.LRMultiplicativeDecayer(factor=0.9))
     #.reg(tnr.DecayOnPlateau(verbose=False))
     .reg(tnr.DecayEvery(1, verbose=False)).reg(
         tnr.AccuracyTracker(1,
                             1000,
                             True,
                             savepath=os.path.join(savedir, 'accuracy.json'))))

    # choose which layers get weight noise: all of them, or just the last
    # linear layer (index len-2; the final entry is its nonlinearity)
    if ALL_LAYERS_NOISED:
        tonoise = list(range(1, len(network.layers)))
    else:
        tonoise = [len(network.layers) - 2]

    noisestyle = 'add'

    def layer_fetcher(lyr):
        # bind lyr now (factory) to avoid late-binding closure capture
        return lambda ctx: ctx.model.layers[lyr].action.weight.data.detach()

    # noise.vari * 0.9 suggests the noise variance shrinks by 10% on each
    # decay -- confirm WeightNoiser semantics
    noisedecayer = lambda noise: wi.GaussianWeightInitializer(
        0, noise.vari * 0.9)
    for lyr in tonoise:
        if network.layers[lyr].is_module:
            trainer.reg(
                tnr.WeightNoiser(
                    wi.GaussianWeightInitializer(mean=0, vari=vari),
                    layer_fetcher(lyr), noisestyle, noisedecayer))

    # only the first pr_repeats repeats record (expensive) PR snapshots
    if rep < pr_repeats:
        trainer.reg(
            tnr.OnEpochCaller.create_every(pr.during_training_ff(
                pr_training_dir, True, dig),
                                           skip=100))
    (trainer
     #.reg(tnr.OnEpochCaller.create_every(dtt.during_training_ff(dtt_training_dir, True, dig), skip=100))
     #.reg(tnr.OnEpochCaller.create_every(pca_3d.during_training(pca3d_training_dir, True, dig, plot_kwargs={'layer_names': layer_names}), start=500, skip=100))
     #.reg(tnr.OnEpochCaller.create_every(pca_ff.during_training(pca_training_dir, True, dig), skip=100))
     #.reg(tnr.OnEpochCaller.create_every(pr.during_training_ff(pr_training_dir, True, dig), skip=100))
     #.reg(tnr.OnEpochCaller.create_every(svm.during_training_ff(svm_training_dir, True, dig), skip=100))
     #.reg(tnr.OnEpochCaller.create_every(satur.during_training(satur_training_dir, True, dig), skip=100))
     .reg(tnr.OnEpochCaller.create_every(measacts.during_training(acts_training_dir, dig, meta={'time': time.time(), 'noised_layers': tonoise, 'variance': vari, 'repeat': rep}), skip=100))
     .reg(tnr.OnEpochCaller.create_every(tnr.save_model(trained_net_dir), skip=100))
     #.reg(pca3d_throughtrain.PCAThroughTrain(pca_throughtrain_dir, layer_names, True))
     .reg(tnr.OnFinishCaller(lambda *args, **kwargs: dig.join()))
     .reg(tnr.CopyLogOnFinish(logpath))
     .reg(tnr.ZipDirOnFinish(dtt_training_dir))
     .reg(tnr.ZipDirOnFinish(pca_training_dir))
     .reg(tnr.ZipDirOnFinish(pca3d_training_dir))
     .reg(tnr.ZipDirOnFinish(pr_training_dir))
     .reg(tnr.ZipDirOnFinish(svm_training_dir))
     .reg(tnr.ZipDirOnFinish(satur_training_dir))
     .reg(tnr.ZipDirOnFinish(trained_net_dir))
    )

    result = trainer.train(network)
    dig.archive_raw_inputs(os.path.join(savedir, 'digestor_raw.zip'))

    # a poisoned run leaves no artifacts; the caller is expected to retry
    if result['inf_or_nan']:
        print('[TMCN] Inf or NAN detected - repeating run')
        shared.filetools.deldir(savedir)
def main():
    """Entry point.

    Trains a NaturalRNN on a 200-dimensional gaussian-spheres problem,
    measuring distance-through-time (dtt) and 2D PCA trajectories before
    and after training.
    """
    pwl = GaussianSpheresPWLP.create(epoch_size=1800,
                                     input_dim=200,
                                     output_dim=2,
                                     cube_half_side_len=2,
                                     num_clusters=60,
                                     std_dev=0.04,
                                     mean=0,
                                     min_sep=0.1)

    network = NaturalRNN.create(
        'tanh',
        pwl.input_dim,
        200,
        pwl.output_dim,
        input_weights=wi.OrthogonalWeightInitializer(0.03, 0),
        input_biases=wi.ZerosWeightInitializer(),  #
        hidden_weights=wi.SompolinskySmoothedFixedGainWeightInitializer(
            0.001, 20),
        hidden_biases=wi.GaussianWeightInitializer(mean=0,
                                                   vari=0.3,
                                                   normalize_dim=0),
        output_weights=wi.GaussianWeightInitializer(mean=0,
                                                    vari=0.3,
                                                    normalize_dim=0),
        output_biases=wi.ZerosWeightInitializer())

    trainer = tnr.GenericTrainer(
        train_pwl=pwl,
        test_pwl=pwl,  # train and test on the same synthetic producer
        teacher=RNNTeacher(recurrent_times=10, input_times=1),
        batch_size=30,
        learning_rate=0.001,
        optimizer=torch.optim.RMSprop(
            [p for p in network.parameters() if p.requires_grad],
            lr=0.001,
            alpha=0.9),
        criterion=torch.nn.CrossEntropyLoss())

    (trainer.reg(tnr.EpochsTracker()).reg(tnr.EpochsStopper(150)).reg(
        tnr.InfOrNANDetecter()).reg(tnr.DecayTracker()).reg(
            tnr.DecayStopper(8)).reg(tnr.LRMultiplicativeDecayer()).reg(
                tnr.DecayOnPlateau()).reg(tnr.AccuracyTracker(5, 1000, True)))

    print('--saving pcs before training--')
    traj = pca.find_trajectory(network, pwl, 10, 2)

    print('--saving distance through time before training--')
    savepath = os.path.join(SAVEDIR, 'dtt_before')
    dtt.measure_dtt(network, pwl, 10, savepath, verbose=True, exist_ok=True)

    savepath = os.path.join(SAVEDIR, 'pca_before')
    pca.plot_trajectory(traj, savepath, exist_ok=True)
    del traj  # free the trajectory before training

    print('--training--')
    result = trainer.train(network)
    print('--finished training--')
    print(result)
    # NOTE(review): this "saving pcs after training" print is duplicated
    # below; the actual PCA work happens after the dtt measurement.
    print('--saving pcs after training--')

    print('--saving distance through time after training--')
    savepath = os.path.join(SAVEDIR, 'dtt_after')
    dtt.measure_dtt(network, pwl, 10, savepath, verbose=True, exist_ok=True)

    print('--saving pcs after training--')
    traj = pca.find_trajectory(network, pwl, 10, 2)
    savepath = os.path.join(SAVEDIR, 'pca_after')
    pca.plot_trajectory(traj, savepath, exist_ok=True)

    print('--saving pr after training')
    savepath = os.path.join(SAVEDIR, 'pr_after')
    # NOTE(review): `savepath` above is computed but never used -- the pr
    # measurement that presumably followed appears to have been lost;
    # confirm intent before relying on this runner for pr output.
def main():
    """Entry point: train a single-hidden-layer tanh net on MNIST.

    Builds the network, attaches measurement plugins to a generic
    trainer (distance-through-time, PCA, participation ratio, SVM
    probes, saturation, weight noising plus weight-delta tracking),
    trains, and archives the digestor's raw inputs.
    """

    shaper = cu.FluentShape(28 * 28).verbose()
    network = FeedforwardComplex(INPUT_DIM, OUTPUT_DIM, [
        shaper.linear_(HIDDEN_DIM),
        shaper.tanh(),
        shaper.linear_(OUTPUT_DIM),
        shaper.tanh(),
    ])

    train_pwl = (MNISTData.load_train().to_pwl()
                 .restrict_to(set(range(10))).rescale())
    test_pwl = (MNISTData.load_test().to_pwl()
                .restrict_to(set(range(10))).rescale())

    # NOTE(review): unused in this variant; presumably kept for parity
    # with the other entry points' plotting code.
    layer_names = ('Input', 'Hidden', 'Output')

    trainer = tnr.GenericTrainer(
        train_pwl=train_pwl,
        test_pwl=test_pwl,
        teacher=FFTeacher(),
        batch_size=45,
        learning_rate=0.001,
        optimizer=torch.optim.Adam(
            [p for p in network.parameters() if p.requires_grad], lr=0.001),
        criterion=mycrits.meansqerr  #torch.nn.CrossEntropyLoss()
    )

    digestor = npmp.NPDigestor('train_one_complex', 35)

    dtt_training_dir = os.path.join(SAVEDIR, 'dtt')
    pca_training_dir = os.path.join(SAVEDIR, 'pca')
    pca3d_training_dir = os.path.join(SAVEDIR, 'pca3d')
    pr_training_dir = os.path.join(SAVEDIR, 'pr')
    svm_training_dir = os.path.join(SAVEDIR, 'svm')
    satur_training_dir = os.path.join(SAVEDIR, 'saturation')
    trained_net_dir = os.path.join(SAVEDIR, 'trained_model')
    pca_throughtrain_dir = os.path.join(SAVEDIR, 'pca_throughtrain')
    wds_training_dir = os.path.join(SAVEDIR, 'weightdeltas')
    logpath = os.path.join(SAVEDIR, 'log.txt')

    # Selector for the first linear layer's weight tensor; shared by the
    # weight noiser and the weight-delta tracker below.
    first_layer_weights = (
        lambda ctx: ctx.model.layers[0].action.weight.data.detach())

    # Register plugins one at a time; registration order matches the
    # original fluent chain (trackers first, then stoppers/decayers,
    # then per-epoch measurement callers, then on-finish hooks).
    trainer.reg(tnr.EpochsTracker())
    trainer.reg(tnr.EpochsStopper(3))
    trainer.reg(tnr.DecayTracker())
    trainer.reg(tnr.DecayStopper(8))
    trainer.reg(tnr.LRMultiplicativeDecayer())
    trainer.reg(tnr.DecayOnPlateau())
    trainer.reg(tnr.AccuracyTracker(5, 1000, True))
    trainer.reg(tnr.WeightNoiser(
        wi.GaussianWeightInitializer(mean=0, vari=0.1),
        first_layer_weights,
        'scale',
        (lambda noise: wi.GaussianWeightInitializer(0, noise.vari * 0.5))))
    trainer.reg(tnr.OnEpochCaller.create_every(
        dtt.during_training_ff(dtt_training_dir, True, digestor), skip=100))
    # trainer.reg(tnr.OnEpochCaller.create_every(
    #     pca_3d.during_training(pca3d_training_dir, True, digestor,
    #                            plot_kwargs={'layer_names': layer_names}),
    #     start=1000, skip=1000))
    trainer.reg(tnr.OnEpochCaller.create_every(
        pca_ff.during_training(pca_training_dir, True, digestor), skip=100))
    trainer.reg(tnr.OnEpochCaller.create_every(
        pr.during_training_ff(pr_training_dir, True, digestor), skip=100))
    trainer.reg(tnr.OnEpochCaller.create_every(
        svm.during_training_ff(svm_training_dir, True, digestor), skip=100))
    trainer.reg(tnr.OnEpochCaller.create_every(
        satur.during_training(satur_training_dir, True, digestor), skip=100))
    trainer.reg(tnr.OnEpochCaller.create_every(
        tnr.save_model(trained_net_dir), skip=100))
    trainer.reg(wds.Binned2Norm(first_layer_weights, digestor,
                                wds_training_dir,
                                'Induced Changes in $W^{(1)}$'))
    # trainer.reg(pca3d_throughtrain.PCAThroughTrain(
    #     pca_throughtrain_dir, layer_names, True, layer_indices=plot_layers))
    trainer.reg(tnr.OnFinishCaller(lambda *args, **kwargs: digestor.join()))
    trainer.reg(tnr.CopyLogOnFinish(logpath))
    for zipdir in (dtt_training_dir, pca_training_dir, pca3d_training_dir,
                   pr_training_dir, svm_training_dir, satur_training_dir,
                   trained_net_dir):
        trainer.reg(tnr.ZipDirOnFinish(zipdir))

    trainer.train(network)
    digestor.archive_raw_inputs(os.path.join(SAVEDIR, 'digestor_raw.zip'))
# Exemple #12 (scraper artifact: example separator and vote count)
def main():
    """Entry point: train a deep ISRLU/tanh feedforward net on CIFAR-10.

    Sets default linear-layer initializers, builds the network, then
    trains while periodically dumping measurements (distance-through-time,
    3D PCA, PCA, participation ratio, SVM probes, saturation) and zipping
    each output folder on finish.
    """

    cu.DEFAULT_LINEAR_BIAS_INIT = wi.ZerosWeightInitializer()
    cu.DEFAULT_LINEAR_WEIGHT_INIT = wi.GaussianWeightInitializer(
        mean=0, vari=0.3, normalize_dim=0)

    nets = cu.FluentShape(32 * 32 * 3).verbose()
    network = FeedforwardComplex(INPUT_DIM, OUTPUT_DIM, [
        nets.linear_(32 * 32 * 6),
        nets.nonlin('isrlu'),
        nets.linear_(500),
        nets.nonlin('tanh'),
        nets.linear_(250),
        nets.nonlin('tanh'),
        nets.linear_(250),
        nets.nonlin('tanh'),
        nets.linear_(100),
        nets.tanh(),
        nets.linear_(100),
        nets.tanh(),
        nets.linear_(100),
        nets.tanh(),
        nets.linear_(OUTPUT_DIM),
        nets.nonlin('isrlu'),
    ])

    train_pwl = CIFARData.load_train().to_pwl().restrict_to(set(
        range(10))).rescale()
    test_pwl = CIFARData.load_test().to_pwl().restrict_to(set(
        range(10))).rescale()

    # Plot labels for pca_3d, in the same order as the layers above.
    # BUG FIX: the fourth entry previously read 'FC -> 250 (tang)'.
    layer_names = ('input', 'FC -> 32*32*6 (ISRLU)', 'FC -> 500 (tanh)',
                   'FC -> 250 (tanh)', 'FC -> 250 (tanh)', 'FC -> 100 (tanh)',
                   'FC -> 100 (tanh)', 'FC -> 100 (tanh)',
                   f'FC -> {OUTPUT_DIM} (ISRLU)')
    # Only consumed by the commented-out PCAThroughTrain plugin below.
    plot_layers = tuple(i for i in range(2, len(layer_names) - 1))

    trainer = tnr.GenericTrainer(
        train_pwl=train_pwl,
        test_pwl=test_pwl,
        teacher=FFTeacher(),
        batch_size=45,
        learning_rate=0.001,
        optimizer=torch.optim.Adam(
            [p for p in network.parameters() if p.requires_grad], lr=0.001),
        criterion=torch.nn.CrossEntropyLoss())

    # Module-level knobs for the (currently disabled) through-train PCA.
    pca3d_throughtrain.FRAMES_PER_TRAIN = 1
    pca3d_throughtrain.SKIP_TRAINS = 16
    pca3d_throughtrain.NUM_FRAME_WORKERS = 1

    dig = npmp.NPDigestor('train_one_complex', 5)

    dtt_training_dir = os.path.join(SAVEDIR, 'dtt')
    pca_training_dir = os.path.join(SAVEDIR, 'pca')
    pca3d_training_dir = os.path.join(SAVEDIR, 'pca3d')
    pr_training_dir = os.path.join(SAVEDIR, 'pr')
    svm_training_dir = os.path.join(SAVEDIR, 'svm')
    satur_training_dir = os.path.join(SAVEDIR, 'saturation')
    trained_net_dir = os.path.join(SAVEDIR, 'trained_model')
    pca_throughtrain_dir = os.path.join(SAVEDIR, 'pca_throughtrain')
    logpath = os.path.join(SAVEDIR, 'log.txt')

    # Register plugins one at a time; order is preserved from the
    # original fluent chain (trackers/stoppers, then per-epoch
    # measurement callers, then on-finish hooks).
    trainer.reg(tnr.EpochsTracker())
    trainer.reg(tnr.EpochsStopper(STOP_EPOCH))
    trainer.reg(tnr.DecayTracker())
    trainer.reg(tnr.DecayStopper(8))
    trainer.reg(tnr.EpochProgress(print_every=120, hint_end_epoch=STOP_EPOCH))
    trainer.reg(tnr.LRMultiplicativeDecayer())
    trainer.reg(tnr.DecayOnPlateau(patience=3))
    trainer.reg(tnr.AccuracyTracker(1, 1000, True))
    trainer.reg(tnr.OnEpochCaller.create_every(
        dtt.during_training_ff(dtt_training_dir, True, dig), skip=5))
    trainer.reg(tnr.OnEpochCaller.create_every(
        pca_3d.during_training(pca3d_training_dir, True, dig,
                               plot_kwargs={'layer_names': layer_names}),
        start=10, skip=100))
    trainer.reg(tnr.OnEpochCaller.create_every(
        pca_ff.during_training(pca_training_dir, True, dig), skip=5))
    trainer.reg(tnr.OnEpochCaller.create_every(
        pr.during_training_ff(pr_training_dir, True, dig, labels=False),
        skip=5))
    trainer.reg(tnr.OnEpochCaller.create_every(
        svm.during_training_ff(svm_training_dir, True, dig), skip=5))
    trainer.reg(tnr.OnEpochCaller.create_every(
        satur.during_training(satur_training_dir, True, dig), skip=5))
    trainer.reg(tnr.OnEpochCaller.create_every(
        tnr.save_model(trained_net_dir), skip=5))
    # trainer.reg(pca3d_throughtrain.PCAThroughTrain(
    #     pca_throughtrain_dir, layer_names, True, layer_indices=plot_layers))
    trainer.reg(tnr.OnFinishCaller(lambda *args, **kwargs: dig.join()))
    for zipdir in (dtt_training_dir, pca_training_dir, pca3d_training_dir,
                   pr_training_dir, svm_training_dir, satur_training_dir,
                   trained_net_dir):
        trainer.reg(tnr.ZipDirOnFinish(zipdir))
    trainer.reg(tnr.CopyLogOnFinish(logpath))

    trainer.train(network)
    dig.archive_raw_inputs(os.path.join(SAVEDIR, 'digestor_raw.zip'))
# Exemple #13 (scraper artifact: example separator and vote count)
def main():
    """Entry point: train a small conv net (conv -> relu -> maxpool -> FC)
    on MNIST while recording measurements through training, including a
    live through-train PCA animation."""

    shaper = cu.FluentShape(28 * 28)
    network = FeedforwardComplex(INPUT_DIM, OUTPUT_DIM, [
        shaper.unflatten_conv_(1, 28, 28),
        shaper.conv_(5, 5, 5),
        shaper.relu(),
        shaper.maxpool_(2),
        shaper.flatten_(invokes_callback=True),
        # FC layer whose width equals the flattened conv output size.
        shaper.linear_(shaper.dims[0]),
        shaper.tanh(),
        shaper.linear_(OUTPUT_DIM),
        shaper.tanh(),
    ])

    train_pwl = (MNISTData.load_train().to_pwl()
                 .restrict_to(set(range(10))).rescale())
    test_pwl = (MNISTData.load_test().to_pwl()
                .restrict_to(set(range(10))).rescale())

    layer_names = ('input', 'conv2d-relu', 'maxpool', 'tanh', 'output')
    plot_layers = (3,)

    trainer = tnr.GenericTrainer(
        train_pwl=train_pwl,
        test_pwl=test_pwl,
        teacher=FFTeacher(),
        batch_size=45,
        learning_rate=0.001,
        optimizer=torch.optim.Adam(
            [p for p in network.parameters() if p.requires_grad], lr=0.001),
        criterion=torch.nn.CrossEntropyLoss())

    # Module-level knobs for the through-train PCA animation.
    pca3d_throughtrain.FRAMES_PER_TRAIN = 1
    pca3d_throughtrain.SKIP_TRAINS = 0
    pca3d_throughtrain.NUM_FRAME_WORKERS = 6

    digestor = npmp.NPDigestor('train_one_complex', 35)

    # Output folders under SAVEDIR, keyed by short name.
    folders = {name: os.path.join(SAVEDIR, name)
               for name in ('dtt', 'pca', 'pca3d', 'pr', 'svm', 'saturation',
                            'trained_model', 'pca_throughtrain')}

    # Registration order matches the original fluent chain.
    trainer.reg(tnr.EpochsTracker())
    trainer.reg(tnr.EpochsStopper(5))
    trainer.reg(tnr.DecayTracker())
    trainer.reg(tnr.DecayStopper(8))
    trainer.reg(tnr.LRMultiplicativeDecayer())
    trainer.reg(tnr.DecayOnPlateau())
    trainer.reg(tnr.AccuracyTracker(5, 1000, True))
    trainer.reg(tnr.OnEpochCaller.create_every(
        dtt.during_training_ff(folders['dtt'], True, digestor), skip=100))
    trainer.reg(tnr.OnEpochCaller.create_every(
        pr.during_training_ff(folders['pr'], True, digestor), skip=100))
    trainer.reg(tnr.OnEpochCaller.create_every(
        svm.during_training_ff(folders['svm'], True, digestor), skip=100))
    trainer.reg(tnr.OnEpochCaller.create_every(
        satur.during_training(folders['saturation'], True, digestor),
        skip=100))
    trainer.reg(tnr.OnEpochCaller.create_every(
        tnr.save_model(folders['trained_model']), skip=100))
    trainer.reg(pca3d_throughtrain.PCAThroughTrain(
        folders['pca_throughtrain'], layer_names, True,
        layer_indices=plot_layers))
    trainer.reg(tnr.OnFinishCaller(lambda *args, **kwargs: digestor.join()))
    # 'pca' and 'pca3d' stay in the zip list even though their per-epoch
    # callers are disabled in this variant.
    for key in ('dtt', 'pca', 'pca3d', 'pr', 'svm', 'saturation',
                'trained_model'):
        trainer.reg(tnr.ZipDirOnFinish(folders[key]))

    trainer.train(network)
    digestor.archive_raw_inputs(os.path.join(SAVEDIR, 'digestor_raw.zip'))