Ejemplo n.º 1
0
def test_main():
    """Run `_test_main` for two epochs with `final_fig_dir` redirected
    to the best-models directory."""
    cfg = deepcopy(CONFIGS)
    logs_ctx = tempdir(cfg['traingen']['logs_dir'])
    models_ctx = tempdir(cfg['traingen']['best_models_dir'])
    with logs_ctx, models_ctx:
        cfg['traingen']['epochs'] = 2
        cfg['traingen']['final_fig_dir'] = cfg['traingen']['best_models_dir']
        _test_main(cfg)
Ejemplo n.º 2
0
def test_predict():
    """Train in `eval_fn='predict'` mode with custom validation metrics,
    class weights, dynamic prediction thresholding, and `val_end`
    visualization callbacks — then test loading; prints elapsed time.
    """
    t0 = time()
    C = deepcopy(CONFIGS)
    with tempdir(C['traingen']['logs_dir']), \
        tempdir(C['traingen']['best_models_dir']):
        C['traingen'].update(
            dict(
                eval_fn='predict',
                key_metric='f1_score',
                val_metrics=('loss', 'tnr', 'tpr'),
                plot_first_pane_max_vals=1,
                metric_printskip_configs={'val': 'f1_score'},
                dynamic_predict_threshold_min_max=(.35, .95),
                # weight class 1 five times heavier than class 0
                class_weights={
                    0: 1,
                    1: 5
                },
                iter_verbosity=2,
                # prediction-visualization callbacks run at validation end
                callbacks={
                    'val_end': [
                        infer_train_hist_cb, binary_preds_per_iteration_cb,
                        binary_preds_distribution_cb
                    ]
                },
            ))
        tg = init_session(C, model=model)
        tg.train()
        _test_load(tg, C)
    print("\nTime elapsed: {:.3f}".format(time() - t0))
Ejemplo n.º 3
0
def test_recursive_batch():
    """Run `_test_main` with a fresh model at batch size 256 across
    traingen and both data generators."""
    cfg = deepcopy(CONFIGS)
    with tempdir(cfg['traingen']['logs_dir']), \
        tempdir(cfg['traingen']['best_models_dir']):
        for section in ('traingen', 'datagen', 'val_datagen'):
            cfg[section]['batch_size'] = 256
        cfg['model']['batch_shape'] = (256, width, height, channels)
        _test_main(cfg, new_model=True)
Ejemplo n.º 4
0
def test_predict():
    """Run `_test_main` in `eval_fn='predict'` mode; list-form
    `val_metrics` exercises misc._validate_traingen_configs."""
    cfg = deepcopy(CONFIGS)
    with tempdir(cfg['traingen']['logs_dir']), \
        tempdir(cfg['traingen']['best_models_dir']):
        tg_cfg = cfg['traingen']
        tg_cfg['eval_fn'] = 'predict'
        # tests misc._validate_traingen_configs
        tg_cfg['val_metrics'] = ['loss', 'acc']
        _test_main(cfg)
Ejemplo n.º 5
0
def test_main():
    """Basic train + reload round-trip; prints elapsed wall time."""
    start = time()
    cfg = deepcopy(CONFIGS)
    with tempdir(cfg['traingen']['logs_dir']), \
        tempdir(cfg['traingen']['best_models_dir']):
        tg = init_session(cfg, model=model)
        tg.train()
        _test_load(tg, cfg)
    print("\nTime elapsed: {:.3f}".format(time() - start))
Ejemplo n.º 6
0
def test_predict():
    """Train the autoencoder in `eval_fn='predict'` mode, then reload;
    prints elapsed wall time."""
    start = time()
    cfg = deepcopy(CONFIGS)
    with tempdir(cfg['traingen']['logs_dir']), \
        tempdir(cfg['traingen']['best_models_dir']):
        cfg['traingen']['eval_fn'] = 'predict'
        tg = init_session(cfg, model=autoencoder)
        tg.train()
        _test_load(tg, cfg)
    print("\nTime elapsed: {:.3f}".format(time() - start))
Ejemplo n.º 7
0
def test_start_increments():
    """Train two epochs with a windowed preprocessor configured with
    multiple start increments, then reload."""
    cfg = deepcopy(CONFIGS)
    with tempdir(cfg['traingen']['logs_dir']), \
        tempdir(cfg['traingen']['best_models_dir']):
        cfg['datagen']['preprocessor_configs'] = {'window_size': 4,
                                                  'start_increments': [0, 2]}
        cfg['traingen']['epochs'] = 2
        tg = init_session(cfg, model=model)
        tg.train()
        _test_load(tg, cfg)
Ejemplo n.º 8
0
def test_main():
    """Train the autoencoder with labels disabled on both data
    generators, then reload; prints elapsed wall time."""
    start = time()
    cfg = deepcopy(CONFIGS)
    with tempdir(cfg['traingen']['logs_dir']), \
        tempdir(cfg['traingen']['best_models_dir']):
        for section in ('datagen', 'val_datagen'):
            cfg[section]['labels_path'] = None
        tg = init_session(cfg, model=autoencoder)
        tg.train()
        _test_load(tg, cfg)
    print("\nTime elapsed: {:.3f}".format(time() - start))
Ejemplo n.º 9
0
    def _test_eval_mode():
        """Check that a custom metric with an unsanitized name is handled
        in `eval_fn='evaluate'` mode (closes over outer `C`)."""
        with tempdir(C['traingen']['logs_dir']), \
            tempdir(C['traingen']['best_models_dir']):
            C['traingen']['custom_metrics'] = {'f.5_score': f05_score}
            # '*' is a wildcard slot for the model's own metrics
            C['traingen']['val_metrics'] = ['*', 'f.5_score']
            C['traingen']['eval_fn'] = 'evaluate'
            tg = init_session(C, model=classifier)

            # should be dropped in _validate_traingen_configs:_validate_metrics
            assert 'f.5 score' not in tg.val_metrics
            tg.train()
Ejemplo n.º 10
0
    def _test_predict_mode():
        """Check wildcard ('*') metric expansion in `eval_fn='predict'`
        mode (closes over outer `C`)."""
        with tempdir(C['traingen']['logs_dir']), \
            tempdir(C['traingen']['best_models_dir']):
            C['traingen']['custom_metrics'] = {'f.5_score': f05_score}
            C['traingen']['val_metrics'] = ['*', 'f.5_score']
            C['traingen']['eval_fn'] = 'predict'
            tg = init_session(C, model=classifier)

            # model metrics should be inserted at wildcard
            assert tg.val_metrics[-1] == 'f.5_score'
            tg.train()
Ejemplo n.º 11
0
def test_main(monkeypatch):
    """End-to-end session test covering save/load frequency configs,
    optimizer state include/exclude filters, a custom key metric in
    predict mode, re-running a finished train loop, cache clearing,
    log retrieval, and `destroy`; prints elapsed time.
    """
    t0 = time()
    C = deepcopy(CONFIGS)
    with tempdir(C['traingen']['logs_dir']), \
        tempdir(C['traingen']['best_models_dir']):
        # first session: batch-based val freq, capped checkpoints;
        # 'crowbar' is a deliberately nonexistent optimizer attribute
        C['traingen'].update(
            dict(
                val_freq={'batch': 20},
                plot_history_freq={'val': 2},
                unique_checkpoint_freq={'val': 2},
                optimizer_save_configs={'include': ['updates', 'crowbar']},
                max_one_best_save=True,
                max_checkpoints=3,
            ))
        tg = init_session(C, model=classifier)
        tg.train()
        _test_load(tg, C)

        # second session: iter-based val freq, custom key metric in
        # predict mode; 'momentam' is another deliberate typo-entry
        C['traingen'].update(
            dict(
                val_freq={'iter': 20},
                temp_checkpoint_freq={'val': 3},
                optimizer_save_configs={'exclude': ['iterations']},
                optimizer_load_configs={'include': ['momentum', 'momentam']},
                eval_fn='predict',
                key_metric='catco_custom',
                custom_metrics={
                    'catco_custom': metrics.categorical_crossentropy
                },
            ))
        tg = init_session(C, model=classifier)
        with tempdir() as savedir:
            _log_init_state(tg, savedir=savedir, verbose=1)
        tg.train()
        _test_load(tg, C)

        # third session: calling train() again after the loop finished
        tg = init_session(C, model=classifier)
        tg.train()
        tg._train_loop_done = True
        tg.train()

        # misc coverage: invalid plot config, cache reset, forced
        # `_should_do`, log getters, `val_epoch` property access
        tg.plot_configs['0']['vhlines']['v'] = 'invalid_vlines'
        pass_on_error(tg.get_history_fig)
        tg.clear_cache(reset_val_flags=True)
        tg._should_do({}, forced=True)
        tg.get_last_log('report', best=False)
        tg.get_last_log('report', best=True)
        tg.get_last_log('init_state')
        tg.val_epoch

        # auto-confirm any input() prompt, then tear down the session
        monkeypatch.setattr('builtins.input', lambda x: 'y')
        tg.destroy(confirm=False)

    print("\nTime elapsed: {:.3f}".format(time() - t0))
Ejemplo n.º 12
0
    def _test_get_optimizer_state():
        """Fetch optimizer state under both 'exclude' and 'include'
        save-config variants."""
        cfg = deepcopy(CONFIGS)
        with tempdir(cfg['traingen']['logs_dir']), \
            tempdir(cfg['traingen']['best_models_dir']):
            tg = init_session(cfg, model=classifier)
            for save_cfg in ({'exclude': ['updates']},
                             {'include': ['updates']}):
                tg.optimizer_save_configs = save_cfg
                tg._get_optimizer_state()
Ejemplo n.º 13
0
def test_visuals():
    """Smoke-test `layer_hists` on a trained autoencoder across all
    three modes (errors are swallowed by `pass_on_error`)."""
    cfg = deepcopy(CONFIGS)
    with tempdir(cfg['traingen']['logs_dir']), \
        tempdir(cfg['traingen']['best_models_dir']):
        tg = init_session(cfg, model=autoencoder)
        for mode in ('gradients', 'outputs', 'skeletons'):
            pass_on_error(layer_hists, tg.model, '*', mode=mode)
Ejemplo n.º 14
0
def test_weighted_slices():
    """Train in predict mode with loss- and pred-weighted slice ranges
    enabled; prints elapsed wall time."""
    start = time()
    cfg = deepcopy(CONFIGS)
    cfg['traingen'].update({
        'eval_fn': 'predict',
        'loss_weighted_slices_range': (.5, 1.5),
        'pred_weighted_slices_range': (.5, 1.5),
    })
    with tempdir(cfg['traingen']['logs_dir']), \
        tempdir(cfg['traingen']['best_models_dir']):
        tg = init_session(cfg, model=model)
        tg.train()
    print("\nTime elapsed: {:.3f}".format(time() - start))
Ejemplo n.º 15
0
def test_model_save_weights():
    """Configure `saveskip_list` to save the full model but skip
    weights-only and optimizer state, then train and validate
    save/load."""
    cfg = deepcopy(CONFIGS)
    with tempdir(cfg['traingen']['logs_dir']), \
        tempdir(cfg['traingen']['best_models_dir']):
        tg = init_session(cfg, model=classifier)

        skip = tg.saveskip_list
        if 'model' in skip:
            skip.remove('model')
        for entry in ('model:weights', 'optimizer_state'):
            if entry not in skip:
                skip.append(entry)

        tg.train()
        _validate_save_load(tg, cfg)
Ejemplo n.º 16
0
def test_reset_validation():
    """Check that `reset_validation` restores the val datagen's original
    set-number order and processing queue, then restart validation."""
    C = deepcopy(CONFIGS)
    with tempdir(C['traingen']['logs_dir']), \
        tempdir(C['traingen']['best_models_dir']):
        tg = init_session(C, model=classifier)
        vdg = tg.val_datagen
        # snapshot before training mutates the generator's state
        val_set_nums_original = vdg.set_nums_original.copy()

        tg.train()

        tg.reset_validation()
        assert vdg.set_nums_original == val_set_nums_original
        assert vdg.set_nums_to_process == val_set_nums_original
        tg.validate(restart=True)
Ejemplo n.º 17
0
def test_tf_graph(MockClass1, MockClass2):
    """Call `_fn_graph` within `_make_grads_fn`."""
    # only meaningful when running tf.keras with eager execution available
    if not (TF_KERAS and TF_EAGER):
        return

    # force the graph-mode code path and stub the built gradients function
    MockClass1.executing_eagerly = lambda: False
    MockClass2.return_value = lambda *x: x
    C = deepcopy(CONFIGS)

    with tempdir(C['traingen']['logs_dir']), \
        tempdir(C['traingen']['best_models_dir']):
        tg = init_session(C, model=classifier)
        grads_fn = introspection._make_gradients_fn(tg.model, 0, 'outputs')
        # stub from MockClass2 just echoes its args
        grads_fn(0, 0, 0)
Ejemplo n.º 18
0
def test_main():
    """Train an autoencoder with a custom loss metric ('mLe'), numpy
    data loaders, and 24x24 random-crop preprocessing; then test
    loading via `_do_test_load`."""
    C = deepcopy(AE_CONFIGS)
    C['model'      ].update({'loss': mLe,
                             'batch_shape': (128, 24, 24, 1)})
    C['datagen'    ].update({'data_loader': numpy_loader,
                             'preprocessor': RandCropPreprocessor(size=24)})
    C['val_datagen'].update({'data_loader': numpy_loader,
                             'preprocessor': RandCropPreprocessor(size=24)})
    C['traingen']['custom_metrics'] = {'mLe': mean_L_error}

    with tempdir(C['traingen']['logs_dir']), \
        tempdir(C['traingen']['best_models_dir']):
        tg = init_session(C)
        tg.train()
        _do_test_load(tg, C, init_session)
Ejemplo n.º 19
0
def test_main():
    """Train with logger, 2D-visualization, layer-histogram, and
    seed-setter callbacks, then reload; prints elapsed wall time."""
    start = time()
    cfg = deepcopy(CONFIGS)
    with tempdir(cfg['traingen']['logs_dir']), \
        tempdir(cfg['traingen']['best_models_dir']), \
            tempdir(logger_savedir):
        cfg['traingen']['callbacks'] = [
            _make_logger_cb(), _make_2Dviz_cb(), *layer_hists_cbs, seed_setter]

        tg = init_session(cfg)
        tg.train()
        _test_load(tg, cfg)
        # set_seeds(reset_graph=True)

    print("\nTime elapsed: {:.3f}".format(time() - start))
Ejemplo n.º 20
0
 def _test_no_supported_file_ext():
     """Point `data_path` at a directory containing only a .png image;
     DataGenerator construction is expected to error (swallowed by
     `pass_on_error`)."""
     C = deepcopy(DATAGEN_CFG)
     with tempdir() as dirpath:
         # create a file with an unsupported extension in the data dir
         plt.plot([0, 1])
         plt.gcf().savefig(os.path.join(dirpath, "img.png"))
         C['data_path'] = dirpath
         pass_on_error(DataGenerator, **C)
Ejemplo n.º 21
0
    def _test_numpy_data_to_numpy_sets(datadir):
        """Convert 161 samples into batched .npy sets with and without
        oversampling the 161 % 32 remainder; returns the .npy paths
        from the non-oversampled run."""
        with tempdir(datadir):
            data = np.random.randn(161, 2)
            labels = np.random.randint(0, 2, (161, ))
            # oversample: the 1-sample remainder is padded into a 6th set
            preprocessing.numpy_data_to_numpy_sets(data,
                                                   labels,
                                                   datadir,
                                                   batch_size=32,
                                                   shuffle=True,
                                                   data_basename='ex',
                                                   oversample_remainder=True)

            paths = [
                str(x) for x in Path(datadir).iterdir() if x.suffix == '.npy'
            ]
            assert (len(paths) == 6), ("%s paths" % len(paths))  # 160 / 32

        os.mkdir(datadir)
        data = np.random.randn(161, 2)
        labels = np.random.randint(0, 2, (161, ))

        # no oversampling: the remainder sample is dropped -> 5 sets
        preprocessing.numpy_data_to_numpy_sets(data,
                                               labels,
                                               datadir,
                                               batch_size=32,
                                               shuffle=True,
                                               data_basename='ex',
                                               oversample_remainder=False)
        os.remove(os.path.join(datadir, "labels.h5"))

        paths = [str(x) for x in Path(datadir).iterdir() if x.suffix == '.npy']
        assert (len(paths) == 5), ("%s paths" % len(paths))  # 160 / 32

        return paths
Ejemplo n.º 22
0
def test_traingen_logger():
    """Train with a logger callback configured with custom data-,
    labels-, and weights-gathering functions."""
    C = deepcopy(CONFIGS)
    with tempdir(C['traingen']['logs_dir']), \
        tempdir(C['traingen']['best_models_dir']), \
            tempdir(logger_savedir):
        batch_shape = (batch_size, width, height, channels)

        n_classes = C['model']['num_classes']
        # logger pulls random data/labels rather than the generators'
        callbacks = [_make_logger_cb(
            get_data_fn=lambda: np.random.randn(*batch_shape),
            get_labels_fn=lambda: np.random.randint(0, 2,
                                                    (batch_size, n_classes)),
            gather_fns={'weights': get_weights},
            )]
        C['traingen']['callbacks'] = callbacks
        tg = init_session(C)
        tg.train()
Ejemplo n.º 23
0
def test_logging():
    """Exercise `logging` helpers on a TraingenDummy: unique model
    naming, init-state logging, report generation with malformed
    configs (via `pass_on_error`), and report saving."""
    def _testget_unique_model_name():
        # pre-existing 'M0' dir forces the name-uniquifying path
        tg = TraingenDummy()
        tg.model_name_configs = {'datagen.shuffle': ''}
        os.mkdir(os.path.join(tg.logs_dir, 'M0'))
        logging.get_unique_model_name(tg)

    def _test_log_init_state():
        # 'swordfish' is a nonexistent attribute; 'SUV' is a real one
        class SUV():
            pass

        tg = TraingenDummy()
        tg.SUV = SUV()
        logging._log_init_state(tg, source_lognames=['swordfish', 'SUV'])
        logging._log_init_state(tg, source_lognames='*', verbose=1)
        logging._log_init_state(tg, source_lognames=None)

    def _test_generate_report():
        # each report_configs variant below is malformed in a new way
        tg = TraingenDummy()
        tg.report_configs = {'model': dict(stuff='staff'), 'saitama': None}
        pass_on_error(logging.generate_report, tg)

        tg.report_configs = {'model': {'genos': [1]}}
        pass_on_error(logging.generate_report, tg)

        tg.report_configs = {'model': {'include': [], 'exclude': []}}
        pass_on_error(logging.generate_report, tg)

        tg.report_configs = {'model': {'exclude_types': ['etc']}}
        tg.model_configs = {'a': 1}
        pass_on_error(logging.generate_report, tg)

    def _test_save_report():
        # empty logdir makes the save path invalid
        tg = TraingenDummy()
        tg.report_configs = {'model': {'include': []}}
        tg.logdir = ''
        pass_on_error(logging.save_report, tg, '')

    logs_dir = os.path.join(BASEDIR, 'tests', '_outputs', '_logs')
    best_models_dir = os.path.join(BASEDIR, 'tests', '_outputs', '_models')
    with tempdir(logs_dir), tempdir(best_models_dir):
        _testget_unique_model_name()

    _test_log_init_state()
    _test_generate_report()
    _test_save_report()
Ejemplo n.º 24
0
def test_checkpoint():
    """Verify checkpoint file counts in `logdir` across `overwrite`
    True/False and `max_checkpoints` values (including 0, which should
    behave like 1)."""
    def _get_nfiles(logdir):
        # omit dir & hidden
        return len([
            f for f in Path(logdir).iterdir()
            if f.is_file() and not f.name[0] == '.'
        ])

    C = deepcopy(CONFIGS)
    with tempdir(C['traingen']['logs_dir']), \
        tempdir(C['traingen']['best_models_dir']):
        C['traingen']['max_checkpoints'] = 2
        tg = init_session(C, model=classifier)
        tg.train()

        # overwrite=True should replace rather than add files
        nfiles_1 = _get_nfiles(tg.logdir)
        tg.checkpoint(forced=True, overwrite=True)
        nfiles_2 = _get_nfiles(tg.logdir)
        assert (nfiles_2 == nfiles_1), (
            "Number of files in `logdir` changed with `overwrite`==True on "
            "second checkpoint w/ `max_checkpoints`==2 ({} -> {})".format(
                nfiles_1, nfiles_2))

        # overwrite=False below the cap should add a full new set
        tg.checkpoint(forced=True, overwrite=False)
        nfiles_3 = _get_nfiles(tg.logdir)
        assert (nfiles_3 == 2 * nfiles_2), (
            "Number of files didn't double in `logdir` after checkpointing "
            "below `max_checkpoints` checkpoints ({} -> {})".format(
                nfiles_2, nfiles_3))

        # at the cap, the oldest set should be rotated out (count stable)
        tg.checkpoint(forced=True, overwrite=False)
        nfiles_4 = _get_nfiles(tg.logdir)
        assert (nfiles_3 == nfiles_4), (
            "Number of files changed in `logdir` after checkpointing at "
            "`max_checkpoints` checkpoints ({} -> {})".format(
                nfiles_3, nfiles_4))

        tg.max_checkpoints = 0
        tg.checkpoint(forced=True, overwrite=False)
        nfiles_5 = _get_nfiles(tg.logdir)
        assert (nfiles_5 == nfiles_1), (
            "`max_checkpoints`==0 should behave like ==1, but number of "
            "files in `logdir` differs from that in first checkpoint "
            "({} -> {})".format(nfiles_1, nfiles_5))
Ejemplo n.º 25
0
def _test_weights_gradients(model):
    """Run `rnn_histogram` and `rnn_heatmap` in gradients mode on
    `model.layers[1]`; bidirectional layers (those exposing a
    `backward_layer`) additionally save figures to a temp dir."""
    x, y, _ = make_data(K.int_shape(model.input), model.layers[2].units)
    name = model.layers[1].name

    with tempdir() as dirpath:
        kws = dict(input_data=x, labels=y, mode='grads')
        # `backward_layer` presence indicates a bidirectional wrapper
        if hasattr(model.layers[1], 'backward_layer'):
            kws['savepath'] = dirpath

        rnn_histogram(model, name, bins=100, **kws)
        rnn_heatmap(model, name, **kws)
Ejemplo n.º 26
0
def test_datagen():
    """Exercise DataGenerator internals after training: batch
    advancement flags, batch validation, set-number assignment,
    preprocessor setting, and invalid-kwarg construction; prints
    elapsed time."""
    t0 = time()
    C = deepcopy(CONFIGS)
    with tempdir(C['traingen']['logs_dir']), \
        tempdir(C['traingen']['best_models_dir']):
        tg = init_session(C, model=autoencoder)
        tg.train()

        # advancing with a truncated, not-loaded batch should error
        dg = tg.datagen
        dg.advance_batch()
        dg.batch = dg.batch[:1]
        dg.batch_loaded = False
        pass_on_error(dg.advance_batch)
        dg.batch_loaded = True
        dg.advance_batch(forced=False)

        # _validate_batch with all data exhausted (shuffle on)
        dg.shuffle = True
        dg.all_data_exhausted = True
        dg._validate_batch()

        # _validate_batch with an empty, exhausted batch
        dg.batch = []
        dg.batch_exhausted = True
        dg._validate_batch()

        # set nums not present in the data should be rejected
        dg.set_nums_to_process = dg.set_nums_original.copy()
        pass_on_error(dg._set_set_nums, ['99', '100'], ['100', '101'])
        pass_on_error(dg._set_set_nums, ['1', '2'], ['100', '101'])
        dg.superbatch_path = None
        pass_on_error(dg._set_set_nums, ['1', '2'], ['1', '2'])

        # preprocessor must be None or a valid preprocessor, not a str
        dg._set_preprocessor(None, {})
        pass_on_error(dg._set_preprocessor, "x", {})

        # unknown constructor kwarg should raise
        C['datagen']['invalid_kwarg'] = 5
        pass_on_error(DataGenerator, **C['datagen'])

    print("\nTime elapsed: {:.3f}".format(time() - t0))
Ejemplo n.º 27
0
    def _test_misc():
        """Misc DataGenerator edge cases: superbatch set missing from
        cache, empty labels store, read-only loader attributes, and a
        data path without labels."""
        C = deepcopy(DATAGEN_CFG)
        dg = DataGenerator(**C)
        # set '3' is declared but absent from the superbatch cache
        dg.superbatch = {'1': 1, '2': 2}
        dg.superbatch_set_nums = ['3']
        pass_on_error(dg._get_next_batch, set_num='3', warn=True)

        dg.all_labels = {}
        pass_on_error(dg._get_next_labels, set_num='3')

        # load_data / load_labels should not be directly assignable
        pass_on_error(setattr, dg, 'load_data', 1)
        pass_on_error(setattr, dg, 'load_labels', 1)

        with tempdir() as dirpath:
            path = os.path.join(dirpath, "arr.npy")
            np.save(path, np.array([1]))
            C = deepcopy(DATAGEN_CFG)
            C['labels_path'] = None
            C['data_path'] = path
            pass_on_error(DataGenerator, **C)
Ejemplo n.º 28
0
    def _test_infer_and_set_info():
        """Loader inference: DataLoader instances and the DataLoader
        class are accepted; a DataGenerator passed as a loader is
        rejected (via `pass_on_error`)."""
        C = deepcopy(DATAGEN_CFG)
        with tempdir() as dirpath:
            path = os.path.join(dirpath, "arr.npy")
            np.save(path, np.array([1]))
            # DataLoader instances for data and labels are valid
            C['labels_path'] = None
            C['data_loader'] = DataLoader(path, loader='numpy')
            DataGenerator(**C)

            C['labels_loader'] = DataLoader(path, loader='numpy')
            DataGenerator(**C)

        # a DataGenerator is not a valid data loader
        C['data_loader'] = DataGenerator
        pass_on_error(DataGenerator, **C)

        # the DataLoader class itself (uninstantiated) is accepted
        C['labels_loader'] = None
        C['data_loader'] = DataLoader
        DataGenerator(**C)

        C['labels_loader'] = DataGenerator
        pass_on_error(DataGenerator, **C)
Ejemplo n.º 29
0
def test_data_to_hdf5(monkeypatch):
    """Dedicated test since it uses monkeypatch.

    Exercises `preprocessing.data_to_hdf5` failure and success paths:
    bad savepath extension, unsupported files in `loaddir`, overwrite
    prompting (patched `input`), and malformed `data` arguments.
    """
    C = deepcopy(CONFIGS)
    # set preemptively in case data.h5 somehow found in dir
    monkeypatch.setattr('builtins.input', lambda x: 'y')

    with tempdir(C['traingen']['logs_dir']) as loaddir:
        # a non-.npy file in loaddir to trip file-type validation
        with open(os.path.join(loaddir, "data.txt"), 'w') as txt:
            txt.write("etc")
        # savepath missing the .h5 extension should error
        savepath = os.path.join(loaddir, "data.h5")
        pass_on_error(preprocessing.data_to_hdf5,
                      savepath.replace('.h5', ''),
                      batch_size=32,
                      loaddir=loaddir)

        # loaddir still contains data.txt alongside data.npy
        data = np.random.randn(1, 32, 100)
        np.save(os.path.join(loaddir, "data.npy"), data)
        pass_on_error(preprocessing.data_to_hdf5,
                      savepath=savepath,
                      batch_size=32,
                      loaddir=loaddir)

        kw = dict(savepath=savepath, data=data, batch_size=32)
        pass_on_error(preprocessing.data_to_hdf5, **kw)

        # with the stray .txt removed, conversion should succeed
        os.remove(os.path.join(loaddir, "data.txt"))
        preprocessing.data_to_hdf5(**kw)

        # existing data.h5: 'y' at the prompt allows overwriting
        monkeypatch.setattr('builtins.input', lambda x: 'y')
        preprocessing.data_to_hdf5(**kw)

        # 'n' at the prompt declines the overwrite
        monkeypatch.setattr('builtins.input', lambda x: 'n')
        pass_on_error(preprocessing.data_to_hdf5, **kw)

        preprocessing.data_to_hdf5(overwrite=True, **kw)

        pass_on_error(preprocessing.data_to_hdf5, overwrite=False, **kw)

        # neither loaddir nor data given
        pass_on_error(preprocessing.data_to_hdf5,
                      kw['savepath'],
                      kw['batch_size'],
                      loaddir=None,
                      data=None)

        # both loaddir and data given
        pass_on_error(preprocessing.data_to_hdf5,
                      kw['savepath'],
                      kw['batch_size'],
                      loaddir=loaddir,
                      data=data)

        # ragged batches: second entry shorter than batch_size
        _data = [data[0], data[0, :31]]
        pass_on_error(preprocessing.data_to_hdf5,
                      kw['savepath'],
                      kw['batch_size'],
                      data=_data,
                      overwrite=True)

        # a single oversized batch (2 * batch_size rows)
        _data = [np.vstack([data[0], data[0]])]
        pass_on_error(preprocessing.data_to_hdf5,
                      kw['savepath'],
                      kw['batch_size'],
                      data=_data,
                      overwrite=True)
Ejemplo n.º 30
0
def test_util():
    t0 = time()

    def _util_make_autoencoder(C, new_model=False):
        C['model'] = AE_CFG
        C['traingen']['model_configs'] = AE_CFG
        C['traingen']['input_as_labels'] = True
        if new_model:
            return init_session(C, model_fn=make_autoencoder)
        else:
            autoencoder.loss = 'mse'  # reset changed configs
            return init_session(C, model=autoencoder)

    def _util_make_classifier(C, new_model=False):
        C['model'] = CL_CFG
        C['traingen']['model_configs'] = CL_CFG
        C['traingen']['input_as_labels'] = False
        if new_model:
            return init_session(C, model_fn=make_timeseries_classifier)
        else:
            classifier.loss = 'binary_crossentropy'  # reset changed configs
            return init_session(C, model=classifier)

    def _save_best_model(C):  # [util.saving]
        tg = _util_make_autoencoder(C)
        tg.train()
        with mock.patch('os.remove') as mock_remove:
            mock_remove.side_effect = OSError('Permission Denied')
            tg.key_metric_history.append(-.5)  # ensure is new best
            tg._save_best_model(del_previous_best=True)
        with mock.patch('deeptrain.train_generator.TrainGenerator.save_report'
                        ) as mock_report:
            mock_report.side_effect = Exception()
            tg.key_metric_history.append(-1)  # ensure is new best
            tg._save_best_model()

    def checkpoint(C):  # [util.saving]
        tg = _util_make_autoencoder(C)
        tg.train()
        tg.max_checkpoints = -1
        with mock.patch('os.remove') as mock_remove:
            mock_remove.side_effect = OSError('Permission Denied')
            tg.checkpoint(forced=True, overwrite=False)

        tg.logdir = None
        pass_on_error(tg.checkpoint)

    def save(C):  # [util.saving]
        tg = _util_make_autoencoder(C)
        tg.checkpoint()
        tg.model.loss = 'mean_squared_error'
        tg.train()
        tg.final_fig_dir = tg.logdir

        pass_on_error(tg.load)
        pass_on_error(tg.checkpoint, overwrite="underwrite")
        tg.datagen.set_nums_to_process = [9001]
        tg.save()
        tg._save_history_fig()
        tg._save_history_fig()
        tg.optimizer_load_configs = {'exclude': ['weights']}
        tg.loadskip_list = ['optimizer_load_configs']
        tg.datagen.loadskip_list = ['stuff']
        tg.load()

        tg._history_fig = 1
        tg._save_history_fig()

        tg.loadskip_list = 'auto'
        tg.load()
        tg.loadskip_list = 'none'
        tg.load()

        tg.optimizer_save_configs = {'include': []}
        tg.save()

        with mock.patch('backend.K.get_value') as mock_get_value:
            mock_get_value.side_effect = Exception()
            tg.save()

        tg.optimizer_save_configs = {'include': ['leaking_rate']}
        tg.datagen.group_batch = []
        with mock.patch('pickle.dump') as mock_dump:
            mock_dump.side_effect = Exception()
            tg.save()

        tg.logdir = 'abc'
        pass_on_error(tg.load)
        tg.logdir = None
        pass_on_error(tg.load)

    def get_sample_weight(C):  # [util.training]
        tg = _util_make_autoencoder(C)
        labels = np.random.randint(0, 2, (32, 3))
        tg.class_weights = {0: 1, 1: 2, 2: 3}
        tg.get_sample_weight(labels)

    def _get_api_metric_name(C):  # [util.training]
        util.training._get_api_metric_name('accuracy',
                                           'categorical_crossentropy')
        util.training._get_api_metric_name('acc',
                                           'sparse_categorical_crossentropy')
        util.training._get_api_metric_name('acc', 'binary_crossentropy')

    def _get_best_subset_val_history(C):  # [util.training]
        C['traingen']['best_subset_size'] = 2
        tg = _util_make_classifier(C)

        tg.val_datagen.slices_per_batch = 4
        tg._labels_cache = np.random.randint(0, 2, (3, 4, batch_size, 1))
        tg._preds_cache = np.random.uniform(0, 1, (3, 4, batch_size, 1))
        tg._sw_cache = np.random.randint(0, 2, (3, 4, batch_size, 1))
        tg._class_labels_cache = tg._labels_cache.copy()
        tg._val_set_name_cache = ['1', '2', '3']
        tg.key_metric = 'f1_score'
        tg.val_temp_history = {'f1_score': []}
        tg.key_metric_fn = metrics.f1_score
        tg._eval_fn_name = 'predict'
        tg.dynamic_predict_threshold_min_max = None

        tg._get_best_subset_val_history()

        tg._eval_fn_name = 'superfit'
        pass_on_error(tg._get_best_subset_val_history)

    def _update_temp_history(C):  # [util.training]
        tg = _util_make_classifier(C)

        tg.val_temp_history['loss'] = (1, 2, 3)
        tg._update_temp_history(metrics=(4, ), val=True)
        tg.val_temp_history['loss'] = []
        tg._update_temp_history(metrics=(4, ), val=True)

        tg.datagen.slice_idx = 1
        tg.datagen.slices_per_batch = 2
        tg.temp_history = {'binary_accuracy': []}
        tg.train_metrics = ['binary_accuracy']
        pass_on_error(tg._update_temp_history, metrics=[1], val=False)

        pass_on_error(tg._update_temp_history,
                      metrics=[dict(a=1, b=2)],
                      val=False)

        # tg._update_temp_history([[1]], val=False)  # tests `_handle_non_scalar`

        tg.temp_history = {'f1_score': []}
        tg.train_metrics = ['f1_score']
        pass_on_error(tg._update_temp_history, metrics=[[1, 2]], val=False)

    def compute_gradient_norm(C):  # [introspection]
        pass_on_error(introspection.compute_gradient_norm,
                      0,
                      0,
                      0,
                      mode="leftput")

    def _init_optimizer(C):  # [misc]
        tg = _util_make_classifier(C)
        tg.model.loss = 1
        pass_on_error(misc._init_optimizer, tg.model)

    def metrics_getattr(C):  # [TrainGenerator]
        def _raise(Except):
            raise Except()

        import_err = lambda: _raise(ImportError)

        with mock.patch('builtins.__import__', side_effect=import_err):
            pass_on_error(getattr, metrics, 'r2_score')

    def _validate_weighted_slices_range(C):  # [util.misc]
        C['traingen']['pred_weighted_slices_range'] = (.5, 1.5)
        C['traingen']['eval_fn'] = 'evaluate'
        pass_on_error(_util_make_autoencoder, C)

        C = deepcopy(CONFIGS)
        tg = _util_make_autoencoder(C)
        tg.pred_weighted_slices_range = (.5, 1.5)
        tg._eval_fn_name = 'predict'
        tg.datagen.slices_per_batch = None
        tg.val_datagen.slices_per_batch = None
        pass_on_error(tg._validate_traingen_configs)

        C['traingen']['max_is_best'] = True
        C['traingen']['eval_fn'] = 'evaluate'
        C['traingen']['pred_weighted_slices_range'] = (.1, 1.1)
        pass_on_error(_util_make_classifier, C)

        C['traingen']['eval_fn'] = 'predict'
        pass_on_error(_util_make_classifier, C)

        C = deepcopy(CONFIGS)
        C['datagen'].pop('slices_per_batch', None)
        pass_on_error(_util_make_classifier, C)

        C = deepcopy(CONFIGS)
        C['traingen']['eval_fn'] = 'predict'
        tg = _util_make_classifier(C)
        tg.pred_weighted_slices_range = (.1, 1.1)
        tg.datagen.slices_per_batch = 1
        tg.val_datagen.slices_per_batch = 1
        tg._validate_traingen_configs()

    def _validate_metrics(C):  # [util.misc]
        C['traingen']['eval_fn'] = 'evaluate'
        C['traingen']['key_metric'] = 'hinge'
        pass_on_error(_util_make_autoencoder, C)

        C['traingen']['val_metrics'] = 'goblin'
        pass_on_error(_util_make_autoencoder, C)

        C = deepcopy(CONFIGS)
        C['traingen']['key_metric'] = 'swordfish'
        C['traingen']['key_metric_fn'] = None
        C['traingen']['eval_fn'] = 'predict'
        pass_on_error(_util_make_autoencoder, C)

        C = deepcopy(CONFIGS)
        C['traingen']['val_metrics'] = None
        pass_on_error(_util_make_autoencoder, C)

        C['traingen']['key_metric'] = 'loss'
        C['traingen']['max_is_best'] = True
        _util_make_autoencoder(C)

        C = deepcopy(CONFIGS)
        C['traingen']['eval_fn'] = 'predict'
        C['traingen']['val_metrics'] = 'cosine_similarity'
        pass_on_error(_util_make_autoencoder, C)

        C = deepcopy(CONFIGS)
        C['traingen']['eval_fn'] = 'predict'
        tg = _util_make_autoencoder(C)
        tg.model.loss = 'hl2'
        pass_on_error(tg._validate_traingen_configs)

        tg.train_metrics = ['tnr', 'tpr']
        tg.val_metrics = ['tnr', 'tpr']
        tg.key_metric = 'tnr'
        pass_on_error(tg._validate_traingen_configs)

    def _validate_directories(C):  # [util.misc]
        C['traingen']['best_models_dir'] = None
        pass_on_error(_util_make_classifier, C)

        C = deepcopy(CONFIGS)
        C['traingen']['logs_dir'] = None
        pass_on_error(_util_make_classifier, C)

        C = deepcopy(CONFIGS)
        C['traingen']['best_models_dir'] = None
        C['traingen']['logs_dir'] = None
        pass_on_error(_util_make_classifier, C)

    def _validate_optimizer_save_configs(C):  # [util.misc]
        C['traingen']['optimizer_save_configs'] = {
            'include': 'weights',
            'exclude': 'updates'
        }
        pass_on_error(_util_make_classifier, C)

    def _validate_class_weights(C):  # [util.misc]
        # Mixed str/int keys — expected to be rejected by validation
        C['traingen']['class_weights'] = {'0': 1, 1: 2}
        pass_on_error(_util_make_classifier, C)

        # Single-class weights dict on the same (mutated) configs
        C['traingen']['class_weights'] = {0: 1}
        pass_on_error(_util_make_classifier, C)

        # Non-contiguous class keys with a categorical loss should
        # pass validation directly (no pass_on_error wrapper).
        C = deepcopy(CONFIGS)
        trainer = _util_make_classifier(C)
        trainer.model.loss = 'categorical_crossentropy'
        trainer.class_weights = {0: 1, 2: 5, 3: 6}
        trainer._validate_traingen_configs()

    def _validate_best_subset_size(C):  # [util.misc]
        # best_subset_size together with shuffled group samples —
        # presumably incompatible; expected error is absorbed.
        C['traingen'].update(best_subset_size=5)
        C['val_datagen'].update(shuffle_group_samples=True)
        pass_on_error(_util_make_classifier, C)

    def _validate_metric_printskip_configs(C):  # [util.misc]
        # A tuple value should be accepted — session is built directly,
        # with no pass_on_error wrapper.
        C['traingen'].update(metric_printskip_configs={'val': ('loss',)})
        _util_make_autoencoder(C)

    def _validate_savelist_and_metrics(C):  # [util.misc]
        # Requesting '{labels}' in savelist alongside a loss-only
        # train_metrics — expected validation error is absorbed.
        C['traingen'].update(savelist=['{labels}'],
                             train_metrics=('loss',))
        pass_on_error(_util_make_autoencoder, C)

    def _validate_loadskip_list(C):  # [util.misc]
        # A bare string is not a valid loadskip_list; error is absorbed.
        C['traingen'].update(loadskip_list='invalid')
        pass_on_error(_util_make_autoencoder, C)

    def _validate_callbacks(C):  # [util.misc]
        # Unknown stage name, non-callable value, and a non-dict
        # container — each should be caught by config validation.
        for bad_callbacks in ({'invalid_stage': 1}, {'save': 1}, 1):
            C['traingen']['callbacks'] = bad_callbacks
            pass_on_error(_util_make_autoencoder, C)

    def _validate_input_as_labels(C):  # [util.misc]
        # With input_as_labels off and no labels_path on either datagen,
        # session creation should fail; the error is absorbed.
        C['traingen']['input_as_labels'] = False
        for dg_name in ('datagen', 'val_datagen'):
            C[dg_name]['labels_path'] = None
        pass_on_error(_util_make_classifier, C)

    def _validate_model_save_kw(C):  # [util.misc]
        # None save-kwargs should be tolerated — built directly, no
        # pass_on_error wrapper.
        for key in ('model_save_kw', 'model_save_weights_kw'):
            C['traingen'][key] = None
        _util_make_classifier(C)

    def _validate_freq_configs(C):  # [util.misc]
        # val_freq as a plain int, then as a multi-key dict; any
        # validation error from either form is absorbed.
        for freq in (1, {'epoch': 1, 'batch': 2}):
            C['traingen']['val_freq'] = freq
            pass_on_error(_util_make_classifier, C)

    def _traingen_callbacks(C):  # [train_generator]
        tg = _util_make_autoencoder(C)
        # Non-callback list entries should be caught in _apply_callbacks
        tg.callbacks = [1]
        pass_on_error(tg._apply_callbacks)

        from deeptrain.callbacks import TraingenCallback

        def _boom(self):
            # stand-in init hook that always fails
            raise NotImplementedError

        # A callback whose init hook raises — error is absorbed
        cb = TraingenCallback()
        cb.init_with_traingen = _boom
        tg.callbacks = [cb]
        pass_on_error(tg._init_callbacks)

    def _on_val_end(C):  # [train_generator]
        # Exercise failure paths of `_on_val_end` and `train` by corrupting
        # traingen state; `pass_on_error` absorbs the expected exceptions.
        tg = _util_make_autoencoder(C)
        tg.batch_size = 'x'  # non-numeric batch_size -> _on_val_end fails
        pass_on_error(tg._on_val_end)

        tg.epochs += 1
        tg._train_loop_done = True

        def validate(self):
            # stub that reopens the train loop instead of validating
            self._train_loop_done = False

        tg.validate = validate.__get__(tg)  # bind stub as instance method
        pass_on_error(tg.train)

        tg._train_loop_done = False
        tg._fit_fn_name = 'x'  # nonexistent fit-fn name -> train fails
        tg.epochs += 1
        pass_on_error(tg.train)

        tg.batch_size = 'a'  # corrupt again for the predict eval path
        tg._eval_fn_name = 'predict'
        pass_on_error(tg._on_val_end, 0, 0, 0)

    def _train_postiter_processing(C):  # [train_generator]
        # Force `_update_train_history` to raise so the error branch of
        # `_train_postiter_processing` is exercised.
        tg = _util_make_autoencoder(C)
        tg.datagen.batch_exhausted = True
        with mock.patch('deeptrain.train_generator.TrainGenerator.'
                        '_update_train_history') as mock_update:
            mock_update.side_effect = Exception
            tg._update_temp_history = lambda x: x  # no-op stand-in
            pass_on_error(tg._train_postiter_processing, [])

    def _traingen_properties(C):  # [train_generator]
        tg = _util_make_autoencoder(C)
        # The eval_fn setter should reject an int, then a model method;
        # both expected errors are absorbed.
        for bad_fn in (1, tg.model.summary):
            pass_on_error(setattr, tg, 'eval_fn', bad_fn)

    def append_examples_dir_to_sys_path(C):  # [util.misc]
        # Normal invocation first...
        util.misc.append_examples_dir_to_sys_path()

        # ...then with Path.is_dir forced to report False; the resulting
        # error is absorbed.
        with mock.patch('pathlib.Path.is_dir') as is_dir_mock:
            is_dir_mock.side_effect = lambda: False
            pass_on_error(util.misc.append_examples_dir_to_sys_path)

    names, fns = zip(*locals().items())
    for name, fn in zip(names, fns):
        if hasattr(fn, '__code__') and misc.argspec(fn)[0] == 'C':
            with tempdir(CONFIGS['traingen']['logs_dir']), \
                tempdir(CONFIGS['traingen']['best_models_dir']):
                C = deepcopy(CONFIGS)  # reset dict
                fn(C)
                print("Passed", fn.__name__)

    print("\nTime elapsed: {:.3f}".format(time() - t0))