Example #1
    def test_nest(self, _model_1, _config):

        path_wd = _config.get('paths', 'path_wd')
        model_name = _config.get('paths', 'filename_ann')
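        # Save the ANN into the toolbox working directory ('paths/path_wd') under the
        # configured 'filename_ann', so the pipeline run below can load it from there.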
        keras.models.save_model(_model_1,
                                os.path.join(path_wd, model_name + '.h5'))

        updates = {
            'tools': {'evaluate_ann': False},
            'simulation': {
                'simulator': 'nest',
                'duration': 50,
                'num_to_test': 10,
                'batch_size': 1,
                'dt': 0.1},
            'cell': {
                'tau_refrac': 0.1,
                'delay': 0.1,
                'v_thresh': 0.01},
            'output': {
                'log_vars': {'activations_n_b_l', 'spiketrains_n_b_l_t'}}}

        _config.read_dict(updates)

        initialize_simulator(_config)

        acc = run_pipeline(_config)

        assert acc[0] >= 0.95

        corr = get_correlations(_config)
        assert np.all(corr[:-1] > 0.97)
        assert corr[-1] > 0.5
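
Note on the read_dict calls used throughout these examples: the toolbox config object accepts nested dictionaries and merges them section by section, much like the standard library's configparser.ConfigParser.read_dict. The sketch below illustrates that update pattern with plain configparser; treating the toolbox config as ConfigParser-like (sections created on demand, option values stored as strings) is an assumption here, not something shown in the snippets.

import configparser

# Build a config and apply a nested update, as the tests above do.
config = configparser.ConfigParser()
config.read_dict({
    'simulation': {'simulator': 'nest', 'duration': 50, 'dt': 0.1},
    'cell': {'tau_refrac': 0.1, 'v_thresh': 0.01},
})

# A second read_dict call merges section by section: only the listed keys
# are overridden; everything else in 'simulation' is kept.
config.read_dict({'simulation': {'duration': 100}})

assert config.get('simulation', 'simulator') == 'nest'
assert config.getfloat('simulation', 'dt') == 0.1
assert config.getint('simulation', 'duration') == 100
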
Example #2
    def test_brian2(self, _parsed_model, _testset, _config):
        """Needs to be tested separately because no saving function implemented.
        """

        from importlib import import_module
        from snntoolbox.bin.utils import initialize_simulator

        _config.read_dict({
            'simulation': {
                'simulator': 'brian2',
                'num_to_test': 2
            },
            'input': {
                'poisson_input': True
            }
        })
        try:
            initialize_simulator(_config)
        except ImportError:
            return

        target_sim = import_module(
            'snntoolbox.simulation.target_simulators.brian2_target_sim')
        spiking_model = target_sim.SNN(_config)
        spiking_model.build(_parsed_model)
        score = spiking_model.run(**_testset)
        assert round(100 * score, 2) >= 99.00
Example #3
    def test_spinnaker_sparse(self, _model_3, _config):

        path_wd = _config.get('paths', 'path_wd')
        model_name = _config.get('paths', 'filename_ann')
        models.save_model(_model_3, os.path.join(path_wd, model_name + '.h5'))

        updates = {
            'tools': {
                'evaluate_ann': False
            },
            'input': {
                'poisson_input': True
            },
            'simulation': {
                'simulator': 'spiNNaker',
                'duration': 100,
                'num_to_test': 1,  # keep small so the SpiNNaker run stays feasible
                'batch_size': 1
            },
            'output': {
                'log_vars': {'activations_n_b_l', 'spiketrains_n_b_l_t'}
            }
        }

        _config.read_dict(updates)

        initialize_simulator(_config)

        acc = run_pipeline(_config)

        assert acc[0] >= 0.95

        corr = get_correlations(_config)
        assert np.all(corr[:-1] > 0.97)
        assert corr[-1] > 0.5
Example #4
    def test_parsing(self, _model_4_first):

        self.prepare_model(_model_4_first)

        updates = {
            'tools': {
                'evaluate_ann': True,
                'parse': True,
                'normalize': False,
                'convert': False,
                'simulate': False
            },
            'input': {
                'model_lib': 'pytorch'
            },
            'simulation': {
                'num_to_test': 100,
                'batch_size': 50
            }
        }

        self.config.read_dict(updates)

        initialize_simulator(self.config)

        acc = run_pipeline(self.config)

        assert acc[0] >= 0.8
Example #5
    def test_pipeline(self, _model_4_first):

        self.prepare_model(_model_4_first)

        updates = {
            'tools': {
                'evaluate_ann': False
            },
            'input': {
                'model_lib': 'pytorch'
            },
            'simulation': {
                'duration': 100,
                'num_to_test': 100,
                'batch_size': 50
            },
            'output': {
                'log_vars': {'activations_n_b_l', 'spiketrains_n_b_l_t'}
            }
        }

        self.config.read_dict(updates)

        initialize_simulator(self.config)

        acc = run_pipeline(self.config)

        assert acc[0] >= 0.8

        corr = get_correlations(self.config)
        assert np.all(corr[:-1] > 0.97)
        assert corr[-1] > 0.5
Example #6
    def test_loihi(self, _model_1, _config):

        path_wd = _config.get('paths', 'path_wd')
        model_name = _config.get('paths', 'filename_ann')
        keras.models.save_model(_model_1,
                                os.path.join(path_wd, model_name + '.h5'))

        updates = {
            'tools': {
                'evaluate_ann': True,
                'normalize': False
            },
            'loihi': {
                'reset_mode': 'soft',
                'desired_threshold_to_input_ratio': 1,
                'compartment_kwargs': {
                    'biasExp': 6,
                    'vThMant': 512
                },
                'connection_kwargs': {
                    'numWeightBits': 8,
                    'weightExponent': 0,
                    'numBiasBits': 12
                },
                'validate_partitions': False,
                'save_output': False,
                'do_overflow_estimate': False,
                'normalize_thresholds': True
            },
            'simulation': {
                'simulator': 'loihi',
                'duration': 512,
                'num_to_test': 100,
                'batch_size': 1
            },
            'output': {
                'log_vars': {'activations_n_b_l', 'spiketrains_n_b_l_t'}
            }
        }

        _config.read_dict(updates)

        initialize_simulator(_config)

        acc = run_pipeline(_config)

        assert acc[0] >= 0.95

        corr = get_correlations(_config)
        assert np.all(corr[:-1] > 0.97)
        assert corr[-1] > 0.5
Example #7
    def test_loading(self, _model_4, _config):

        import keras
        assert keras.backend.image_data_format() == 'channels_first', \
            "Pytorch to Keras parser needs image_data_format == channel_first."

        self.prepare_model(_model_4, _config)

        updates = {
            'tools': {
                'evaluate_ann': True,
                'parse': False,
                'normalize': False,
                'convert': False,
                'simulate': False
            },
            'input': {
                'model_lib': 'pytorch'
            },
            'simulation': {
                'num_to_test': 100,
                'batch_size': 50
            }
        }

        _config.read_dict(updates)

        initialize_simulator(_config)

        normset, testset = get_dataset(_config)

        model_lib = import_module('snntoolbox.parsing.model_libs.' +
                                  _config.get('input', 'model_lib') +
                                  '_input_lib')
        input_model = model_lib.load(_config.get('paths', 'path_wd'),
                                     _config.get('paths', 'filename_ann'))

        # Evaluate input model.
        acc = model_lib.evaluate(input_model['val_fn'],
                                 _config.getint('simulation', 'batch_size'),
                                 _config.getint('simulation', 'num_to_test'),
                                 **testset)

        assert acc >= 0.8
Example #8
    def test_nest(self, _model_1, _config):

        path_wd = _config.get('paths', 'path_wd')
        model_name = _config.get('paths', 'filename_ann')
        keras.models.save_model(_model_1,
                                os.path.join(path_wd, model_name + '.h5'))

        updates = {
            'tools': {
                'evaluate_ann': False
            },
            'input': {
                'poisson_input': True
            },
            'simulation': {
                'simulator': 'nest',
                'duration': 100,
                'num_to_test': 100,
                'batch_size': 1
            },
            'output': {
                'log_vars': {'activations_n_b_l', 'spiketrains_n_b_l_t'}
            }
        }

        _config.read_dict(updates)

        try:
            initialize_simulator(_config)
        except (ImportError, KeyError, ValueError):
            return

        acc = run_pipeline(_config)

        assert acc[0] >= 0.95

        corr = get_correlations(_config)
        assert np.all(corr[:-1] > 0.97)
        assert corr[-1] > 0.5
Example #9
def get_parameters_for_simtests():
    from snntoolbox.bin.utils import load_config, initialize_simulator

    config_defaults = load_config(
        os.path.abspath(
            os.path.join(os.path.dirname(__file__), '..', 'snntoolbox',
                         'config_defaults')))

    config_ini = {
        'simulation': {
            'simulator': 'INI',
            'target_acc': 99.00,
            'num_to_test': 200
        }
    }
    config_nest = {
        'simulation': {
            'simulator': 'nest',
            'target_acc': 99.00,
            'num_to_test': 2
        },
        'input': {
            'poisson_input': True
        }
    }
    config_brian = {
        'simulation': {
            'simulator': 'brian',
            'target_acc': 99.00,
            'num_to_test': 2
        },
        'input': {
            'poisson_input': True
        }
    }
    config_neuron = {
        'simulation': {
            'simulator': 'Neuron',
            'target_acc': 99.00,
            'num_to_test': 2
        },
        'input': {
            'poisson_input': True
        }
    }
    config_megasim = {
        'simulation': {
            'simulator': 'MegaSim',
            'batch_size': 1,
            'target_acc': 99.00,
            'num_to_test': 2
        },
        'input': {
            'poisson_input': True
        }
    }
    configs_to_test = [
        config_ini, config_nest, config_brian, config_neuron, config_megasim
    ]
    _sm = []
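    # Keep only the configs whose simulator backend can actually be
    # initialized on this machine; a missing backend raises and is skipped.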
    for config_to_test in configs_to_test:
        config_defaults.read_dict(config_to_test)
        try:
            initialize_simulator(config_defaults)
        except (ImportError, KeyError, ValueError):
            continue
        config_defaults['restrictions']['is_installed'] = 'True'
        _sm.append(config_defaults)
    return _sm
Example #10
def test_initialize_simulator(config):
    from snntoolbox.bin.utils import initialize_simulator
    if config.getboolean('restrictions', 'is_installed'):
        assert initialize_simulator(config)
    else:
        pytest.raises(ImportError, initialize_simulator, config=config)
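
The config fixture that test_initialize_simulator receives is not defined in these snippets. One plausible wiring, shown purely as a sketch (the conftest.py placement, the module-level list _sim_configs, and the fixture body are hypothetical; it assumes get_parameters_for_simtests from the previous example is defined or imported in the same module), is to parametrize a pytest fixture over the returned list so the test runs once per simulator that was successfully initialized:

import pytest

# Hypothetical conftest-style fixture: one test run per usable simulator config.
_sim_configs = get_parameters_for_simtests()

@pytest.fixture(params=_sim_configs,
                ids=[c.get('simulation', 'simulator') for c in _sim_configs])
def config(request):
    return request.param

Note that get_parameters_for_simtests only returns configs whose backend imported cleanly (each tagged with is_installed = 'True'), so with this wiring the pytest.raises(ImportError, ...) branch of test_initialize_simulator would not be exercised; covering it would require also parametrizing configs for simulators known to be missing.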