Code example #1
0
    def test_loading_parsed_model(self, _model_1, _config):
        """Parse a saved model and check the parsed copy predicts the same."""
        model = _model_1
        working_dir = _config.get('paths', 'path_wd')
        ann_name = _config.get('paths', 'filename_ann')
        keras.models.save_model(
            model, os.path.join(working_dir, ann_name + '.h5'))

        # Disable every pipeline stage except parsing.
        _config.read_dict({
            'tools': {'evaluate_ann': False,
                      'normalize': False,
                      'convert': False,
                      'simulate': False}})

        run_pipeline(_config)

        parsed_path = os.path.join(
            working_dir,
            _config.get('paths', 'filename_parsed_model') + '.h5')
        parsed_model = keras.models.load_model(parsed_path)

        # Original and parsed model must agree on a random batch.
        batch = np.random.random_sample((5, ) + model.input_shape[1:])
        assert np.allclose(model.predict(batch), parsed_model.predict(batch))
Code example #2
0
    def test_maxpool_first(self, _model_maxpool2D_1_first, _config_first):
        """Test that maxpooling fails with data format channels_first."""
        cfg = _config_first
        working_dir = cfg.get('paths', 'path_wd')
        ann_name = cfg.get('paths', 'filename_ann')
        models.save_model(_model_maxpool2D_1_first,
                          os.path.join(working_dir, ann_name + '.h5'))

        cfg.read_dict({
            'tools': {'evaluate_ann': False,
                      'normalize': False},
            'simulation': {'duration': 100,
                           'num_to_test': 100,
                           'batch_size': 50},
            'output': {'log_vars': {'activations_n_b_l',
                                    'spiketrains_n_b_l_t'}}})

        run_pipeline(cfg)
Code example #3
0
    def test_inisim(self, _model_2, _config):
        """Run the full pipeline with the built-in INI simulator."""
        working_dir = _config.get('paths', 'path_wd')
        ann_name = _config.get('paths', 'filename_ann')
        keras.models.save_model(
            _model_2, os.path.join(working_dir, ann_name + '.h5'))

        _config.read_dict({
            'tools': {'evaluate_ann': False},
            'simulation': {'simulator': 'INI',
                           'duration': 100,
                           'num_to_test': 100,
                           'batch_size': 50,
                           'keras_backend': 'tensorflow'},
            'output': {'log_vars': {'activations_n_b_l',
                                    'spiketrains_n_b_l_t'}}})

        accuracy = run_pipeline(_config)
        assert accuracy[0] >= 0.95

        # Layer activations should correlate tightly; the output layer a
        # little less so.
        correlations = get_correlations(_config)
        assert np.all(correlations[:-1] > 0.99)
        assert correlations[-1] > 0.90
Code example #4
0
File: test_models.py  Project: davisden/snn_toolbox
    def test_spinnaker_sparse(self, _model_3, _config):
        """Run a sparse model on the spiNNaker simulator with Poisson input."""
        working_dir = _config.get('paths', 'path_wd')
        ann_name = _config.get('paths', 'filename_ann')
        models.save_model(
            _model_3, os.path.join(working_dir, ann_name + '.h5'))

        _config.read_dict({
            'tools': {'evaluate_ann': False},
            'input': {'poisson_input': True},
            'simulation': {'simulator': 'spiNNaker',
                           'duration': 100,
                           # A single test sample keeps the run feasible.
                           'num_to_test': 1,
                           'batch_size': 1},
            'output': {'log_vars': {'activations_n_b_l',
                                    'spiketrains_n_b_l_t'}}})

        initialize_simulator(_config)

        accuracy = run_pipeline(_config)
        assert accuracy[0] >= 0.95

        correlations = get_correlations(_config)
        assert np.all(correlations[:-1] > 0.97)
        assert correlations[-1] > 0.5
Code example #5
0
    def test_maxpool_fallback(self, _model_maxpool2D_1, _config):
        """Test that maxpooling falls back on average pooling."""
        working_dir = _config.get('paths', 'path_wd')
        ann_name = _config.get('paths', 'filename_ann')
        models.save_model(_model_maxpool2D_1,
                          os.path.join(working_dir, ann_name + '.h5'))

        _config.read_dict({
            'tools': {'evaluate_ann': False,
                      'normalize': False},
            # Replace max-pooling layers by average pooling.
            'conversion': {'max2avg_pool': True},
            'simulation': {'duration': 100,
                           'num_to_test': 100,
                           'batch_size': 50},
            'output': {'log_vars': {'activations_n_b_l',
                                    'spiketrains_n_b_l_t'}}})

        accuracy = run_pipeline(_config)
        assert accuracy[0] >= 0.8

        correlations = get_correlations(_config)
        assert np.all(correlations[:-1] > 0.99)
        assert correlations[-1] > 0.90
Code example #6
0
    def test_maxpool(self, _model_maxpool2D_1_last, _config_last):
        """Test maxpooling."""
        cfg = _config_last
        working_dir = cfg.get('paths', 'path_wd')
        ann_name = cfg.get('paths', 'filename_ann')
        models.save_model(_model_maxpool2D_1_last,
                          os.path.join(working_dir, ann_name + '.h5'))

        cfg.read_dict({
            'tools': {'evaluate_ann': False,
                      'normalize': False},
            'simulation': {'duration': 100,
                           'num_to_test': 100,
                           'batch_size': 50},
            'output': {'log_vars': {'activations_n_b_l',
                                    'spiketrains_n_b_l_t'}}})

        accuracy = run_pipeline(cfg)

        # The SNN should retain at least 90% of the ANN accuracy.
        acc_ann = get_ann_acc(cfg)
        assert accuracy[0] >= 0.9 * acc_ann

        correlations = get_correlations(cfg)
        assert np.all(correlations[:-1] > 0.99)
        assert correlations[-1] > 0.90
Code example #7
0
    def test_nest(self, _model_1, _config):
        """Run the pipeline on the nest simulator with fine time resolution."""
        working_dir = _config.get('paths', 'path_wd')
        ann_name = _config.get('paths', 'filename_ann')
        keras.models.save_model(
            _model_1, os.path.join(working_dir, ann_name + '.h5'))

        _config.read_dict({
            'tools': {'evaluate_ann': False},
            'simulation': {'simulator': 'nest',
                           'duration': 50,
                           'num_to_test': 10,
                           'batch_size': 1,
                           'dt': 0.1},
            'cell': {'tau_refrac': 0.1,
                     'delay': 0.1,
                     'v_thresh': 0.01},
            'output': {'log_vars': {'activations_n_b_l',
                                    'spiketrains_n_b_l_t'}}})

        initialize_simulator(_config)

        accuracy = run_pipeline(_config)
        assert accuracy[0] >= 0.95

        correlations = get_correlations(_config)
        assert np.all(correlations[:-1] > 0.97)
        assert correlations[-1] > 0.5
Code example #8
0
    def test_parsing(self, _model_4_first):
        """Parse and evaluate a pytorch model, skipping convert/simulate."""
        self.prepare_model(_model_4_first)

        self.config.read_dict({
            'tools': {'evaluate_ann': True,
                      'parse': True,
                      'normalize': False,
                      'convert': False,
                      'simulate': False},
            'input': {'model_lib': 'pytorch'},
            'simulation': {'num_to_test': 100,
                           'batch_size': 50}})

        initialize_simulator(self.config)

        accuracy = run_pipeline(self.config)
        assert accuracy[0] >= 0.8
Code example #9
0
    def test_pipeline(self, _model_4_first):
        """Run the full conversion pipeline on a pytorch model."""
        self.prepare_model(_model_4_first)

        self.config.read_dict({
            'tools': {'evaluate_ann': False},
            'input': {'model_lib': 'pytorch'},
            'simulation': {'duration': 100,
                           'num_to_test': 100,
                           'batch_size': 50},
            'output': {'log_vars': {'activations_n_b_l',
                                    'spiketrains_n_b_l_t'}}})

        initialize_simulator(self.config)

        accuracy = run_pipeline(self.config)
        assert accuracy[0] >= 0.8

        correlations = get_correlations(self.config)
        assert np.all(correlations[:-1] > 0.97)
        assert correlations[-1] > 0.5
Code example #10
0
File: run.py  Project: yult0821/snn_toolbox
def main(filepath=None):
    """Entry point for running the toolbox.

    Note
    ----

    There is no need to call this function directly, because python sets up an
    executable during :ref:`installation` that can be called from terminal.

    """
    from snntoolbox.bin.utils import update_setup, run_pipeline

    if filepath is not None:
        config = update_setup(filepath)
        run_pipeline(config)
        return

    parser = argparse.ArgumentParser(
        description='Run SNN toolbox to convert an analog neural network into '
        'a spiking neural network, and optionally simulate it.')
    parser.add_argument('config_filepath',
                        nargs='?',
                        help='Path to configuration file.')
    parser.add_argument('-t',
                        '--terminal',
                        action='store_true',
                        help='Set this flag to run the toolbox from terminal. '
                        'Omit this flag to open GUI.')
    args = parser.parse_args()

    # BUGFIX: check the raw argument before calling os.path.abspath.
    # The original called abspath(args.config_filepath) unconditionally,
    # which raises TypeError when no config file is given (abspath(None)),
    # so the no-argument GUI path below was unreachable.
    if args.config_filepath is not None:
        config = update_setup(os.path.abspath(args.config_filepath))

        if args.terminal:
            run_pipeline(config)
        else:
            from snntoolbox.bin.gui import gui
            gui.main()
    else:
        if args.terminal:
            # parser.error prints the message and exits (SystemExit).
            parser.error("When using the SNN toolbox from terminal, a "
                         "config_filepath argument must be provided.")
        else:
            from snntoolbox.bin.gui import gui
            gui.main()
Code example #11
0
File: test_models.py  Project: RKCZ/snn_toolbox
    def test_loihi(self, _model_1, _config):
        """Run the pipeline on the Loihi simulator with soft-reset neurons."""
        working_dir = _config.get('paths', 'path_wd')
        ann_name = _config.get('paths', 'filename_ann')
        keras.models.save_model(
            _model_1, os.path.join(working_dir, ann_name + '.h5'))

        _config.read_dict({
            'tools': {'evaluate_ann': True,
                      'normalize': False},
            'loihi': {
                'reset_mode': 'soft',
                'desired_threshold_to_input_ratio': 1,
                'compartment_kwargs': {'biasExp': 6,
                                       'vThMant': 512},
                'connection_kwargs': {'numWeightBits': 8,
                                      'weightExponent': 0,
                                      'numBiasBits': 12},
                'validate_partitions': False,
                'save_output': False,
                'do_overflow_estimate': False,
                'normalize_thresholds': True},
            'simulation': {'simulator': 'loihi',
                           'duration': 512,
                           'num_to_test': 100,
                           'batch_size': 1},
            'output': {'log_vars': {'activations_n_b_l',
                                    'spiketrains_n_b_l_t'}}})

        initialize_simulator(_config)

        accuracy = run_pipeline(_config)
        assert accuracy[0] >= 0.95

        correlations = get_correlations(_config)
        assert np.all(correlations[:-1] > 0.97)
        assert correlations[-1] > 0.5
Code example #12
0
    def test_nest(self, _model_1, _config):
        """Run the pipeline on nest with Poisson input, if nest is usable."""
        working_dir = _config.get('paths', 'path_wd')
        ann_name = _config.get('paths', 'filename_ann')
        keras.models.save_model(
            _model_1, os.path.join(working_dir, ann_name + '.h5'))

        _config.read_dict({
            'tools': {'evaluate_ann': False},
            'input': {'poisson_input': True},
            'simulation': {'simulator': 'nest',
                           'duration': 100,
                           'num_to_test': 100,
                           'batch_size': 1},
            'output': {'log_vars': {'activations_n_b_l',
                                    'spiketrains_n_b_l_t'}}})

        # Bail out silently when the nest backend is missing or
        # misconfigured.
        try:
            initialize_simulator(_config)
        except (ImportError, KeyError, ValueError):
            return

        accuracy = run_pipeline(_config)
        assert accuracy[0] >= 0.95

        correlations = get_correlations(_config)
        assert np.all(correlations[:-1] > 0.97)
        assert correlations[-1] > 0.5