# A function handle, that is used to evaluate the performance of a run.
_PERFORMANCE_EVAL_HANDLE = hpsplit._performance_criteria

# A key that must appear in the `_SUMMARY_KEYWORDS` list. If `None`, the first
# entry in this list will be selected.
# The CSV file will be sorted based on this keyword. See also attribute
# `_PERFORMANCE_SORT_ASC`.
_PERFORMANCE_KEY = 'acc_avg_final'
# PEP 8: `assert` is a statement, not a function -- no parentheses needed.
assert _PERFORMANCE_KEY is None or _PERFORMANCE_KEY in _SUMMARY_KEYWORDS
# Whether the CSV should be sorted ascending or descending based on the
# `_PERFORMANCE_KEY`.
_PERFORMANCE_SORT_ASC = False

# FIXME: This attribute will vanish in future releases.
# This attribute is only required by the `hpsearch_postprocessing` script.
# A function handle to the argument parser function used by the simulation
# script. The function handle should expect the list of command line options
# as only parameter.
# Example:
# >>> from probabilistic.prob_mnist import train_args as targs
# >>> f = lambda argv : targs.parse_cmd_arguments(mode='split_mnist_bbb',
# ...                                             argv=argv)
# >>> _ARGPARSE_HANDLE = f
import probabilistic.prob_mnist.train_args as targs
# No backslash continuation needed -- the open parenthesis already continues
# the logical line.
_ARGPARSE_HANDLE = lambda argv: targs.parse_cmd_arguments(
    mode='split_mnist_avb', argv=argv)

if __name__ == '__main__':
    pass


# limitations under the License.
#
# @title          :probabilistic/prob_mnist/train_split_avb_pf.py
# @author         :ch
# @contact        :[email protected]
# @created        :01/30/2020
# @version        :1.0
# @python_version :3.6.9
"""
Train implicit posterior via AVB for prior-focused SplitMNIST
----------------------------------------------------------------

The script :mod:`probabilistic.prob_mnist.train_split_avb_pf` is used to run
experiments on SplitMNIST. At the moment, it simply takes care of providing the
correct command-line arguments and default values to the end user while simply
calling: :mod:`probabilistic.prob_cifar.train_avb`, which will train a single
posterior for all tasks sequentially using the prior-focused CL approach
(i.e., the posterior of the previous task becomes the prior of the current
task).
"""
# Do not delete the following import for all executable scripts!
import __init__  # pylint: disable=unused-import

from probabilistic.prob_mnist import train_args
from probabilistic.prob_cifar import train_avb

if __name__ == '__main__':
    # Gather the user-provided command-line configuration for this experiment.
    cli_config = train_args.parse_cmd_arguments(mode='split_mnist_avb_pf')
    # Delegate the actual training loop to the shared AVB training script.
    train_avb.run(cli_config, experiment='split_mnist_avb_pf')
# A function handle, that is used to evaluate the performance of a run.
_PERFORMANCE_EVAL_HANDLE = hpsplit._performance_criteria

# A key that must appear in the `_SUMMARY_KEYWORDS` list. If `None`, the first
# entry in this list will be selected.
# The CSV file will be sorted based on this keyword. See also attribute
# `_PERFORMANCE_SORT_ASC`.
_PERFORMANCE_KEY = 'acc_avg_final'
# PEP 8: `assert` is a statement, not a function -- no parentheses needed.
assert _PERFORMANCE_KEY is None or _PERFORMANCE_KEY in _SUMMARY_KEYWORDS
# Whether the CSV should be sorted ascending or descending based on the
# `_PERFORMANCE_KEY`.
_PERFORMANCE_SORT_ASC = False

# FIXME: This attribute will vanish in future releases.
# This attribute is only required by the `hpsearch_postprocessing` script.
# A function handle to the argument parser function used by the simulation
# script. The function handle should expect the list of command line options
# as only parameter.
# Example:
# >>> from probabilistic.prob_mnist import train_args as targs
# >>> f = lambda argv : targs.parse_cmd_arguments(mode='split_mnist_bbb',
# ...                                             argv=argv)
# >>> _ARGPARSE_HANDLE = f
import probabilistic.prob_mnist.train_args as targs
# No backslash continuation needed -- the open parenthesis already continues
# the logical line.
_ARGPARSE_HANDLE = lambda argv: targs.parse_cmd_arguments(
    mode='cifar_resnet_bbb', argv=argv)

if __name__ == '__main__':
    pass
# Example 4
#
# @title          :probabilistic/prob_cifar/train_resnet_avb_pf.py
# @author         :ch
# @contact        :[email protected]
# @created        :01/30/2020
# @version        :1.0
# @python_version :3.6.9
"""
Train implicit posterior via AVB for prior-focused CIFAR-10/100 with Resnet-32
------------------------------------------------------------------------------

The script  :mod:`probabilistic.prob_cifar.train_resnet_avb_pf` is used to run a
probabilistic CL experiment on CIFAR using a Resnet-32
(:class:`mnets.resnet.ResNet`) and Adversarial-Variational-Bayes (AVB) as method
to learn a single posterior for all tasks sequentially. At the moment, it simply
takes care of providing the correct command-line arguments and default values to
the end user. Afterwards, it will simply call:
:mod:`probabilistic.prob_cifar.train_avb`.
"""
# Do not delete the following import for all executable scripts!
import __init__  # pylint: disable=unused-import

from probabilistic.prob_mnist import train_args
from probabilistic.prob_cifar import train_avb

if __name__ == '__main__':
    # Mode and experiment label share the same string for this script.
    experiment_name = 'cifar_resnet_avb_pf'
    # Parse CLI arguments, then hand control to the generic AVB trainer.
    run_config = train_args.parse_cmd_arguments(mode=experiment_name)
    train_avb.run(run_config, experiment=experiment_name)

# Example 5
# A function handle, that is used to evaluate the performance of a run.
_PERFORMANCE_EVAL_HANDLE = hpsplitbbb._performance_criteria

# A key that must appear in the `_SUMMARY_KEYWORDS` list. If `None`, the first
# entry in this list will be selected.
# The CSV file will be sorted based on this keyword. See also attribute
# `_PERFORMANCE_SORT_ASC`.
_PERFORMANCE_KEY = 'acc_avg_final'
# PEP 8: `assert` is a statement, not a function -- no parentheses needed.
assert _PERFORMANCE_KEY is None or _PERFORMANCE_KEY in _SUMMARY_KEYWORDS
# Whether the CSV should be sorted ascending or descending based on the
# `_PERFORMANCE_KEY`.
_PERFORMANCE_SORT_ASC = False

# FIXME: This attribute will vanish in future releases.
# This attribute is only required by the `hpsearch_postprocessing` script.
# A function handle to the argument parser function used by the simulation
# script. The function handle should expect the list of command line options
# as only parameter.
# Example:
# >>> from probabilistic.prob_mnist import train_args as targs
# >>> f = lambda argv : targs.parse_cmd_arguments(mode='split_mnist_bbb',
# ...                                             argv=argv)
# >>> _ARGPARSE_HANDLE = f
import probabilistic.prob_mnist.train_args as targs
# No backslash continuation needed -- the open parenthesis already continues
# the logical line.
_ARGPARSE_HANDLE = lambda argv: targs.parse_cmd_arguments(
    mode='gmm_ssge', argv=argv)

if __name__ == '__main__':
    pass
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @title          :probabilistic/prob_cifar/train_zenke_bbb.py
# @author         :ch
# @contact        :[email protected]
# @created        :01/21/2020
# @version        :1.0
# @python_version :3.6.9
"""
CIFAR-10/100 ZenkeNet CL Experiment with BbB
--------------------------------------------

The script  :mod:`probabilistic.prob_cifar.train_zenke_bbb` is used to run a
probabilistic CL experiment on CIFAR using a ZenkeNet
(:class:`mnets.zenkenet.ZenkeNet`) and Bayes-by-Backprop as method to learn
task-specific weight posteriors. At the moment, it simply takes care of
providing the correct command-line arguments and default values to the end user.
Afterwards, it will simply call: :mod:`probabilistic.prob_mnist.train_bbb`.
"""
# Do not delete the following import for all executable scripts!
import __init__  # pylint: disable=unused-import

from probabilistic.prob_mnist import train_args
from probabilistic.prob_mnist import train_bbb

if __name__ == '__main__':
    # Collect command-line options for the ZenkeNet BbB CIFAR experiment.
    run_config = train_args.parse_cmd_arguments(mode='cifar_zenke_bbb')
    # The shared BbB training routine performs the actual experiment.
    train_bbb.run(run_config, experiment='cifar_zenke_bbb')
# A function handle, that is used to evaluate the performance of a run.
_PERFORMANCE_EVAL_HANDLE = hpsplit._performance_criteria

# A key that must appear in the `_SUMMARY_KEYWORDS` list. If `None`, the first
# entry in this list will be selected.
# The CSV file will be sorted based on this keyword. See also attribute
# `_PERFORMANCE_SORT_ASC`.
_PERFORMANCE_KEY = 'acc_avg_final'
# PEP 8: `assert` is a statement, not a function -- no parentheses needed.
assert _PERFORMANCE_KEY is None or _PERFORMANCE_KEY in _SUMMARY_KEYWORDS
# Whether the CSV should be sorted ascending or descending based on the
# `_PERFORMANCE_KEY`.
_PERFORMANCE_SORT_ASC = False

# FIXME: This attribute will vanish in future releases.
# This attribute is only required by the `hpsearch_postprocessing` script.
# A function handle to the argument parser function used by the simulation
# script. The function handle should expect the list of command line options
# as only parameter.
# Example:
# >>> from probabilistic.prob_mnist import train_args as targs
# >>> f = lambda argv : targs.parse_cmd_arguments(mode='split_mnist_bbb',
# ...                                             argv=argv)
# >>> _ARGPARSE_HANDLE = f
import probabilistic.prob_mnist.train_args as targs
# No backslash continuation needed -- the open parenthesis already continues
# the logical line.
_ARGPARSE_HANDLE = lambda argv: targs.parse_cmd_arguments(
    mode='perm_mnist_ssge_pf', argv=argv)

if __name__ == '__main__':
    pass
---------------------------------------------------------------------------

In this script, we train a target network via variational inference, where the
variational family is NOT restricted to a set of Gaussian distributions with
diagonal covariance matrix (as in
:mod:`probabilistic.prob_mnist.train_bbb`).
For the training we use an implicit method, the training method for this case
is described in

    Shi, Jiaxin, Shengyang Sun, and Jun Zhu. "A spectral approach to gradient 
    estimation for implicit distributions." ICML, 2018.
    https://arxiv.org/abs/1806.02925

Specifically, we use a hypernetwork to output the weights for the target
network of each task in a continual learning setup, where tasks are presented
sequentially and forgetting of previous tasks is prevented by the
regularizer proposed in

    https://arxiv.org/abs/1906.00695
"""
# Do not delete the following import for all executable scripts!
import __init__  # pylint: disable=unused-import

from probabilistic.prob_mnist import train_args
from probabilistic.prob_cifar import train_avb

if __name__ == '__main__':
    mode_name = 'cifar_resnet_ssge'
    # Parse the command line and launch the shared AVB/SSGE training routine.
    train_avb.run(train_args.parse_cmd_arguments(mode=mode_name),
                  experiment=mode_name)
# Example 9
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# title          :probabilistic/prob_mnist/train_perm_bbb.py
# author         :ch
# contact        :[email protected]
# created        :09/06/2019
# version        :1.0
# python_version :3.6.8
"""
Train Gaussian per-task posteriors for PermutedMNIST
----------------------------------------------------

This script is used to run experiments on PermutedMNIST. It's role is
analogous to the one of the script
:mod:`probabilistic.prob_mnist.train_split_bbb`.
"""
# Do not delete the following import for all executable scripts!
import __init__  # pylint: disable=unused-import

from probabilistic.prob_mnist import train_args
from probabilistic.prob_mnist import train_bbb

if __name__ == '__main__':
    # Read the experiment configuration from the command line.
    run_config = train_args.parse_cmd_arguments(mode='perm_mnist_bbb')
    # NOTE: the experiment label 'perm_bbb' differs from the parser mode
    # string 'perm_mnist_bbb'.
    train_bbb.run(run_config, experiment='perm_bbb')
-----------------------------------------------------------------

In this script, we train a target network via variational inference, where the
variational family is NOT restricted to a set of Gaussian distributions with
diagonal covariance matrix (as in
:mod:`probabilistic.prob_mnist.train_bbb`).
For the training we use an implicit method, the training method for this case
is described in

    Shi, Jiaxin, Shengyang Sun, and Jun Zhu. "A spectral approach to gradient 
    estimation for implicit distributions." ICML, 2018.
    https://arxiv.org/abs/1806.02925

Specifically, we use a hypernetwork to output the weights for the target
network of each task in a continual learning setup, where tasks are presented
sequentially and forgetting of previous tasks is prevented by the
regularizer proposed in

    https://arxiv.org/abs/1906.00695
"""
# Do not delete the following import for all executable scripts!
import __init__  # pylint: disable=unused-import

from probabilistic.prob_mnist import train_args
from probabilistic.prob_cifar import train_avb

if __name__ == '__main__':
    mode_name = 'split_mnist_ssge'
    # Parse command-line options, then run the shared AVB/SSGE trainer.
    cli_config = train_args.parse_cmd_arguments(mode=mode_name)
    train_avb.run(cli_config, experiment=mode_name)
# A function handle, that is used to evaluate the performance of a run.
_PERFORMANCE_EVAL_HANDLE = hpsplitbbb._PERFORMANCE_EVAL_HANDLE

# A key that must appear in the `_SUMMARY_KEYWORDS` list. If `None`, the first
# entry in this list will be selected.
# The CSV file will be sorted based on this keyword. See also attribute
# `_PERFORMANCE_SORT_ASC`.
_PERFORMANCE_KEY = 'acc_avg_final'
# PEP 8: `assert` is a statement, not a function -- no parentheses needed.
assert _PERFORMANCE_KEY is None or _PERFORMANCE_KEY in _SUMMARY_KEYWORDS
# Whether the CSV should be sorted ascending or descending based on the
# `_PERFORMANCE_KEY`.
_PERFORMANCE_SORT_ASC = False

# FIXME: This attribute will vanish in future releases.
# This attribute is only required by the `hpsearch_postprocessing` script.
# A function handle to the argument parser function used by the simulation
# script. The function handle should expect the list of command line options
# as only parameter.
# Example:
# >>> from probabilistic.prob_mnist import train_args as targs
# >>> f = lambda argv : targs.parse_cmd_arguments(mode='split_mnist_bbb',
# ...                                             argv=argv)
# >>> _ARGPARSE_HANDLE = f
import probabilistic.prob_mnist.train_args as targs
# No backslash continuation needed -- the open parenthesis already continues
# the logical line.
_ARGPARSE_HANDLE = lambda argv: targs.parse_cmd_arguments(
    mode='cifar_zenke_avb', argv=argv)

if __name__ == '__main__':
    pass


# limitations under the License.
#
# @title          :probabilistic/prob_gmm/train_gmm_bbb.py
# @author         :ch
# @contact        :[email protected]
# @created        :03/10/2020
# @version        :1.0
# @python_version :3.6.10
"""
GMM CL Classification Experiment with BbB
-----------------------------------------

The script  :mod:`probabilistic.prob_gmm.train_gmm_bbb` is used to run a
probabilistic CL experiment on a toy classification problem using synthetic
data (:class:`data.special.GMMData`). Bayes-by-Backprop is used to learn task-
specific weight posteriors. At the moment, the script simply takes care of
providing the correct command-line arguments and default values to the end user.
Afterwards, it will simply call: :mod:`probabilistic.prob_mnist.train_bbb`.

See :ref:`prob-gmm-bbb-readme-reference-label` for usage instructions.
"""
# Do not delete the following import for all executable scripts!
import __init__  # pylint: disable=unused-import

from probabilistic.prob_mnist import train_args
from probabilistic.prob_mnist import train_bbb

if __name__ == '__main__':
    # Mode and experiment label share the same string for this script.
    experiment_name = 'gmm_bbb'
    # Parse CLI arguments, then hand control to the shared BbB trainer.
    run_config = train_args.parse_cmd_arguments(mode=experiment_name)
    train_bbb.run(run_config, experiment=experiment_name)
# Example 13
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @title          :probabilistic/prob_gmm/train_gmm_ssge_pf.py
# @author         :ch
# @contact        :[email protected]
# @created        :03/10/2020
# @version        :1.0
# @python_version :3.6.10
"""
Train implicit posterior via SSGE for prior-focused CL with GMM tasks
---------------------------------------------------------------------

The script  :mod:`probabilistic.prob_gmm.train_gmm_ssge_pf` is used to run a
probabilistic CL experiment on a toy classification problem using synthetic
data (:class:`data.special.GMMData`). Spectral Stein Gradient Estimator (SSGE)
is used to learn a single posterior for all tasks sequentially.
"""
# Do not delete the following import for all executable scripts!
import __init__  # pylint: disable=unused-import

from probabilistic.prob_cifar import train_avb
from probabilistic.prob_mnist import train_args

if __name__ == '__main__':
    # Collect the command-line configuration for this prior-focused run.
    cli_config = train_args.parse_cmd_arguments(mode='gmm_ssge_pf')
    # The shared AVB training routine also handles the SSGE variants.
    train_avb.run(cli_config, experiment='gmm_ssge_pf')

----------------------------------------------------------------------

In this script, we train a target network via variational inference, where the
variational family is NOT restricted to a set of Gaussian distributions with
diagonal covariance matrix (as in
:mod:`probabilistic.prob_mnist.train_bbb`).
For the training we use an implicit method, the training method for this case
is described in

    Shi, Jiaxin, Shengyang Sun, and Jun Zhu. "A spectral approach to gradient 
    estimation for implicit distributions." ICML, 2018.
    https://arxiv.org/abs/1806.02925

Specifically, we use a hypernetwork to output the weights for the target
network of each task in a continual learning setup, where tasks are presented
sequentially and forgetting of previous tasks is prevented by the
regularizer proposed in

    https://arxiv.org/abs/1906.00695
"""
# Do not delete the following import for all executable scripts!
import __init__  # pylint: disable=unused-import

from probabilistic.prob_mnist import train_args
from probabilistic.prob_cifar import train_avb

if __name__ == '__main__':
    mode_name = 'perm_mnist_ssge_pf'
    # Parse the command line, then delegate to the shared AVB/SSGE trainer.
    train_avb.run(train_args.parse_cmd_arguments(mode=mode_name),
                  experiment=mode_name)