Example #1
"""External configuration .gin"""

from gin import config
import jax

config.external_configurable(jax.nn.initializers.zeros,
                             'jax.nn.initializers.zeros')
config.external_configurable(jax.nn.initializers.ones,
                             'jax.nn.initializers.ones')
config.external_configurable(jax.nn.initializers.variance_scaling,
                             'jax.nn.initializers.variance_scaling')
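
For context (not part of the original snippet): once registered this way, the
initializer can be selected and configured from gin bindings. A minimal
sketch, with illustrative values:

import gin

gin.parse_config("""
jax.nn.initializers.variance_scaling.scale = 2.0
jax.nn.initializers.variance_scaling.mode = 'fan_in'
jax.nn.initializers.variance_scaling.distribution = 'truncated_normal'
""")

A configurable that takes an initializer can then receive it as
@jax.nn.initializers.variance_scaling() in its own bindings.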
Example #2

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from gin import config
from gin.tf import external_configurables  # pylint: disable=unused-import

import tensorflow as tf

# Necessary for the AdagradDAOptimizer test.
config.external_configurable(tf.compat.v1.train.get_global_step)


@config.configurable
def fake_train_model(learning_rate, optimizer):
    global_step = tf.compat.v1.train.get_or_create_global_step()
    lr = learning_rate(global_step=global_step)
    opt = optimizer(learning_rate=lr)
    return lr, opt


@config.configurable
def configurable(**kwargs):
    return kwargs
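
Hypothetical bindings for fake_train_model (the optimizer selector assumes the
standard registrations pulled in by gin.tf.external_configurables): both
references are passed unevaluated, since the function calls them itself.

import gin

gin.parse_config("""
fake_train_model.learning_rate = @tf.train.exponential_decay
fake_train_model.optimizer = @tf.train.GradientDescentOptimizer
tf.train.exponential_decay.learning_rate = 0.1
tf.train.exponential_decay.decay_steps = 1000
tf.train.exponential_decay.decay_rate = 0.96
""")

lr, opt = fake_train_model()  # both arguments supplied by gin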

Example #3
"""Other gin configurables
"""
import os
import gin
import modisco
import modisco.tfmodisco_workflow.workflow
from gin import config
import keras

# keras.optimizers
config.external_configurable(keras.optimizers.Adam, module='keras.optimizers')
config.external_configurable(keras.optimizers.RMSprop, module='keras.optimizers')
config.external_configurable(keras.optimizers.Adagrad, module='keras.optimizers')
config.external_configurable(keras.optimizers.Adadelta, module='keras.optimizers')
config.external_configurable(keras.optimizers.Adamax, module='keras.optimizers')
config.external_configurable(keras.optimizers.Nadam, module='keras.optimizers')
config.external_configurable(keras.optimizers.SGD, module='keras.optimizers')


# modisco
config.external_configurable(modisco.tfmodisco_workflow.workflow.TfModiscoWorkflow)
config.external_configurable(modisco.tfmodisco_workflow.seqlets_to_patterns.TfModiscoSeqletsToPatternsFactory)


@gin.configurable
def report_template(name, raise_error=True):
    """Evaluation report template found in ../templates/
    """
    import inspect
    filename = inspect.getframeinfo(inspect.currentframe()).filename
    this_dir = os.path.dirname(os.path.abspath(filename))
    # Completion assumed from context (the snippet was cut off here):
    # resolve the template path relative to this file and return it.
    template_file = os.path.join(this_dir, "../templates", name)
    if not os.path.exists(template_file) and raise_error:
        raise FileNotFoundError(f"Template {template_file} doesn't exist")
    return template_file

Example #4

from gin import config
import tensorflow as tf

config.external_configurable(tf.keras.layers.Input, name='Input')
config.external_configurable(tf.keras.layers.Conv2D, name='Conv2D')
config.external_configurable(tf.keras.layers.Dense, name='Dense')
config.external_configurable(tf.keras.layers.Flatten, name='Flatten')
config.external_configurable(tf.keras.layers.Dropout, name='Dropout')
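
Because these registrations pass explicit name= arguments, gin files refer to
the classes by those short names. An illustrative binding (build_model and its
parameter are hypothetical):

import gin

gin.parse_config("""
build_model.hidden_layer = @Dense
Dense.units = 128
Dense.activation = 'relu'
""")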
Example #5
"""Supplies a default set of configurables from core TensorFlow."""

from gin import config

import torch

# Losses.

config.external_configurable(torch.nn.modules.loss.BCELoss,
                             module='torch.nn.modules.loss')
config.external_configurable(torch.nn.modules.loss.BCEWithLogitsLoss,
                             module='torch.nn.modules.loss')
config.external_configurable(torch.nn.modules.loss.CosineEmbeddingLoss,
                             module='torch.nn.modules.loss')
config.external_configurable(torch.nn.modules.loss.CrossEntropyLoss,
                             module='torch.nn.modules.loss')
config.external_configurable(torch.nn.modules.loss.CTCLoss,
                             module='torch.nn.modules.loss')
config.external_configurable(torch.nn.modules.loss.HingeEmbeddingLoss,
                             module='torch.nn.modules.loss')
config.external_configurable(torch.nn.modules.loss.KLDivLoss,
                             module='torch.nn.modules.loss')
config.external_configurable(torch.nn.modules.loss.L1Loss,
                             module='torch.nn.modules.loss')
config.external_configurable(torch.nn.modules.loss.MarginRankingLoss,
                             module='torch.nn.modules.loss')
Example #6
"""Supplies a default set of configurables from core TensorFlow."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import functools

from gin import config

import numpy as np
import tensorflow as tf

# Learning rate decays.

config.external_configurable(tf.compat.v1.train.exponential_decay,
                             module='tf.train')
config.external_configurable(tf.compat.v1.train.inverse_time_decay,
                             module='tf.train')
config.external_configurable(tf.compat.v1.train.natural_exp_decay,
                             module='tf.train')
config.external_configurable(tf.compat.v1.train.polynomial_decay,
                             module='tf.train')


@config.configurable(module='tf.train')
@functools.wraps(tf.compat.v1.train.piecewise_constant)
def piecewise_constant(global_step, *args, **kwargs):
    if 'boundaries' in kwargs:
        kwargs['boundaries'] = list(np.int64(kwargs['boundaries']))
    return tf.compat.v1.train.piecewise_constant(global_step, *args, **kwargs)
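
The cast above is needed because gin parses the boundaries as plain Python
ints, which TensorFlow would promote to int32, while piecewise_constant
requires boundaries of the same dtype as the int64 global step; np.int64
avoids the mismatch. Illustrative bindings for the wrapper:

import gin

gin.parse_config("""
tf.train.piecewise_constant.boundaries = [100000, 110000]
tf.train.piecewise_constant.values = [1.0, 0.5, 0.1]
""")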
"""Supplies a default set of configurables from core TensorFlow."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import functools

import numpy as np
import tensorflow as tf

from gin import config

# Learning rate decays.

config.external_configurable(tf.train.exponential_decay, module='tf.train')
config.external_configurable(tf.train.inverse_time_decay, module='tf.train')
config.external_configurable(tf.train.natural_exp_decay, module='tf.train')
config.external_configurable(tf.train.polynomial_decay, module='tf.train')


@config.configurable(module='tf.train')
@functools.wraps(tf.train.piecewise_constant)
def piecewise_constant(global_step, *args, **kwargs):
    if 'boundaries' in kwargs:
        kwargs['boundaries'] = list(np.int64(kwargs['boundaries']))
    return tf.train.piecewise_constant(global_step, *args, **kwargs)


# Losses.
Example #8
import torch as t
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

from gin import config

# Prefix these with '@' in gin config files.

# optimizers
config.external_configurable(optim.SGD, 'sgd', module='t.optim')
config.external_configurable(optim.Adam, 'adam', module='t.optim')
config.external_configurable(optim.Adadelta, 'adadelta', module='t.optim')
config.external_configurable(optim.Adagrad, 'adagrad', module='t.optim')
config.external_configurable(optim.SparseAdam, 'sparse_adam', module='t.optim')
config.external_configurable(optim.Adamax, 'adamax', module='t.optim')
config.external_configurable(optim.AdamW, 'adamw', module='t.optim')
config.external_configurable(optim.ASGD, 'asgd', module='t.optim')
config.external_configurable(optim.LBFGS, 'lbfgs', module='t.optim')
config.external_configurable(optim.RMSprop, 'rmsprop', module='t.optim')
config.external_configurable(optim.Rprop, 'rprop', module='t.optim')

# lr scheduler
config.external_configurable(optim.lr_scheduler.LambdaLR,
                             'lambda_lr',
                             module='t.optim.lr_scheduler')
config.external_configurable(optim.lr_scheduler.StepLR,
                             'step_lr',
                             module='t.optim.lr_scheduler')
config.external_configurable(optim.lr_scheduler.MultiStepLR,
                             'multistep_lr',
                             module='t.optim.lr_scheduler')
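
With module='t.optim.lr_scheduler' (and 't.optim' for the optimizers), the
short names above become selectors such as @t.optim.adam in gin files. A
sketch (make_trainer is hypothetical):

import gin

gin.parse_config("""
make_trainer.optimizer_cls = @t.optim.adam
t.optim.adam.lr = 3e-4
t.optim.adam.weight_decay = 0.0
""")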
Example #9
from gin import config
import keras

# keras.optimizers
config.external_configurable(keras.optimizers.Adam, module='keras.optimizers')
config.external_configurable(keras.optimizers.RMSprop,
                             module='keras.optimizers')
config.external_configurable(keras.optimizers.Adagrad,
                             module='keras.optimizers')
config.external_configurable(keras.optimizers.Adadelta,
                             module='keras.optimizers')
config.external_configurable(keras.optimizers.Adamax,
                             module='keras.optimizers')
config.external_configurable(keras.optimizers.Nadam, module='keras.optimizers')
config.external_configurable(keras.optimizers.SGD, module='keras.optimizers')
Example #10
"""Declaration of a few external GIN configurable functions."""

from gin import config

# pylint: disable=unused-import
from gin.tf import external_configurables
# pylint: enable=unused-import

import tensorflow as tf

# Activation function.
config.external_configurable(tf.keras.activations.relu,
                             'tf.keras.activations.relu')

# Layers
config.external_configurable(tf.keras.layers.Add, 'tf.keras.layers.Add')
config.external_configurable(tf.keras.layers.Concatenate,
                             'tf.keras.layers.Concatenate')

# Loss functions
config.external_configurable(tf.keras.losses.MeanSquaredError,
                             'tf.keras.losses.MeanSquaredError')

config.external_configurable(tf.keras.losses.CategoricalCrossentropy,
                             'tf.keras.losses.CategoricalCrossentropy')
Example #11
# Class header assumed (the snippet starts mid-class); multinomial_nll comes
# from the elided context.
class MultichannelMultinomialNLL:
    def __init__(self, n):
        self.n = n

    def __call__(self, true_counts, logits):
        for i in range(self.n):
            loss = multinomial_nll(true_counts[..., i], logits[..., i])
            if i == 0:
                total = loss
            else:
                total += loss
        return total

    def get_config(self):
        return {"n": self.n}


mc_multinomial_nll_1 = MultichannelMultinomialNLL(1)
mc_multinomial_nll_1.__name__ = "mc_multinomial_nll_1"
config.external_configurable(mc_multinomial_nll_1)

mc_multinomial_nll_2 = MultichannelMultinomialNLL(2)
mc_multinomial_nll_2.__name__ = "mc_multinomial_nll_2"
config.external_configurable(mc_multinomial_nll_2)

# NOTE: this aliases mc_multinomial_nll_2, so the __name__ assignment below
# also renames mc_multinomial_nll_2 (both names refer to the same object).
twochannel_multinomial_nll = mc_multinomial_nll_2
twochannel_multinomial_nll.__name__ = "twochannel_multinomial_nll"

AVAILABLE = [
    "multinomial_nll", "twochannel_multinomial_nll", "mc_multinomial_nll_1",
    "mc_multinomial_nll_2", "MultichannelMultinomialNLL",
    "CountsMultinomialNLL", "PoissonMultinomialNLL"
]

Example #12

import torch as T
import torch.optim as optim
import torch.nn as nn
from gin import config

from .. import optim as nnt_optim
from .. import zoo

# optimizers
config.external_configurable(optim.SGD, 'sgd', module='T.optim')
config.external_configurable(optim.Adam, 'adam', module='T.optim')
config.external_configurable(optim.Adadelta, 'adadelta', module='T.optim')
config.external_configurable(optim.Adagrad, 'adagrad', module='T.optim')
config.external_configurable(optim.SparseAdam, 'sparse_adam', module='T.optim')
config.external_configurable(optim.Adamax, 'adamax', module='T.optim')
config.external_configurable(optim.AdamW, 'adamw', module='T.optim')
config.external_configurable(optim.ASGD, 'asgd', module='T.optim')
config.external_configurable(optim.LBFGS, 'lbfgs', module='T.optim')
config.external_configurable(optim.RMSprop, 'rmsprop', module='T.optim')
config.external_configurable(optim.Rprop, 'rprop', module='T.optim')
config.external_configurable(nnt_optim.AdaBound, 'adabound', module='nnt')
config.external_configurable(nnt_optim.Lookahead, 'lookahead', module='nnt')
config.external_configurable(nnt_optim.NAdam, 'nadam', module='nnt')

try:
    import apex
    config.external_configurable(apex.optimizers.FusedAdam, 'fusedadam', module='apex.optimizers')
    config.external_configurable(apex.optimizers.FusedSGD, 'fusedsgd', module='apex.optimizers')
    config.external_configurable(apex.optimizers.FusedNovoGrad, 'fusednovograd', module='apex.optimizers')
    config.external_configurable(apex.optimizers.FusedLAMB, 'fusedlamb', module='apex.optimizers')
    print('Apex fused optimizers are available for GIN')
except ImportError:
    # Completion assumed from context: apex is optional, so the fused
    # optimizers are simply skipped when it is not installed.
    pass
Example #13
"""External configuration .gin"""

from gin import config
from flax import linen as nn

config.external_configurable(nn.initializers.zeros, 'nn.initializers.zeros')
config.external_configurable(nn.initializers.ones, 'nn.initializers.ones')
config.external_configurable(nn.initializers.orthogonal,
                             'nn.initializers.orthogonal')
config.external_configurable(nn.initializers.variance_scaling,
                             'nn.initializers.variance_scaling')
Example #14
                # The snippet starts mid-method (inside a SeqModel method);
                # `out`, `mean`, and `flatten` come from the elided context.
                out[k] = mean(v)

        # flatten everything
        out = flatten(out, separator='/')
        return out

    def save(self, file_path):
        """Save model to a file
        """
        from bpnet.utils import write_pkl, SerializableLock
        # fix the serialization of _OPERATIVE_CONFIG_LOCK
        gin.config._OPERATIVE_CONFIG_LOCK = SerializableLock()
        write_pkl(self, file_path)

    @classmethod
    def load(cls, file_path):
        """Load model from a file
        """
        from bpnet.utils import read_pkl
        return read_pkl(file_path)

    @classmethod
    def from_mdir(cls, model_dir):
        """Load the model from pkl
        """
        return cls.load(os.path.join(model_dir, 'seq_model.pkl'))


# avoid the decorator so that we can pickle it
config.external_configurable(SeqModel)
Example #15
def _register_schedule(module):
  config.external_configurable(module, module='tf.keras.optimizers.schedules')
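
The call site is not shown; a minimal sketch of how the helper might be
applied to the Keras schedule classes (the list below is assumed):

import tensorflow as tf

for schedule in (tf.keras.optimizers.schedules.ExponentialDecay,
                 tf.keras.optimizers.schedules.PiecewiseConstantDecay,
                 tf.keras.optimizers.schedules.PolynomialDecay):
  _register_schedule(schedule)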
Example #16
def _register_callables(package, module, blacklist):
    for k in dir(package):
        if k not in blacklist:
            v = getattr(package, k)
            if callable(v):
                config.external_configurable(v, name=k, module=module)
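
A hypothetical invocation (not shown in the original): register every callable
in torch.optim under an 'optim' module, skipping the abstract base class.
Submodules such as optim.lr_scheduler are not callable, so the callable(v)
check skips them.

import torch.optim as optim

_register_callables(optim, module='optim', blacklist={'Optimizer'})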
Example #17
from torch import optim
from torch.optim import lr_scheduler
from torch import nn
from gin import config
import gin

# Optimizer
config.external_configurable(optim.Adam, "optim.Adam")
config.external_configurable(optim.Adadelta, "optim.Adadelta")
config.external_configurable(optim.Adagrad, "optim.Adagrad")
config.external_configurable(optim.Adamax, "optim.Adamax")
config.external_configurable(optim.RMSprop, "optim.RMSprop")
config.external_configurable(optim.SGD, "optim.SGD")

# Learning rate decay
config.external_configurable(lr_scheduler.StepLR, "lr_scheduler.StepLR")
config.external_configurable(lr_scheduler.MultiStepLR,
                             "lr_scheduler.MultiStepLR")
config.external_configurable(lr_scheduler.ExponentialLR,
                             "lr_scheduler.ExponentialLR")
config.external_configurable(lr_scheduler.CosineAnnealingLR,
                             "lr_scheduler.CosineAnnealingLR")
config.external_configurable(lr_scheduler.ReduceLROnPlateau,
                             "lr_scheduler.ReduceLROnPlateau")


@gin.configurable("lr_scheduler.LambdaLR", blacklist=["optimizer"])
def create_LambdaLR(optimizer, epoch_total, epoch_decay):
    def lambda_rule(epoch):
        epoch_stable = epoch_total - epoch_decay
        lr_l = 1.0 - max(0, epoch - epoch_stable) / float(epoch_decay + 1)
        return lr_l

    # Completion assumed from context: wrap the decay rule in a LambdaLR.
    return lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)