Code Example #1
def _register_classes(module, superclass, prefix=None, sep='.'):
    """Register with mlconfig every subclass of `superclass` defined in `module`."""
    for name in dir(module):
        attr = getattr(module, name)

        # Only consider classes derived from `superclass`, skipping the base class itself.
        if isinstance(attr, type) and issubclass(attr, superclass):
            if attr is superclass:
                continue

            # Optionally namespace the registered name, e.g. "models.ResNet18".
            if prefix is not None:
                name = prefix + sep + name

            mlconfig.register(attr, name=name)
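The helper above walks dir(module) and registers every subclass of superclass it finds, optionally namespacing the registered name with prefix. A minimal sketch of how it might be called, assuming a hypothetical models package containing nn.Module subclasses:

import torch.nn as nn

import models  # hypothetical package defining e.g. ResNet18, DenseNet121

# Registers the classes under names like "models.ResNet18".
_register_classes(models, nn.Module, prefix='models')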
Code Example #2
import mlconfig
from torch import optim

from .rmsprop import TFRMSprop

mlconfig.register(optim.SGD)
mlconfig.register(optim.Adam)

mlconfig.register(optim.lr_scheduler.MultiStepLR)
mlconfig.register(optim.lr_scheduler.StepLR)
mlconfig.register(optim.lr_scheduler.ExponentialLR)
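Once optimizers and schedulers are registered like this, mlconfig typically instantiates them from a YAML config whose name field matches the registered class name. A minimal sketch, assuming mlconfig's usual load/factory interface and a hypothetical config.yaml:

# config.yaml (hypothetical)
#   optimizer:
#     name: Adam
#     lr: 0.001
#   scheduler:
#     name: StepLR
#     step_size: 30
#     gamma: 0.1

import mlconfig
import torch.nn as nn

model = nn.Linear(10, 2)  # placeholder model
config = mlconfig.load('config.yaml')
optimizer = config.optimizer(model.parameters())  # -> optim.Adam(model.parameters(), lr=0.001)
scheduler = config.scheduler(optimizer)           # -> StepLR(optimizer, step_size=30, gamma=0.1)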
Code Example #3
File: loss.py  Project: pizard/Active-Passive-Losses
import torch
import torch.nn.functional as F
import numpy as np
import mlconfig
mlconfig.register(torch.nn.CrossEntropyLoss)

if torch.cuda.is_available():
    torch.backends.cudnn.benchmark = True
    if torch.cuda.device_count() > 1:
        device = torch.device('cuda:0')
    else:
        device = torch.device('cuda')
else:
    device = torch.device('cpu')


@mlconfig.register
class SCELoss(torch.nn.Module):
    def __init__(self, alpha, beta, num_classes=10):
        super(SCELoss, self).__init__()
        self.device = device
        self.alpha = alpha
        self.beta = beta
        self.num_classes = num_classes
        self.cross_entropy = torch.nn.CrossEntropyLoss()

    def forward(self, pred, labels):
        # CCE
        ce = self.cross_entropy(pred, labels)

        # RCE
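The snippet is cut off before the RCE term. For reference, the reverse cross-entropy half of SCE (Wang et al., 2019) is commonly computed roughly as below inside forward; the clamping constants are typical choices and are not taken from this file:

        pred = F.softmax(pred, dim=1)
        pred = torch.clamp(pred, min=1e-7, max=1.0)
        label_one_hot = F.one_hot(labels, self.num_classes).float().to(self.device)
        label_one_hot = torch.clamp(label_one_hot, min=1e-4, max=1.0)
        rce = -torch.sum(pred * torch.log(label_one_hot), dim=1)

        # Symmetric cross entropy: weighted sum of the CE and RCE terms.
        return self.alpha * ce + self.beta * rce.mean()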
Code Example #4
def registerClasses():
    global registered
    if not registered:
        mlconfig.register(ResCNN)
        mlconfig.register(CachedNonLinearSelfPlay)
        mlconfig.register(CachedLinearSelfPlay)
        mlconfig.register(RemoteEvaluationAccess)
        mlconfig.register(EvaluationWorker)
        mlconfig.register(LocalEvaluationAccess)
        mlconfig.register(FakeEvaluationAccess)
        mlconfig.register(TreeSelfPlayWorker)
        mlconfig.register(LearntThinkDecider)
        mlconfig.register(PointsGaussServerLeague)
        mlconfig.register(EloGaussServerLeague)
        mlconfig.register(LeaguePlayerAccess)
        mlconfig.register(FixedPlayerAccess)
        mlconfig.register(FixedThinkDecider)
        mlconfig.register(LeagueSelfPlayerWorker)
        mlconfig.register(NoopPolicyUpdater)
        mlconfig.register(NoopGameReporter)
        mlconfig.register(DatasetPolicyTester)
        mlconfig.register(ShuffleBatchedPolicyPlayer)
        mlconfig.register(SolverBatchedPolicyPlayer)
        mlconfig.register(PolicyPlayer)
        mlconfig.register(PolicyIteratorPlayer)
        mlconfig.register(LinearSelfPlayWorker)
        mlconfig.register(MctsPolicyIterator)
        mlconfig.register(TemperatureMoveDecider)
        mlconfig.register(MNKGameState)
        mlconfig.register(SingleProcessReporter)
        mlconfig.register(SingleProcessUpdater)
        mlconfig.register(PytorchPolicy)
        mlconfig.register(dict)
        mlconfig.register(PlayVs)
        mlconfig.register(HumanMNKInterface)
        mlconfig.register(Connect4GameState)
        mlconfig.register(HumanConnect4Interface)
        mlconfig.register(RandomPlayPolicy)
        mlconfig.register(PonsSolver)
        mlconfig.register(TestDatabaseGenerator)
        mlconfig.register(BestPlayPolicy)
        mlconfig.register(DistributedNetworkUpdater)
        mlconfig.register(DistributedReporter)
        mlconfig.register(TrainingWorker)
        mlconfig.register(ConstantTrainingWindowManager)
        mlconfig.register(StreamTrainingWorker)
        mlconfig.register(ConstantWindowSizeManager)
        mlconfig.register(LrStepSchedule)
        mlconfig.register(SemiPerfectPolicy)
        mlconfig.register(TestDatabaseGenerator2)
        mlconfig.register(DatasetPolicyTester2)
        mlconfig.register(FilePolicyUpdater)
        mlconfig.register(SupervisedNetworkTrainer)
        mlconfig.register(DistributedNetworkUpdater2)
        mlconfig.register(StreamTrainingWorker2)
        mlconfig.register(OneCycleSchedule)
        mlconfig.register(SlowWindowSizeManager)
        registered = True
Code Example #5
File: main.py  Project: HanxunH/Unlearnable-Examples
import argparse
import datetime
import os
import shutil
import time
import numpy as np
import dataset
import mlconfig
import torch
import util
import madrys
import models
from evaluator import Evaluator
from trainer import Trainer
mlconfig.register(madrys.MadrysLoss)

# General Options
parser = argparse.ArgumentParser(description='ClasswiseNoise')
parser.add_argument('--seed', type=int, default=0, help='seed')
parser.add_argument('--version', type=str, default="resnet18")
parser.add_argument('--exp_name', type=str, default="test_exp")
parser.add_argument('--config_path', type=str, default='configs/cifar10')
parser.add_argument('--load_model', action='store_true', default=False)
parser.add_argument('--data_parallel', action='store_true', default=False)
parser.add_argument('--train', action='store_true', default=False)
parser.add_argument('--save_frequency', default=-1, type=int)
# Datasets Options
parser.add_argument('--train_face', action='store_true', default=False)
parser.add_argument('--train_portion', default=1.0, type=float)
parser.add_argument('--train_batch_size',
                    default=128,
Code Example #6
import mlconfig
from torch import optim

mlconfig.register(optim.Adam)

mlconfig.register(optim.lr_scheduler.StepLR)
Code Example #7
import torch
import torch.nn as nn
import torch.nn.functional as F
import mlconfig
import torchvision
mlconfig.register(torchvision.models.resnet50)
mlconfig.register(torch.optim.SGD)
mlconfig.register(torch.optim.Adam)
mlconfig.register(torch.optim.lr_scheduler.MultiStepLR)
mlconfig.register(torch.optim.lr_scheduler.CosineAnnealingLR)
mlconfig.register(torch.optim.lr_scheduler.StepLR)
mlconfig.register(torch.optim.lr_scheduler.ExponentialLR)


class ConvBrunch(nn.Module):
    def __init__(self, in_planes, out_planes, kernel_size=3):
        super(ConvBrunch, self).__init__()
        padding = (kernel_size - 1) // 2
        self.out_conv = nn.Sequential(
            nn.Conv2d(in_planes,
                      out_planes,
                      kernel_size=kernel_size,
                      padding=padding), nn.BatchNorm2d(out_planes), nn.ReLU())

    def forward(self, x):
        return self.out_conv(x)


@mlconfig.register
class ToyModel(nn.Module):
    def __init__(self, type='CIFAR10'):
Code Example #8
import mlconfig
import torch
import torch.nn as nn
import torchvision

from . import DenseNet, ResNet, ToyModel, inception_resnet_v1

mlconfig.register(torch.optim.SGD)
mlconfig.register(torch.optim.Adam)
mlconfig.register(torch.optim.lr_scheduler.MultiStepLR)
mlconfig.register(torch.optim.lr_scheduler.CosineAnnealingLR)
mlconfig.register(torch.optim.lr_scheduler.StepLR)
mlconfig.register(torch.optim.lr_scheduler.ExponentialLR)
mlconfig.register(torch.nn.CrossEntropyLoss)

# Models
mlconfig.register(ResNet.ResNet)
mlconfig.register(ResNet.ResNet18)
mlconfig.register(ResNet.ResNet34)
mlconfig.register(ResNet.ResNet50)
mlconfig.register(ResNet.ResNet101)
mlconfig.register(ResNet.ResNet152)
mlconfig.register(ToyModel.ToyModel)
mlconfig.register(DenseNet.DenseNet121)
mlconfig.register(inception_resnet_v1.InceptionResnetV1)
# torchvision models
mlconfig.register(torchvision.models.resnet18)
mlconfig.register(torchvision.models.resnet50)
mlconfig.register(torchvision.models.densenet121)

# CUDA Options