Example #1
 def test_eval_logic(self, dataloader, param: Params):
     from thexp.calculate import accuracy as acc
     param.topk = param.default([1, 5])
     with torch.no_grad():
         count_dict = Meter()
         for xs, labels in dataloader:
             preds = self.predict(xs)
             total, topk_res = acc.classify(preds, labels, topk=param.topk)
             count_dict["total"] += total
             for i, topi_res in zip(param.topk, topk_res):
                 count_dict["top{}".format(i)] += topi_res
     return count_dict
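
For reference, the counts this loop accumulates (a per-batch total plus one correct count per requested k) can be computed in plain PyTorch. The sketch below only assumes that acc.classify returns such a pair, which is how the loop consumes it; it is not thexp's implementation.

import torch

def topk_correct(preds: torch.Tensor, labels: torch.Tensor, topk=(1, 5)):
    # plain-PyTorch sketch of a per-batch top-k correct count
    maxk = max(topk)
    # indices of the maxk highest-scoring classes per sample, shape (batch, maxk)
    _, pred = preds.topk(maxk, dim=1, largest=True, sorted=True)
    # mark positions where a top-k prediction matches the ground-truth label
    correct = pred.eq(labels.view(-1, 1).expand_as(pred))
    counts = [correct[:, :k].any(dim=1).sum().item() for k in topk]
    return labels.size(0), counts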
Example #2
def test_trainer():
    trainer = MyTrainer(Params())

    trainer.params.eidx = 3
    fn = trainer.save_keypoint()
    trainer.train()
    assert trainer.params.eidx == trainer.params.epoch
    trainer.load_checkpoint(fn)
    assert trainer.params.eidx == 3
Example #3
 def models(self, params: Params):
     super().models(params)
     if params.pretrained:
         print("=> using pre-trained model '{}'".format(params.arch))
         model = models.__dict__[params.arch](pretrained=True)
     else:
         print("=> creating model '{}'".format(params.arch))
         model = models.__dict__[params.arch]()
     if params.gpu is not None:
         torch.cuda.set_device(params.gpu)
         model.cuda(params.gpu)
         # When using a single GPU per process and per
         # DistributedDataParallel, we need to divide the batch size
         # ourselves based on the total number of GPUs we have
         params.batch_size = int(params.batch_size / params.ngpus_per_node)
         params.workers = int((params.workers + params.ngpus_per_node - 1) /
                              params.ngpus_per_node)
         model = torch.nn.parallel.DistributedDataParallel(
             model, device_ids=[params.gpu])
     else:
         model.cuda()
         # DistributedDataParallel will divide and allocate batch_size to all
         # available GPUs if device_ids are not set
         model = torch.nn.parallel.DistributedDataParallel(model)
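
This method assumes the default process group has already been initialized before DistributedDataParallel is constructed. A minimal sketch of that setup; the nccl backend and env:// rendezvous are assumptions about how the job is launched:

import torch.distributed as dist

# one call per process, before any DistributedDataParallel construction;
# with init_method='env://', RANK and WORLD_SIZE are read from the
# environment variables set by the launcher
dist.init_process_group(backend='nccl', init_method='env://')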
Example #4
        from torch.optim import SGD
        self.model = MyModel()
        self.optim = SGD(self.model.parameters(), lr=params.lr)
        self.cross = nn.CrossEntropyLoss()

    def train_batch(self, eidx, idx, global_step, batch_data, params, device):
        optim, cross = self.optim, self.cross
        meter = Meter()
        xs, ys = batch_data

        # training logic
        logits = self.model(xs)
        meter.loss = cross(logits, ys)

        # backward pass
        meter.loss.backward()
        optim.step()
        optim.zero_grad()

        return meter
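
train_batch feeds xs through self.model, which this snippet does not define. A minimal hypothetical stand-in consistent with the logits / CrossEntropyLoss usage above; the layer sizes are made up for illustration:

import torch.nn as nn

class MyModel(nn.Module):
    # hypothetical placeholder: any module mapping a batch of inputs to class logits fits
    def __init__(self, in_features=784, n_classes=10):
        super().__init__()
        self.fc = nn.Linear(in_features, n_classes)

    def forward(self, xs):
        return self.fc(xs.flatten(1))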


params = Params()
params.epoch = 5
params.lr = 0.1

params.dataset = 'svhn'

trainer = MyTrainer(params)
trainer.train()
Example #5
"""
import sys
sys.path.insert(0, "../")
from thexp import __VERSION__
print(__VERSION__)
import time

from thexp import Experiment

exp = Experiment("expname")
print(exp.make_exp_dir("explevel"))
print(exp.makedir("testlevel"))

from thexp import Params

params = Params()
exp.add_plugin(
    "params", dict(_param_hash=params.hash(),
                   data=params.inner_dict.jsonify()))

from thexp import Logger

logger = Logger()
fn = logger.add_log_dir(exp.makedir('logger'))
exp.add_plugin('logger', dict(fn=fn, ))

time.sleep(1)
try:
    raise Exception("dddd")
except:
    pass
Example #6
from thexp import Params

params = Params()
params.device = 'cuda:1'
params.epoch = 5
params.batch_size = 128
params.topk = (1, 4)
params.from_args()
params.root = '/home/share/yanghaozhe/pytorchdataset'
params.dataloader = dict(shuffle=True, batch_size=32, drop_last=True)
params.optim = dict(lr=0.01, weight_decay=0.09, momentum=0.9)
params.choice('dataset', 'mnist', 'fmnist')
params.dataset = 'mnist'
params.bind('dataset', 'mnist', 'arch', 'simple')
params.bind('dataset', 'fmnist', 'arch', 'simple')
params.bind('dataset', 'cifar10', 'arch', 'cnn13')
params.ema = True
Example #7
import argparse
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from thexp import Params, Trainer, callbacks

model_names = sorted(name for name in models.__dict__
                     if name.islower() and not name.startswith("__")
                     and callable(models.__dict__[name]))

parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
params = Params()

# model architecture: one of model_names (default: resnet18)
params.arch = 'resnet18'
params.workers = 4  # number of data loading workers (default: 4)
params.epochs = 90  # number of total epochs to run
params.start_epoch = 0  # manual epoch number (useful on restarts)

# mini-batch size (default: 256); this is the total batch size of all GPUs
# on the current node when using Data Parallel or Distributed Data Parallel
params.batch_size = 12
params.learning_rate = 0.1
params.lr = params.learning_rate  # initial learning rate
params.momentum = 0.9  # momentum
params.weight_decay = 1e-4  # weight decay (default: 1e-4)
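
These fields mirror the hyperparameters torch.optim.SGD expects. A sketch of how they would typically be consumed further down the script, reusing the model construction pattern from Example #3; the optimizer variable name is arbitrary:

# hypothetical consumption of the hyperparameters defined above
model = models.__dict__[params.arch]()
optimizer = torch.optim.SGD(model.parameters(),
                            lr=params.lr,
                            momentum=params.momentum,
                            weight_decay=params.weight_decay)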
Example #8

from thexp import Params
params = Params()
params.epoch = 400
params.batch_size = 25
print(params)

from thexp import Params


class BaseParams(Params):
    def __init__(self):
        super().__init__()
        self.batch_size = 50
        self.topk = (1, 2, 3, 4)
        self.optim_option(lr=0.009, moment=0.9)
        # to declare the attribute explicitly, you can also write:
        # self.optim = self.optim_option(lr=0.009,moment=0.9)
Example #9
"""

"""

from thexp import Params
params = Params()
print(params)

params.epoch = 400
params.batch_size = 25
print(params)

from thexp import Params


class MyParams(Params):
    def __init__(self):
        super().__init__()
        self.batch_size = 50
        self.topk = (1, 2, 3, 4)
        self.optim = dict(lr=0.009, moment=0.9)


params = MyParams()
print(params)

from thexp import Params
params = Params()
params.choice("dataset", "mnist", "cifar10", "cifar100", "svhn")
params.arange("thresh", 5, 0, 20)
print(params)
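
params.choice presumably declares the candidate values a key may take. A plain-Python illustration of that kind of constraint; this shows the concept only, not thexp's implementation:

# plain-Python illustration of a "choice"-style constraint (not thexp internals)
ALLOWED = {"dataset": {"mnist", "cifar10", "cifar100", "svhn"}}

def set_param(store, key, value):
    # reject values outside the declared candidate set
    if key in ALLOWED and value not in ALLOWED[key]:
        raise ValueError("{} must be one of {}, got {!r}".format(
            key, sorted(ALLOWED[key]), value))
    store[key] = value

store = {}
set_param(store, "dataset", "mnist")        # accepted
# set_param(store, "dataset", "imagenet")   # would raise ValueError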
Example #10
def get_params():
    p = Params()
    p.git_commit = False
    return p
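
A typical consumption of this helper, mirroring how the other examples construct a Trainer; MyTrainer stands in for whatever Trainer subclass the caller defines, and git_commit = False presumably just disables thexp's automatic commit bookkeeping:

# hypothetical usage of get_params()
trainer = MyTrainer(get_params())
trainer.train()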
Example #11

from thexp import Trainer, Params
import random

class myTrainer(Trainer):
    pass

trainer = myTrainer(Params())

for i in range(50):
    trainer.logger.info(i)

for i in range(20):
    trainer.writter.add_scalar("test", random.random(), i)

# trainer.saver
# trainer.rnd


# ======================================

import torch.nn as nn
class MyModel(nn.Module):
    pass  # placeholder; the full definition is not shown in this snippet
Example #12

from thexp import Params
params = Params()
print(params)

params.epoch = 400
params.batch_size = 25
print(params)

from thexp import Params


class MyParams(Params):
    def __init__(self):
        super().__init__()
        self.batch_size = 50
        self.topk = (1, 2, 3, 4)
        self.optim = dict(lr=0.009, moment=0.9)