Example #1
    def models(self, params: Params):
        super().models(params)
        if params.pretrained:
            print("=> using pre-trained model '{}'".format(params.arch))
            model = models.__dict__[params.arch](pretrained=True)
        else:
            print("=> creating model '{}'".format(params.arch))
            model = models.__dict__[params.arch]()
        if params.gpu is not None:
            torch.cuda.set_device(params.gpu)
            model.cuda(params.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            params.batch_size = int(params.batch_size / params.ngpus_per_node)
            params.workers = int((params.workers + params.ngpus_per_node - 1) /
                                 params.ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(
                model, device_ids=[params.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
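This models hook comes from a Trainer-style class and omits its surrounding context. Below is a minimal sketch of that context, assuming torchvision supplies the model constructors and that the Params fields the method reads (arch, pretrained, gpu, batch_size, workers, ngpus_per_node) are filled in beforehand; the concrete values are illustrative only, not part of the original snippet:

import torch
import torchvision.models as models  # looked up above via models.__dict__[params.arch]
from thexp import Params

params = Params()
params.arch = 'resnet18'          # hypothetical choice; any torchvision model name works
params.pretrained = True
params.gpu = 0                    # or None to let DDP spread over all visible GPUs
params.batch_size = 256
params.workers = 8
params.ngpus_per_node = torch.cuda.device_count()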
Example #2
from thexp import Params

params = Params()
params.device = 'cuda:1'
params.epoch = 5
params.batch_size = 128
params.topk = (1, 4)
params.from_args()  # pick up command-line overrides for the values above
params.root = '/home/share/yanghaozhe/pytorchdataset'
params.dataloader = dict(shuffle=True, batch_size=32, drop_last=True)
params.optim = dict(lr=0.01, weight_decay=0.09, momentum=0.9)
params.choice('dataset', 'mnist', 'fmnist')  # restrict 'dataset' to the listed values
params.dataset = 'mnist'
params.bind('dataset', 'mnist', 'arch', 'simple')   # when dataset == 'mnist', arch becomes 'simple'
params.bind('dataset', 'fmnist', 'arch', 'simple')
params.bind('dataset', 'cifar10', 'arch', 'cnn13')
params.ema = True
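The dict-valued entries above are keyword-argument bundles for objects built later. A rough sketch of how they might be consumed, assuming they unpack like plain dicts and reusing the params object from above (the linear model is a hypothetical placeholder):

import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import MNIST

model = torch.nn.Linear(28 * 28, 10)                              # placeholder model
optimizer = torch.optim.SGD(model.parameters(), **params.optim)   # lr, weight_decay, momentum
dataset = MNIST(params.root, train=True, download=True,
                transform=transforms.ToTensor())
loader = DataLoader(dataset, **params.dataloader)                 # shuffle, batch_size, drop_last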
Example #3

from thexp import Params
params = Params()
params.epoch = 400
params.batch_size = 25
print(params)

from thexp import Params


class BaseParams(Params):
    def __init__(self):
        super().__init__()
        self.batch_size = 50
        self.topk = (1, 2, 3, 4)
        self.optim_option(lr=0.009, moment=0.9)
        # To declare the variable explicitly, you can use:
        # self.optim = self.optim_option(lr=0.009,moment=0.9)
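A base class like this is usually specialized per experiment. A short sketch of that pattern, assuming plain subclassing is all that is needed (TrainParams and its extra fields are hypothetical):

class TrainParams(BaseParams):
    def __init__(self):
        super().__init__()
        self.epoch = 100          # experiment-specific additions
        self.device = 'cuda:0'


params = TrainParams()
params.from_args()   # as in Example #2, pick up any command-line overrides
print(params.topk)   # (1, 2, 3, 4), inherited from BaseParams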