Code example #1 (score: 0)
File: test_options.py — project: jleesdev/meshdnns
 def initialize(self):
     """Define test-time options on top of the shared base options.

     Registers the test-only command-line flags on ``self.parser`` and
     marks this run as non-training via ``self.is_train``.
     """
     BaseOptions.initialize(self)
     self.parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
     # NOTE: the original author marked --phase with a "todo delete" comment.
     self.parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
     self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
     self.parser.add_argument('--num_aug', type=int, default=1, help='# of augmentation files')
     self.parser.add_argument('--dropout_p', type=float, default=0, help='dropout layer probability')
     self.is_train = False
Code example #2 (score: 0)
 def initialize(self):
     """Test options: the shared base options plus a batch-size flag.

     Sets ``self.is_Train = False`` to mark this as a non-training run.
     """
     BaseOptions.initialize(self)
     self.is_Train = False
     self.parser.add_argument('--batchsize', type=int, default=3, help='input batch size')
Code example #3 (score: 0)
 def initialize(self):
     """Register GAN training options on top of the shared base options.

     Adds generator/discriminator learning rates, Adam betas, epoch
     bookkeeping, and loss weights; marks the run as training.
     """
     BaseOptions.initialize(self)
     # NOTE(review): other option classes in similar projects spell this
     # is_train / isTrain — confirm against BaseOptions before relying on it.
     self.is_Train = True
     add = self.parser.add_argument  # shorthand for the repeated calls below
     add('--batchsize', type=int, default=64, help='input batch size')
     add('--lr_G', type=float, default=0.0002, help='initial learning rate of Generator')
     add('--lr_D', type=float, default=0.0002, help='initial learning rate of Discriminator')
     # BUG FIX: help text read 'the starting count epoch count' (duplicated word).
     add('--count_epoch', type=int, default=0, help='the starting epoch count')
     add('--epochs', type=int, default=10000, help='number of epochs for train')
     add('--beta1', type=float, default=0.5, help='adam optimizer parameter')
     add('--beta2', type=float, default=0.999, help='adam optimizer parameter')
     add('--save_epoch_freq', type=int, default=5, help='frequency of saving the latest results')
     add('--D_interval', type=int, default=20, help='the interval of each optimization of D')
     add('--w_L1', type=int, default=1, help='the weight of the L1 loss')
Code example #4 (score: 0)
    def initialize(self, parser):
        """Add test-time options to *parser* and return it.

        The parser is first populated by ``BaseOptions.initialize`` and
        then extended with test-specific flags.
        """
        parser = BaseOptions.initialize(self, parser)
        add = parser.add_argument  # shorthand for the repeated calls below
        add('--batch_size', type=int, default=4, help='input batch size')
        add('--mode', type=str, default='test')
        add('--model_load_path', type=str, default='checkpoints', help='dir for model saving')
        add('--save_path', type=str, default='', help='result saving path')
        return parser
Code example #5 (score: 0)
    def initialize(self, parser):
        """Add training options to *parser*, override base defaults, return it."""
        parser = BaseOptions.initialize(self, parser)
        add = parser.add_argument  # shorthand for the repeated calls below
        add('--batch_size', type=int, default=16, help='input batch size')
        add('--lr', type=float, default=0.0001, help='initial learning rate for Adam')
        add('--mode', type=str, default='train')
        add('--weight-decay', '--wd', default=1e-4, type=float, metavar='W', help='weight decay (default: 1e-4) for Adam')
        # Base-option overrides. Note that batch_size, declared with
        # default=16 above, is immediately re-defaulted to 8 here.
        parser.set_defaults(model_save_path='checkpoints', epochs=21, batch_size=8)
        return parser
Code example #6 (score: 0)
    def initialize(self):
        """Register training-time options on top of the base options.

        Covers display/save cadence, the optimizer schedule, discriminator
        configuration, and temporal (video) training parameters; finally
        marks the run as training via ``self.isTrain``.
        """
        BaseOptions.initialize(self)
        add = self.parser.add_argument  # shorthand for the repeated calls below

        # display / checkpointing cadence
        add('--display_freq', type=int, default=100, help='frequency of showing training results on screen')
        add('--print_freq', type=int, default=100, help='frequency of showing training results on console')
        add('--save_latest_freq', type=int, default=1000, help='frequency of saving the latest results')
        add('--save_epoch_freq', type=int, default=1, help='frequency of saving checkpoints at the end of epochs')
        add('--continue_train', action='store_true', help='continue training: load the latest model')
        add('--phase', type=str, default='train', help='train, val, test, etc')
        add('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')

        # optimizer / GAN objective
        add('--niter', type=int, default=10, help='# of iter at starting learning rate')
        add('--niter_decay', type=int, default=10, help='# of iter to linearly decay learning rate to zero')
        add('--beta1', type=float, default=0.5, help='momentum term of adam')
        add('--lr', type=float, default=0.0002, help='initial learning rate for adam')
        add('--TTUR', action='store_true', help='Use TTUR training scheme')
        add('--gan_mode', type=str, default='ls', help='(ls|original|hinge)')
        add('--pool_size', type=int, default=1, help='the size of image buffer that stores previously generated images')
        add('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')

        # for discriminators
        add('--num_D', type=int, default=2, help='number of patch scales in each discriminator')
        add('--n_layers_D', type=int, default=3, help='number of layers in discriminator')
        add('--no_vgg', action='store_true', help='do not use VGG feature matching loss')
        add('--no_ganFeat', action='store_true', help='do not match discriminator features')
        add('--lambda_feat', type=float, default=10.0, help='weight for feature matching')
        add('--sparse_D', action='store_true', help='use sparse temporal discriminators to save memory')

        # for temporal
        add('--lambda_T', type=float, default=10.0, help='weight for temporal loss')
        add('--lambda_F', type=float, default=10.0, help='weight for flow loss')
        add('--n_frames_D', type=int, default=3, help='number of frames to feed into temporal discriminator')
        add('--n_scales_temporal', type=int, default=2, help='number of temporal scales in the temporal discriminator')
        add('--max_frames_per_gpu', type=int, default=1, help='max number of frames to load into one GPU at a time')
        add('--max_frames_backpropagate', type=int, default=1, help='max number of frames to backpropagate')
        add('--max_t_step', type=int, default=1, help='max spacing between neighboring sampled frames. If greater than 1, the network may randomly skip frames during training.')
        add('--n_frames_total', type=int, default=30, help='the overall number of frames in a sequence to train with')
        add('--niter_step', type=int, default=5, help='how many epochs do we change training batch size again')
        add('--niter_fix_global', type=int, default=0, help='if specified, only train the finest spatial layer for the given iterations')

        self.isTrain = True
Code example #7 (score: 0)
File: train_options.py — project: jleesdev/meshdnns
 def initialize(self):
     """Register mesh-network training options on top of the base options.

     Covers logging/checkpoint cadence, the optimizer and learning-rate
     schedule, data-augmentation knobs, and tensorboard visualization;
     finally marks the run as training via ``self.is_train``.
     """
     BaseOptions.initialize(self)
     add = self.parser.add_argument  # shorthand for the repeated calls below

     # logging / checkpoint cadence
     add('--print_freq', type=int, default=10, help='frequency of showing training results on console')
     add('--save_latest_freq', type=int, default=250, help='frequency of saving the latest results')
     add('--save_epoch_freq', type=int, default=1, help='frequency of saving checkpoints at the end of epochs')
     add('--run_test_freq', type=int, default=1, help='frequency of running test in training script')
     add('--continue_train', action='store_true', help='continue training: load the latest model')
     add('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')
     add('--phase', type=str, default='train', help='train, val, test, etc')
     add('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')

     # optimizer / learning-rate schedule
     add('--niter', type=int, default=150, help='# of iter at starting learning rate')
     add('--niter_decay', type=int, default=150, help='# of iter to linearly decay learning rate to zero')
     add('--beta1', type=float, default=0.9, help='momentum term of adam')
     add('--lr', type=float, default=0.0005, help='initial learning rate for adam')
     add('--lr_policy', type=str, default='lambda', help='learning rate policy: lambda|step|plateau')
     add('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')
     add('--reg_weight', type=float, default=0, help='l1 regularization weight')

     # data augmentation stuff
     add('--num_aug', type=int, default=5, help='# of augmentation files')
     add('--scale_verts', action='store_true', help='non-uniformly scale the mesh e.g., in x, y or z')
     add('--slide_verts', type=float, default=0.1, help='percent vertices which will be shifted along the mesh surface')
     add('--flip_edges', type=float, default=0.1, help='percent of edges to randomly flip')

     # tensorboard visualization
     add('--no_vis', action='store_true', help='will not use tensorboard')
     add('--verbose_plot', action='store_true', help='plots network weights, etc.')
     # BUG FIX: --optim's help text was a copy-paste of --verbose_plot's
     # ('plots network weights, etc.'); describe the option itself instead.
     add('--optim', type=str, default='Adam', help='optimizer to use')
     add('--dropout_p', type=float, default=0, help='dropout layer probability')
     self.is_train = True
Code example #8 (score: 0)
File: train.py — project: Waxyoung/face_landmark
import sys
import os
import torch.optim as optim
import torch
import collections
import numpy as np
import math
from multiprocessing import cpu_count
from base_options import BaseOptions
from my_face_dataset import FaceDataSet

# Lightweight record pairing a network symbol with its optimizer.
SymbolWithOptimizer = collections.namedtuple(
    'SymbolWithOptimizer', 'symbol optimizer')

if __name__ == "__main__":
    opt = BaseOptions().parse()

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    num_workers = cpu_count() if torch.cuda.is_available() else 0

    criterion = torch.nn.MSELoss()

    symbol_list = []
    from importlib import import_module
    for symbol_file_name in opt.symbols:
        symbol_module = import_module('symbols.' + symbol_file_name)
        symbol = symbol_module.get_symbol()
        if opt.load_epoch:
            mode_ends = '_latest.pth' if opt.load_epoch == 'latest' else '%05d.pth' % opt.load_epoch
            checkpoint_path = os.path.join(opt.checkpoints_dir,
Code example #9 (score: 0)
    def initialize(self):
        """Test-time options: only ``--phase`` beyond the base options.

        Marks the run as non-training via ``self.isTrain``.
        """
        BaseOptions.initialize(self)
        self.parser.add_argument('--phase',
                                 type=str,
                                 default='test',
                                 help='train, val, test, etc')
        self.isTrain = False
Code example #10 (score: 0)
File: train_options.py — project: zsb87/SenGAN
    def initialize(self):
        """Register training options on top of the base options.

        Covers display/print cadence, epoch counts, the optimizer and
        learning-rate schedule, and checkpoint resuming; sets
        ``self.initialized`` and ``self.isTrain`` at the end.

        NOTE: the upstream template's visdom display, save-frequency,
        phase, gan_mode and pool_size options were disabled (commented
        out) in the original source and are intentionally not registered.
        """
        BaseOptions.initialize(self)
        add = self.parser.add_argument  # shorthand for the repeated calls below

        def _str2bool(value):
            # BUG FIX: the original used type=bool, for which argparse
            # treats ANY non-empty string (including "False") as True.
            # Parse the common false-y spellings explicitly instead.
            return str(value).lower() not in ('false', '0', 'no', 'n', '')

        # visdom and HTML visualization parameters
        add('--display_freq', type=int, default=400, help='frequency of showing training results on screen')
        add('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html')
        add('--print_freq', type=int, default=100, help='frequency of showing training results on console')
        add('--eval_freq', type=int, default=100, help='frequency of evaluating results')

        # network saving and loading parameters
        add('--continue_train', action='store_true', help='continue training: load the latest model')
        add('--start_epoch', type=int, default=0, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')

        # training parameters
        add('--n_epochs', type=int, default=100, help='number of epochs with the initial learning rate')
        add('--n_epochs_decay', type=int, default=100, help='number of epochs to linearly decay learning rate to zero')
        add('--beta1', type=float, default=0.5, help='momentum term of adam')
        add('--lr', type=float, default=0.0002, help='initial learning rate for adam')
        add('--lr_policy', type=str, default='linear', help='learning rate policy. [linear | step | plateau | cosine]')
        add('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')
        add('--resume', type=_str2bool, default=True, help='load pretrained model or not')

        self.initialized = True
        self.isTrain = True