Example #1
 def initialize(self):
     # experiment specifics
     BaseOptions.initialize(self)
     self.argument_parser.add_argument(
         '--dataBalance',
         type=int,
         help=
         'expand all data from different classes to the same scale with augmentation'
     )
     self.argument_parser.add_argument(
         '--borderCropRate',
         type=float,
         default=0.5,
         help='trim image borders at the given rate')
     self.argument_parser.add_argument(
         '--padBorderSize',
         type=int,
         default=500,
         help='rescale images to the given resolution')
     self.argument_parser.add_argument(
         '--massCrop',
         type=bool,
         help='use OpenCV to crop out the lesion without the background')
     self.argument_parser.add_argument(
         '--off',
         type=bool,
         help=
         'do nothing but move images and labels from the raw path to the processed path'
     )
     self.argument_parser.add_argument(
         '--testSamples',
         type=int,
         help='dev dataset set aside from the training dataset')
     self.initialized = False
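
All of these snippets extend a BaseOptions-style wrapper around argparse and register their arguments inside initialize(). Two variants appear below: subclasses that mutate self.argument_parser (or self.parser) directly, and subclasses whose initialize(self, parser) receives and returns the parser. The base class itself is never shown, so the following is only a minimal sketch of what the first variant is assumed to look like; the attribute names argument_parser and initialized come from Example #1, while the shared arguments and the parse() helper are purely illustrative.

import argparse

class BaseOptions:
    # Minimal illustrative sketch of the assumed parent class; not the original
    # source of any project quoted on this page.
    def __init__(self):
        self.argument_parser = argparse.ArgumentParser()
        self.initialized = False

    def initialize(self):
        # shared options that every subclass extends in its own initialize()
        self.argument_parser.add_argument('--name', type=str, default='experiment',
                                          help='experiment name (illustrative)')
        self.argument_parser.add_argument('--checkpointsDir', type=str, default='./checkpoints',
                                          help='where checkpoints are stored (illustrative)')
        self.initialized = True

    def parse(self):
        # subclasses call their own initialize() before parsing
        if not self.initialized:
            self.initialize()
        return self.argument_parser.parse_args()

A subclass would then be driven roughly as: opt = SomeOptions(); opt.initialize(); args = opt.parse().
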
Example #2
 def initialize(self):
     # experiment specifics
     BaseOptions.initialize(self)
     self.argument_parser.add_argument('--mode',
                                       type=str,
                                       default='train',
                                       help='model mode')
     self.argument_parser.add_argument(
         '--date',
         type=str,
         help='load a trained model specified by date to continue training')
     self.argument_parser.add_argument(
         '--time',
         type=str,
         help='load a trained model specified by time to continue training')
     self.argument_parser.add_argument(
         '--autoAugments',
         type=bool,
         help='activate data auto augment,true or false')
     self.argument_parser.add_argument(
         '--optimizer',
         type=str,
         help='choices: adam, sgd',
         choices=['adam', 'sgd'])
     self.argument_parser.add_argument('--learningRate',
                                       type=float,
                                       help='learningRate')
     self.argument_parser.add_argument(
         '--lossfunction',
         type=str,
         help='choices: cross, focalloss',
         choices=['cross', 'focalloss'])
     # self.argument_parser.add_argument('--centerCropSize', type=int, action='append', help='center crop size')
     self.initialized = False
Example #3
 def initialize(self):
     # experiment specifics
     BaseOptions.initialize(self)
     self.argument_parser.add_argument('--lossDescendThreshold',
                                       type=float,
                                       help='tell the search when to stop')
     self.initialized = False
Example #4
 def initialize(self):
     BaseOptions.initialize(self)
     self.parser.add_argument('--ntest',
                              type=int,
                              default=float("inf"),
                              help='# of test examples.')
     self.parser.add_argument('--results_dir',
                              type=str,
                              default='./results/',
                              help='saves results here.')
     self.parser.add_argument('--aspect_ratio',
                              type=float,
                              default=1.0,
                              help='aspect ratio of result images')
     self.parser.add_argument('--phase',
                              type=str,
                              default='test',
                              help='train, val, test, etc')
     self.parser.add_argument(
         '--which_epoch',
         type=str,
         default='latest',
         help='which epoch to load? set to latest to use latest cached model'
     )
     self.parser.add_argument('--how_many',
                              type=int,
                              default=50,
                              help='how many test images to run')
     self.isTrain = False
Example #5
 def initialize(self, parser):
     BaseOptions.initialize(self, parser)
     parser.add_argument(
         '--display_freq',
         type=int,
         default=200,
         help='frequency of showing training results on screen')
     return parser
Example #6
 def initialize(self, parser):
     BaseOptions.initialize(self, parser)
     parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
     parser.add_argument('--how_many', type=int, default=float("inf"), help='how many test images to run')       
     parser.set_defaults(serial_batches=True)
     parser.set_defaults(no_flip=True)
     parser.set_defaults(phase='test')
     parser.set_defaults(load_from_opt_file=True)
     if not torch.cuda.is_available():
         parser.set_defaults(gpu_ids="-1")
     self.isTrain = False
     return parser
Example #7
def save_new_edges(filename):
    opt = BaseOptions()
    opt.initialize()
    opt.num_aug = 1
    mesh = from_scratch(filename, opt)
    print(dir(mesh))
    edges = np.array(mesh.edges)
    edge_file = filename.replace('.obj', '.edges')
    np.savetxt(edge_file, edges, fmt='%d')

    vs = np.array(mesh.vs)
    v_file = filename.replace('.obj', '.vs')
    np.savetxt(v_file, vs, fmt='%f')
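
A hedged usage sketch for save_new_edges above: the .obj path is hypothetical, and BaseOptions / from_scratch are assumed to come from MeshCNN-style code, as in the example itself.

# hypothetical input path; this writes example.edges and example.vs
# next to the .obj file
save_new_edges('meshes/example.obj')
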
Example #8
 def initialize(self):
     # experiment specifics
     BaseOptions.initialize(self)
     # self.argument_parser.add_argument('--mode', type=str,  default='train', help='model mode')
     # self.argument_parser.add_argument('--cof', type=int,  help='coefficient')
     # self.argument_parser.add_argument('--originalSize', type=int,action='append', help='activate data auto augment,true or false')
     # self.argument_parser.add_argument('--downLayerNumber', type=int, help='activate data auto augment,true or false')
     # self.argument_parser.add_argument('--upLayerNumber', type=int, help='activate data auto augment,true or false')
     self.argument_parser.add_argument('--learningRate',
                                       type=float,
                                       help='learningRate')
     # self.argument_parser.add_argument('--batchSize', type=int, help='batch size')
     self.initialized = False
Example #9
    def initialize(self):
        # experiment specifics
        BaseOptions.initialize(self)
        self.argument_parser.add_argument('--mode',
                                          type=str,
                                          default='test',
                                          help='model mode')
        self.argument_parser.add_argument('--date',
                                          type=str,
                                          help='the date of the trained model')
        self.argument_parser.add_argument('--time',
                                          type=str,
                                          help='the time of the trained model')

        self.initialized = False
Example #10
    def initialize(self, parser):
        parser = BaseOptions.initialize(self, parser)  # define shared options
        parser.add_argument('--segmap_path',
                            required=True,
                            help='path to the segmap label image')
        parser.add_argument('--photo_path',
                            required=True,
                            help='path to the photo image')
        parser.add_argument(
            '--output_dir',
            type=str,
            required=True,
            default='./results/cityscapes_predictions',
            help='Directory the output image will be written to.')

        parser.add_argument('--aspect_ratio',
                            type=float,
                            default=1.0,
                            help='aspect ratio of result images')
        parser.add_argument('--phase',
                            type=str,
                            default='val',
                            help='train, val, test, etc')
        # Dropout and Batchnorm has different behaviour during training and test.
        parser.add_argument('--eval',
                            action='store_true',
                            help='use eval mode during test time.')
        # rewrite default values
        parser.set_defaults(model='test')
        # To avoid cropping, the load_size should be the same as crop_size
        parser.set_defaults(load_size=parser.get_default('crop_size'))
        self.isTrain = False
        return parser
Example #11
    def initialize(self, parser):
        parser = BaseOptions.initialize(self, parser)
        parser.add_argument(
            '--store_feature',
            type=str,
            default=
            '/home/visiting/Projects/levishery/reconstruction/vangogh_features.json'
        )
        parser.add_argument(
            '--store_pca',
            type=str,
            default=
            '/home/visiting/Projects/levishery/reconstruction/utils/vangogh.pca'
        )
        parser.add_argument(
            '--store_index',
            type=str,
            default=
            '/home/visiting/Projects/levishery/reconstruction/vangogh_index.json'
        )
        parser.add_argument(
            '--result_dir',
            type=str,
            default='/home/visiting/Projects/levishery/reconstruction/result')
        parser.add_argument('--phase',
                            type=str,
                            default='test',
                            help='train, val, test, etc')

        self.isTrain = False
        return parser
Example #12
 def initialize(self, parser):
     parser = BaseOptions.initialize(self, parser)
     # ---------- Define Device ---------- #
     parser.add_argument('--n', type=int, default=16)
     parser.add_argument('--port', type=str, default='/dev/cu.usbmodem1413')
     parser.add_argument('--freq', type=int, default=57600)
     parser.add_argument('--repr',
                         type=str,
                         nargs=16,
                         default=[
                             'Ax', 'Ay', 'Az', 'Gx', 'Gy', 'Gz', 'Mx', 'My',
                             'Mz', 'Q1', 'Q2', 'Q3', 'Q4', 'Y', 'P', 'R'
                         ])
     # ---------- Define Recorder ---------- #
     parser.add_argument('--action', type=str, default='stop')
     parser.add_argument('--dataDir',
                         type=str,
                         default='./data',
                         help='models are saved here')
     # ---------- Define Painter ---------- #
     parser.add_argument('--display',
                         type=int,
                         nargs='+',
                         default=list(range(16)))
     parser.add_argument('--memorySize', type=int, default=10)
     parser.add_argument('--ylim', type=int, default=200)
     # ---------- Define Parameters ---------- #
     parser.add_argument('--threshold', type=float, default=40)
     parser.add_argument('--index', type=int, nargs='*', default=[3, 4, 5])
     parser.add_argument('--nStep', type=int, default=2)
     # ---------- Experiment Setting ---------- #
     parser.set_defaults(name='record')
     return parser
Example #13
    def initialize(self, parser):
        parser = BaseOptions.initialize(self, parser)
        parser.set_defaults(no_shuffle=True)
        parser.set_defaults(datamode="test")
        self.is_train = False
        parser.add_argument(
            "--result_dir",
            type=str,
            default="test_results",
            help="save test result outputs",
        )

        parser.add_argument(
            "--tryon_list",
            help="Use a CSV file to specify what cloth should go on each person."
            "The CSV should have two columns: CLOTH_PATH and PERSON_ID. "
            "Cloth_path is the path to the image of the cloth product to wear. "
            "Person_id is the identifier that corresponds to the ID under each "
            "annotation folder.",
        )
        parser.add_argument(
            "--random_tryon",
            help="Randomly choose cloth-person pairs for try-on. ",
            action="store_true",
        )
        # parser.add_argument(...)

        return parser
Example #14
    def initialize(self, parser):
        parser = BaseOptions.initialize(self, parser)
        # ---------- Define Mode ---------- #
        parser.set_defaults(mode='train')
        # ---------- Define Network ---------- #
        parser.set_defaults(model='mcd')
        parser.add_argument('--net',
                            type=str,
                            default="drn_d_38",
                            help="network structure",
                            choices=[
                                'fcn', 'psp', 'segnet', 'fcnvgg', "drn_c_26",
                                "drn_c_42", "drn_c_58", "drn_d_22", "drn_d_38",
                                "drn_d_54", "drn_d_105"
                            ])
        parser.add_argument('--res',
                            type=str,
                            default='50',
                            metavar="ResnetLayerNum",
                            choices=["18", "34", "50", "101", "152"],
                            help='which resnet 18,50,101,152')
        # ---------- Define Dataset ---------- #
        parser.add_argument(
            '--sourceDataset',
            type=str,
            nargs='+',
            choices=["gta_train", "gta_val", "city_train", "city_val"],
            default=["gta_train"])
        parser.add_argument(
            '--targetDataset',
            type=str,
            nargs='+',
            choices=["gta_train", "gta_val", "city_train", "city_val"],
            default=["city_train"])
        # ---------- Optimizers ---------- #
        parser.set_defaults(opt='sgd')
        # ---------- Train Details ---------- #
        parser.add_argument(
            '--k',
            type=int,
            default=4,
            help='how many steps to repeat the generator update')
        parser.add_argument("--nTimesDLoss", type=int, default=1)
        parser.add_argument("--bgLoss",
                            action="store_true",
                            help='whether you add background loss')
        parser.add_argument('--dLoss',
                            type=str,
                            default="diff",
                            choices=['mysymkl', 'symkl', 'diff'],
                            help="choose from ['mysymkl', 'symkl', 'diff']")
        # ---------- Hyperparameters ---------- #
        parser.set_defaults(epoch=1)
        parser.set_defaults(nEpochStart=10)
        parser.set_defaults(nEpochDecay=10)
        # ---------- Experiment Setting ---------- #
        parser.set_defaults(name='mcd_da')
        parser.set_defaults(displayWidth=3)

        return parser
Example #15
    def initialize(self):
        BaseOptions.initialize(self)
        self.isTrain = False

        self.parser.add_argument('--which_epoch', type=int, required=True, default=0,
                                 help='which epoch to load for testing')
        self.parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
        self.parser.add_argument('--data_root', type=str, default='./datasets/extended_CMU/',
                                 help='path to CMU images')
        self.parser.add_argument('--retrieval_metric', type=str, default='L1',
                                 help='metric used for retrieval, L2 || cos || L1')
        self.parser.add_argument('--slice_list', nargs='+', default=[2, 3, 4, 5, 6, 13, 14, 15, 16, 17, 18, 19, 20, 21],
                                 type=int, help='which slice to test')
        self.parser.add_argument('--testlayers_w', nargs='+', default=[0,0,0,0,1,1,0,0], type=float,
                                 help='the weight of image representation on each scale for retrieval')
        self.parser.add_argument('--trip_layer_index', nargs='+', default=[5,6], type=int,
                                 help='which layers are used for image retrieval, counting from 1')
Example #16
    def initialize(self, parser):
        parser = BaseOptions.initialize(self, parser)

        parser.add_argument('--name', type=str, help='network cfig name')
        parser.add_argument('--train',
                            type=bool,
                            default=True,
                            help='train or eval')

        # Datasets
        parser.add_argument('--workers',
                            type=int,
                            default=4,
                            help='number of workers')
        parser.add_argument('--batch_size',
                            type=int,
                            default=4,
                            help='input batch size')
        parser.add_argument('--shuffle',
                            type=bool,
                            default=True,
                            help='if shuffle the dataset')
        parser.add_argument('--image_size',
                            type=int,
                            default=256,
                            help='image size')
        parser.add_argument('--data_root',
                            type=str,
                            default='./data/dataset/Synthetic',
                            help='data root')

        # Model
        parser.add_argument('--cuda', type=bool, default=True, help='use cuda')
        parser.add_argument('--nepoch',
                            type=int,
                            default=[14, 10, 9],
                            help='number of total epochs')
        parser.add_argument('--reuse',
                            type=bool,
                            default=False,
                            help='if reuse model')
        parser.add_argument('--gpu_id',
                            type=int,
                            default=[0, 1],
                            help='gpu id for usage')
        parser.add_argument('--start_epoch',
                            type=int,
                            default=1,
                            help='the number of epoch to start')

        # Visualization and saving
        parser.add_argument('--outf',
                            type=str,
                            default='./data',
                            help='folder to output temp samples')

        return parser
Example #17
def make_soft_eseg(obj_path, eseg_path, seseg_path, nclasses=4):
    if not os.path.isdir(seseg_path): os.makedirs(seseg_path)
    files = glob.glob(os.path.join(obj_path, '*.obj'))
    opt = BaseOptions()
    opt.initialize()
    opt.num_aug = 1

    for file in files:
        mesh = from_scratch(file, opt)
        gemm_edges = np.array(mesh.gemm_edges)
        obj_id = os.path.splitext(os.path.basename(file))[0]
        seg_file = os.path.join(eseg_path, obj_id + '.eseg')
        edge_seg = np.array(read_seg(seg_file).squeeze(), dtype='int32') - 1
        s_eseg = -1 * np.ones((mesh.edges_count, nclasses), dtype='float64')
        for ei in range(mesh.edges_count):
            prob = np.zeros(nclasses)
            seg_ids, counts = np.unique(edge_seg[gemm_edges[ei]],
                                        return_counts=True)
            prob[seg_ids] = counts / float(len(gemm_edges[ei]))
            s_eseg[ei, :] = prob
        s_eseg_file = os.path.join(seseg_path, obj_id + '.seseg')
        np.savetxt(s_eseg_file, s_eseg, fmt='%f')
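
A hedged usage sketch for make_soft_eseg above, with a hypothetical directory layout: .obj meshes and hard edge labels (.eseg) go in, soft edge labels (.seseg) come out.

# all three directory paths below are hypothetical
make_soft_eseg(obj_path='datasets/aliens/train',
               eseg_path='datasets/aliens/seg',
               seseg_path='datasets/aliens/sseg',
               nclasses=4)
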
Example #18
 def initialize(self, parser):
     BaseOptions.initialize(self, parser)
     parser.add_argument(
         '--which_epoch',
         type=str,
         default='latest',
         help='which epoch to load? set to latest to use latest cached model'
     )
     parser.add_argument('--how_many',
                         type=int,
                         default=float("inf"),
                         help='how many test images to run')
     parser.set_defaults(preprocess_mode='scale_width_and_crop',
                         crop_size=256,
                         load_size=256,
                         display_winsize=256)
     parser.set_defaults(serial_batches=True)
     parser.set_defaults(no_flip=True)
     parser.set_defaults(phase='test')
     parser.set_defaults(gpu_ids='-1')
     parser.set_defaults(name='coco_pretrained')
     self.isTrain = False
     return parser
Example #19
 def initialize(self, parser):
     parser = BaseOptions.initialize(self, parser)  # define shared options
     parser.add_argument('--results_dir',
                         type=str,
                         default=None,
                         required=True,
                         help='saves results here.')
     parser.add_argument('--need_profile', action='store_true')
     parser.add_argument('--num_test',
                         type=int,
                         default=float('inf'),
                         help='how many test images to run')
     parser.add_argument('--model',
                         type=str,
                         default='test',
                         help='which model do you want test')
     parser.add_argument('--netG',
                         type=str,
                         default='sub_mobile_resnet_9blocks',
                         help='specify the generator architecture')
     parser.add_argument(
         '--ngf',
         type=int,
         default=64,
         help='the base number of filters of the student generator')
     parser.add_argument('--dropout_rate',
                         type=float,
                         default=0,
                         help='the dropout rate of the generator')
     # rewrite default values
     parser.add_argument('--no_fid', action='store_true')
     parser.add_argument(
         '--real_stat_path',
         type=str,
         required=None,
         help=
         'the path to load the ground-truth images information to compute FID.'
     )
     parser.add_argument('--no_mIoU', action='store_true')
     parser.add_argument(
         '--times',
         type=int,
         default=100,
         help='times of forwarding the data to test the latency')
     parser.set_defaults(phase='val',
                         serial_batches=True,
                         no_flip=True,
                         load_size=parser.get_default('crop_size'),
                         batch_size=1)
     return parser
Example #20
    def initialize(self):
        BaseOptions.initialize(self)

        ### for display ###
        self.parser.add_argument('--save_epoch_freq', type=int, default=10)
        self.parser.add_argument('--print_iter_freq', type=int, default=100)

        ### for training ###
        self.parser.add_argument('--epoch', type=int, default=100)
        self.parser.add_argument(
            '--epoch_decay',
            type=int,
            default=100,
            help='# of iter to linearly decay learning rate to zero')
        self.parser.add_argument('--lr_policy', type=str, default='lambda')
        self.parser.add_argument('--beta1', type=float, default=0.5)
        self.parser.add_argument('--lr', type=float, default=0.0002)
        self.parser.add_argument('--weight_decay', type=float, default=0.0001)

        ### for discriminator ###
        self.parser.add_argument('--num_D',
                                 type=int,
                                 default=2,
                                 help='number of discriminators to use')
        self.parser.add_argument(
            '--n_layer',
            type=int,
            default=3,
            help='only used if which_model_netD==n_layers')
        self.parser.add_argument('--ndf', type=int, default=64)

        ### for losses ###
        self.parser.add_argument('--lambda_vgg', type=float, default=10.0)
        self.parser.add_argument('--lambda_rec', type=float, default=10.0)

        self.isTrain = True
Example #21
 def initialize(self, parser: argparse.ArgumentParser):
     parser = BaseOptions.initialize(self, parser)
     # data
     parser.add_argument("--no_shuffle",
                         action="store_true",
                         help="don't shuffle input data")
     # checkpoints
     parser.add_argument(
         "--save_count",
         type=int,
         help="how often in steps to always save a checkpoint",
         default=10000,
     )
     parser.add_argument(
         "--val_check_interval",
         "--val_frequency",
         dest="val_check_interval",
         type=str,
         default="0.125",  # parsed later into int or float based on "."
         help="If float, validate (and checkpoint) after this many epochs. "
         "If int, validate after this many batches. If 0 or 0.0, validate "
         "every step.")
     # optimization
     parser.add_argument("--lr",
                         type=float,
                         default=1e-4,
                         help="initial learning rate for adam")
     parser.add_argument(
         "--keep_epochs",
         type=int,
         help="number of epochs with initial learning rate",
         default=5,
     )
     parser.add_argument(
         "--decay_epochs",
         type=int,
         help="number of epochs to linearly decay the learning rate",
         default=5,
     )
     parser.add_argument(
         "--accumulated_batches",
         type=int,
         help=
         "number of batch gradients to accumulate before calling optimizer.step()",
         default=1)
     self.is_train = True
     return parser
Example #22
 def initialize(self, parser):
     parser = BaseOptions.initialize(self, parser)
     # ---------- Define Mode ---------- #
     parser.set_defaults(mode='test')
     # ---------- Define Network ---------- #
     parser.set_defaults(model='test')
     parser.add_argument('--net', type=str, default="drn_d_38", help="network structure",
                         choices=['fcn', 'psp', 'segnet', 'fcnvgg',
                                  "drn_c_26", "drn_c_42", "drn_c_58", "drn_d_22",
                                  "drn_d_38", "drn_d_54", "drn_d_105"])
     parser.add_argument('--res', type=str, default='50', metavar="ResnetLayerNum",
                         choices=["18", "34", "50", "101", "152"], help='which resnet 18,50,101,152')
     # ---------- Define Dataset ---------- #
     parser.add_argument('--dataset', type=str, nargs='+', choices=["gta_train", "gta_val", "city_train", "city_val"],
                         default=["city_val"])
     # ---------- Experiment Setting ---------- #
     parser.set_defaults(name='mcd_da')
     parser.set_defaults(displayWidth=3)
     return parser
Example #23
 def initialize(self, parser):
     parser = BaseOptions.initialize(self, parser)
     # ---------- Define Mode ---------- #
     parser.set_defaults(mode='train')
     # ---------- Define Dataset ---------- #
     parser.add_argument('--nThreads', default=4, type=int, help='# threads for loading data')
     parser.add_argument('--nInput', default=3, type=int, help='# threads for loading data')
     # ---------- Optimizers ---------- #
     parser.add_argument('--opt', type=str, choices=['sgd', 'adam'], default='adam',
                         help="network optimizer")
     parser.add_argument('--lr', type=float, default=1e-3,
                         help='learning rate (default: 0.001)')
     parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
     parser.add_argument('--momentum', type=float, default=0.9,
                         help='momentum sgd (default: 0.9)')
     parser.add_argument('--weight_decay', type=float, default=2e-5,
                         help='weight_decay (default: 2e-5)')
     parser.add_argument("--adjustLr", action="store_true",
                         help='whether you change lr')
     parser.add_argument('--lr_policy', type=str, default='lambda',
                         help='learning rate policy: lambda|step|plateau')
     parser.add_argument('--lr_decay_iters', type=int, default=50,
                         help='multiply by a gamma every lr_decay_iters iterations')
     # ---------- Hyperparameters ---------- #
     parser.add_argument('--batchSize', type=int, default=2,
                         help="batch_size")
     parser.add_argument('--epoch', type=int, default=1,
                         help='the training epoch.')
     parser.add_argument('--nEpochStart', type=int, default=100,
                         help='# of epoch at starting learning rate')
     parser.add_argument('--nEpochDecay', type=int, default=100,
                         help='# of epoch to linearly decay learning rate to zero')
     # ---------- Experiment Setting ---------- #
     parser.set_defaults(name='train')
     parser.add_argument('--displayInterval', type=int, default=5,
                         help='frequency of showing training results on screen')
     parser.add_argument('--saveLatestInterval', type=int, default=5000,
                         help='frequency of saving the latest results')
     parser.add_argument('--saveEpochInterval', type=int, default=5, 
                         help='frequency of saving checkpoints at the end of epochs')
     return parser
Example #24
 def initialize(self, parser):
     parser = BaseOptions.initialize(self, parser)
     # ---------- Define Device ---------- #
     parser.add_argument('--n', type=int, default=16)
     parser.add_argument('--port', type=str, default='/dev/cu.usbmodem1413')
     parser.add_argument('--freq', type=int, default=115200)
     parser.add_argument('--repr',
                         type=str,
                         nargs=16,
                         default=[
                             'Ax', 'Ay', 'Az', 'Gx', 'Gy', 'Gz', 'Mx', 'My',
                             'Mz', 'Q1', 'Q2', 'Q3', 'Q4', 'Y', 'P', 'R'
                         ])
     # ---------- Define Painter ---------- #
     parser.add_argument('--display',
                         type=int,
                         nargs='+',
                         default=list(range(16)))
     parser.add_argument('--memorySize', type=int, default=10)
     parser.add_argument('--ylim', type=int, default=200)
     # ---------- Experiment Setting ---------- #
     parser.set_defaults(name='main')
     return parser
Example #25
    def initialize(self):
        BaseOptions.initialize(self)

        # for displays
        self.parser.add_argument(
            '--display_freq',
            type=int,
            default=100,
            help='frequency of showing training results on screen')
        self.parser.add_argument(
            '--print_freq',
            type=int,
            default=100,
            help='frequency of showing training results on console')
        self.parser.add_argument('--save_latest_freq',
                                 type=int,
                                 default=1000,
                                 help='frequency of saving the latest results')
        self.parser.add_argument(
            '--save_epoch_freq',
            type=int,
            default=10,
            help='frequency of saving checkpoints at the end of epochs')
        self.parser.add_argument(
            '--no_html',
            action='store_true',
            help=
            'do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/'
        )
        self.parser.add_argument(
            '--debug',
            action='store_true',
            help='only do one epoch and displays at each iteration')

        # for training
        self.parser.add_argument(
            '--loadfroms',
            action='store_true',
            help='continue training: load from 32s or 16s')
        self.parser.add_argument(
            '--continue_train',
            action='store_true',
            help='continue training: load the latest model')
        self.parser.add_argument(
            '--use_softmax',
            action='store_true',
            help='if specified use softmax loss, otherwise log-softmax')
        self.parser.add_argument('--phase',
                                 type=str,
                                 default='train',
                                 help='train, val, test, etc')
        self.parser.add_argument('--nepochs',
                                 type=int,
                                 default=100,
                                 help='# of iter at starting learning rate')
        self.parser.add_argument('--iterSize',
                                 type=int,
                                 default=10,
                                 help='# of iter at starting learning rate')
        self.parser.add_argument('--maxbatchsize',
                                 type=int,
                                 default=-1,
                                 help='# of iter at starting learning rate')
        self.parser.add_argument('--warmup_iters',
                                 type=int,
                                 default=500,
                                 help='# of iter at starting learning rate')
        self.parser.add_argument('--beta1',
                                 type=float,
                                 default=0.5,
                                 help='momentum term of adam')
        self.parser.add_argument('--lr',
                                 type=float,
                                 default=0.00025,
                                 help='initial learning rate for adam')
        self.parser.add_argument('--lr_power',
                                 type=float,
                                 default=0.9,
                                 help='power of learning rate policy')
        self.parser.add_argument('--momentum',
                                 type=float,
                                 default=0.9,
                                 help='momentum for sgd')
        self.parser.add_argument('--wd',
                                 type=float,
                                 default=0.0004,
                                 help='weight decay for sgd')

        self.isTrain = True
Example #26
    def initialize(self, parser):
        parser = BaseOptions.initialize(self, parser)  # define shared options
        assert isinstance(parser, argparse.ArgumentParser)
        parser.add_argument('--output_dir',
                            type=str,
                            default=None,
                            required=True,
                            help='the path to save the evaluation result.')
        parser.add_argument('--num_test',
                            type=int,
                            default=float('inf'),
                            help='how many test images to run')
        parser.add_argument('--model',
                            type=str,
                            default='test',
                            help='which model do you want test')
        parser.add_argument('--no_fid',
                            action='store_true',
                            help='whether you want to compute FID.')
        parser.add_argument('--no_mIoU',
                            action='store_true',
                            help='whether you want to compute mIoU.')

        parser.add_argument(
            '--netG',
            type=str,
            default='super_mobile_resnet_9blocks',
            choices=['super_mobile_resnet_9blocks', 'super_mobile_spade'],
            help='specify generator architecture')
        parser.add_argument(
            '--ngf',
            type=int,
            default=48,
            help='the base number of filters of the student generator')
        parser.add_argument('--dropout_rate',
                            type=float,
                            default=0,
                            help='the dropout rate of the generator')
        parser.add_argument('--budget',
                            type=float,
                            default=1e18,
                            help='the MAC budget')
        parser.add_argument(
            '--real_stat_path',
            type=str,
            default=None,
            help=
            'the path to load the ground-truth images information to compute FID.'
        )

        parser.add_argument('--max_cache_size',
                            type=int,
                            default=10000000,
                            help='the cache size to store the results')
        parser.add_argument('--population_size', type=int, default=100)
        parser.add_argument('--mutate_prob',
                            type=float,
                            default=0.2,
                            help='the probability of mutation')
        parser.add_argument(
            '--mutation_ratio',
            type=float,
            default=0.5,
            help=
            'the ratio of networks that are generated through mutation in generation n >= 2.'
        )
        parser.add_argument(
            '--parent_ratio',
            type=float,
            default=0.25,
            help=
            'the ratio of networks that are used as parents for next generation'
        )
        parser.add_argument(
            '--evolution_iters',
            type=int,
            default=500,
            help='how many generations of population to be searched')
        parser.add_argument('--criterion',
                            type=str,
                            default='fid',
                            help='the criterion for the performance',
                            choices=['fid', 'mIoU', 'accu'])
        parser.add_argument(
            '--weighted_sample',
            type=float,
            default=1,
            help='number of times of the probability of the smallest channel to '
            'that of the largest channel in a single layer. '
            '(only affect the first generation)')
        parser.add_argument(
            '--generation_base',
            type=int,
            default=1,
            help='the generation base of the evolution (used for resuming)')
        parser.add_argument('--restore_pkl_path',
                            type=str,
                            default=None,
                            help='the checkpoint to restore searching')
        parser.add_argument(
            '--only_restore_cache',
            action='store_true',
            help='whether to only restore caches in the pkl file')
        parser.add_argument(
            '--save_freq',
            type=int,
            default=60,
            help='the number of minutes to save the latest searching results')

        # rewrite default values
        parser.set_defaults(phase='val',
                            serial_batches=True,
                            no_flip=True,
                            load_size=parser.get_default('crop_size'),
                            load_in_memory=True)

        return parser
Example #27
    def initialize(self, parser):
        BaseOptions.initialize(self, parser)
        # for displays
        parser.add_argument(
            '--display_freq',
            type=int,
            default=20000,
            help='frequency of showing training results on screen')
        parser.add_argument(
            '--print_freq',
            type=int,
            default=1000,
            help='frequency of showing training results on console')
        parser.add_argument('--save_latest_freq',
                            type=int,
                            default=3000,
                            help='frequency of saving the latest results')
        parser.add_argument(
            '--save_epoch_freq',
            type=int,
            default=1,
            help='frequency of saving checkpoints at the end of epochs')
        parser.add_argument(
            '--no_html',
            action='store_true',
            help=
            'do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/'
        )
        parser.add_argument(
            '--debug',
            action='store_true',
            help='only do one epoch and displays at each iteration')
        parser.add_argument(
            '--tf_log',
            action='store_true',
            help=
            'if specified, use tensorboard logging. Requires tensorflow installed'
        )

        # for training
        parser.add_argument('--continue_train',
                            action='store_true',
                            help='continue training: load the latest model')
        parser.add_argument(
            '--which_epoch',
            type=str,
            default='latest',
            help='which epoch to load? set to latest to use latest cached model'
        )
        parser.add_argument(
            '--niter',
            type=int,
            default=50,
            help=
            '# of iter at starting learning rate. This is NOT the total #epochs. Total #epochs is niter + niter_decay'
        )
        parser.add_argument(
            '--niter_decay',
            type=int,
            default=25,
            help='# of iter to linearly decay learning rate to zero')
        parser.add_argument('--optimizer', type=str, default='adam')
        parser.add_argument('--beta1',
                            type=float,
                            default=0.0,
                            help='momentum term of adam')
        parser.add_argument('--beta2',
                            type=float,
                            default=0.9,
                            help='momentum term of adam')
        parser.add_argument('--no_TTUR',
                            action='store_true',
                            help='Use TTUR training scheme')
        parser.add_argument(
            '--efficient',
            action='store_true',
            help=
            'Use gradient checkpointing for memory-efficient training (at the cost of time). Only use this option if you run out of memory.'
        )

        parser.add_argument('--evaluation_start_epoch',
                            type=int,
                            default=0,
                            help='First epoch to calculate metrics')
        parser.add_argument(
            '--evaluation_freq',
            type=int,
            default=100000,
            help='How often to evaluate (in number of samples seen)')
        parser.add_argument(
            '--num_evaluation_samples',
            type=int,
            default=1000,
            help='Number of samples to use for computing metrics')
        parser.add_argument('--evaluate_val_set',
                            action='store_true',
                            help='Calculate metrics for validation set, too')

        # the default values for beta1 and beta2 differ by TTUR option
        opt, _ = parser.parse_known_args()
        if opt.no_TTUR:
            parser.set_defaults(beta1=0.5, beta2=0.999)

        parser.add_argument('--lr',
                            type=float,
                            default=0.0002,
                            help='initial learning rate')
        parser.add_argument(
            '--D_steps_per_G',
            type=int,
            default=1,
            help='number of discriminator iterations per generator iteration.'
        )

        # for discriminators
        parser.add_argument('--ndf',
                            type=int,
                            default=32,
                            help='# of discrim filters in first conv layer')
        parser.add_argument('--lambda_feat',
                            type=float,
                            default=10.0,
                            help='weight for feature matching loss')
        parser.add_argument('--lambda_vgg',
                            type=float,
                            default=10.0,
                            help='weight for vgg loss')

        parser.add_argument(
            '--no_ganFeat_loss',
            action='store_true',
            help=
            'if specified, do *not* use discriminator feature matching loss')
        parser.add_argument(
            '--no_vgg_loss',
            action='store_true',
            help='if specified, do *not* use VGG feature matching loss')
        parser.add_argument('--gan_mode',
                            type=str,
                            default='hinge',
                            help='(ls|original|hinge)')
        parser.add_argument('--netD',
                            type=str,
                            default='multiscale',
                            help='(n_layers|multiscale|image)')
        parser.add_argument(
            '--gradient_clip',
            type=float,
            default=-1,
            help=
            'We clip gradients at this value. Use -1 to disable gradient clipping'
        )
        self.isTrain = True
        return parser
Example #28
    def initialize(self, parser):
        parser = BaseOptions.initialize(self, parser)
        # ---------- Define Mode ---------- #
        parser.set_defaults(mode='train')
        # ---------- Define Network ---------- #
        parser.set_defaults(model='cycle_gan')
        parser.add_argument('--ngf',
                            type=int,
                            default=64,
                            help='# of gen filters in first conv layer')
        parser.add_argument('--ndf',
                            type=int,
                            default=64,
                            help='# of discrim filters in first conv layer')
        parser.add_argument('--which_model_netG',
                            type=str,
                            default='resnet_9blocks',
                            help='selects model to use for netG')
        parser.add_argument('--which_model_netD',
                            type=str,
                            default='basic',
                            help='selects model to use for netD')
        parser.add_argument('--n_layers_D',
                            type=int,
                            default=3,
                            help='only used if which_model_netD==n_layers')
        parser.add_argument(
            '--dropout',
            action='store_true',
            help=
            'use dropout in the generator (no dropout by default)')
        parser.add_argument(
            '--norm',
            type=str,
            default='instance',
            help=
            'instance normalization or batch normalization, default CycleGAN did not use dropout'
        )
        parser.add_argument(
            '--init_type',
            type=str,
            default='normal',
            help='network initialization [normal|xavier|kaiming|orthogonal]')
        parser.add_argument(
            '--init_gain',
            type=float,
            default=0.02,
            help='scaling factor for normal, xavier and orthogonal.')
        # ---------- Define Dataset ---------- #
        parser.add_argument(
            '--datasetA',
            type=str,
            choices=["gta_train", "gta_val", "city_train", "city_val"],
            default="gta_train")
        parser.add_argument(
            '--datasetB',
            type=str,
            choices=["gta_train", "gta_val", "city_train", "city_val"],
            default="city_train")
        # ---------- Optimizers ---------- #
        parser.set_defaults(opt='adam')
        parser.set_defaults(lr=2E-4)
        parser.set_defaults(adjustLr=True)
        # ---------- Hyperparameters ---------- #
        parser.add_argument(
            '--lsgan',
            action='store_false',
            help='do not use least square GAN, if specified, use vanilla GAN')
        parser.add_argument(
            '--pool_size',
            type=int,
            default=50,
            help=
            'the size of image buffer that stores previously generated images')
        parser.add_argument('--lambdaA',
                            type=float,
                            default=10.0,
                            help='weight for cycle loss (A -> B -> A)')
        parser.add_argument('--lambdaB',
                            type=float,
                            default=10.0,
                            help='weight for cycle loss (B -> A -> B)')
        parser.add_argument(
            '--lambdaIdentity',
            type=float,
            default=0.5,
            help=
            'use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1'
        )
        parser.set_defaults(epoch=1)
        parser.set_defaults(nEpochStart=100)
        parser.set_defaults(nEpochDecay=100)
        # ---------- Optional Hyperparameters ---------- #
        parser.set_defaults(augment=True)
        # ---------- Experiment Setting ---------- #
        parser.set_defaults(name='cycle_gan')
        parser.set_defaults(displayWidth=4)

        return parser
Example #29
    def initialize(self):
        BaseOptions.initialize(self)

        self.isTrain = False
Example #30
    def initialize(self, parser):
        parser = BaseOptions.initialize(self, parser)
        # ---------- Define Mode ---------- #
        parser.set_defaults(mode='train')
        # ---------- Define Network ---------- #
        parser.set_defaults(model='cycle_mcd')
        parser.add_argument('--segNet',
                            type=str,
                            default="drn_d_38",
                            help="network structure",
                            choices=[
                                'fcn', 'psp', 'segnet', 'fcnvgg', "drn_c_26",
                                "drn_c_42", "drn_c_58", "drn_d_22", "drn_d_38",
                                "drn_d_54", "drn_d_105"
                            ])
        parser.add_argument('--ngf',
                            type=int,
                            default=64,
                            help='# of gen filters in first conv layer')
        parser.add_argument('--ndf',
                            type=int,
                            default=64,
                            help='# of discrim filters in first conv layer')
        parser.add_argument('--which_model_netG',
                            type=str,
                            default='resnet_9blocks',
                            help='selects model to use for netG')
        parser.add_argument('--which_model_netD',
                            type=str,
                            default='basic',
                            help='selects model to use for netD')
        parser.add_argument('--n_layers_D',
                            type=int,
                            default=3,
                            help='only used if which_model_netD==n_layers')
        parser.add_argument(
            '--dropout',
            action='store_true',
            help=
            'use dropout in the generator (no dropout by default)')
        parser.add_argument(
            '--norm',
            type=str,
            default='instance',
            help=
            'instance normalization or batch normalization, default CycleGAN did not use dropout'
        )
        parser.add_argument(
            '--init_type',
            type=str,
            default='normal',
            help='network initialization [normal|xavier|kaiming|orthogonal]')
        parser.add_argument(
            '--init_gain',
            type=float,
            default=0.02,
            help='scaling factor for normal, xavier and orthogonal.')
        # ---------- Define Dataset ---------- #
        parser.add_argument(
            '--supervisedADataset',
            type=str,
            choices=["gta_train", "gta_val", "city_train", "city_val"],
            default="gta_train")
        parser.add_argument(
            '--unsupervisedADataset',
            type=str,
            choices=["gta_train", "gta_val", "city_train", "city_val"],
            default="gta_val")
        parser.add_argument(
            '--supervisedBDataset',
            type=str,
            choices=["gta_train", "gta_val", "city_train", "city_val"],
            default="city_val")
        parser.add_argument(
            '--unsupervisedBDataset',
            type=str,
            choices=["gta_train", "gta_val", "city_train", "city_val"],
            default="city_train")
        parser.add_argument('--domainA',
                            type=str,
                            default="GTA",
                            choices=['GTA', 'City'],
                            help="Domain A Name")
        parser.add_argument('--domainB',
                            type=str,
                            default="City",
                            choices=['GTA', 'City'],
                            help="Domain B Name")
        # ---------- Optimizers ---------- #
        parser.add_argument('--cycleOpt',
                            type=str,
                            default="adam",
                            choices=['sgd', 'adam'],
                            help="cycle gan network optimizer")
        parser.add_argument('--mcdOpt',
                            type=str,
                            default="sgd",
                            choices=['sgd', 'adam'],
                            help="mcd network optimizer")
        parser.set_defaults(adjustLr=True)
        # ---------- Train Details ---------- #
        parser.add_argument(
            '--k',
            type=int,
            default=4,
            help='how many steps to repeat the generator update')
        parser.add_argument("--nTimesDLoss", type=int, default=1)
        parser.add_argument("--bgLoss",
                            action="store_true",
                            help='whether you add background loss')
        parser.add_argument('--dLoss',
                            type=str,
                            default="diff",
                            choices=['mysymkl', 'symkl', 'diff'],
                            help="choose from ['mysymkl', 'symkl', 'diff']")
        # ---------- Hyperparameters ---------- #
        parser.add_argument(
            '--lsgan',
            action='store_false',
            help='do not use least square GAN, if specified, use vanilla GAN')
        parser.add_argument(
            '--pool_size',
            type=int,
            default=50,
            help=
            'the size of image buffer that stores previously generated images')
        parser.add_argument('--lambdaA',
                            type=float,
                            default=10.0,
                            help='weight for cycle loss (A -> B -> A)')
        parser.add_argument('--lambdaB',
                            type=float,
                            default=10.0,
                            help='weight for cycle loss (B -> A -> B)')
        parser.add_argument(
            '--lambdaIdentity',
            type=float,
            default=0.5,
            help=
            'use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1'
        )
        parser.set_defaults(epoch=1)
        parser.set_defaults(nEpochStart=10)
        parser.set_defaults(nEpochDecay=10)
        # ---------- Experiment Setting ---------- #
        parser.set_defaults(name='cycle_mcd_da')
        parser.set_defaults(displayWidth=4)

        return parser