Code example #1
import argparse


def get_models_parsers():
    """
    Create a dict {model_name: parser} in which each parser holds the
    arguments specific to one model.
    """
    # get_models_names() and get_option_setter() are assumed to be helpers
    # provided by the surrounding models package.
    model_names = get_models_names()
    model_parsers = {}

    for model_name in model_names:
        parser = argparse.ArgumentParser()
        model_option_setter = get_option_setter(model_name)

        try:
            # the setter registers the model's options; the second argument
            # is presumably an is_train flag
            model_parsers[model_name] = model_option_setter(parser, True)
        except Exception:
            # skip models whose options cannot be registered
            pass

    return model_parsers
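
A minimal usage sketch (hedged: it assumes get_models_parsers is importable from a models package, as the snippet above suggests) that lists the option names each model registers:

# hypothetical usage of get_models_parsers(); the import path is an assumption
from models import get_models_parsers

parsers = get_models_parsers()
for model_name, parser in parsers.items():
    # parser._actions is an argparse internal, used here only for inspection
    opts = sorted(a.dest for a in parser._actions if a.dest != "help")
    print(model_name, "->", ", ".join(opts))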
Code example #2
    def get_schema(self, allow_nan=False):
        """Build a JSON schema of all options with pydantic.

        Assumes module-level imports of argparse, math, models,
        argparse._HelpAction and collections.defaultdict.
        """
        if not self.initialized:
            parser = argparse.ArgumentParser(
                formatter_class=argparse.ArgumentDefaultsHelpFormatter)
            parser = self.initialize(parser)
        else:
            # assumed fix: reuse the previously stored parser so that
            # `parser` is bound on this branch as well
            parser = self.parser

        opt = argparse.Namespace()
        self._json_parse_known_args(parser, opt, {})

        named_parsers = {"": parser}
        for model_name in models.get_models_names():
            if self.isTrain and model_name in ["test"]:
                # the test model is not available at train time
                continue
            setter = models.get_option_setter(model_name)
            model_parser = argparse.ArgumentParser()
            setter(model_parser)
            self._json_parse_known_args(model_parser, opt, {})
            named_parsers[model_name] = model_parser

        self.opt = opt
        self.parser = parser
        json_vals = self.to_json()

        for k in json_vals:
            if json_vals[k] is None:
                json_vals[k] = "None"
            if not allow_nan:
                # JSON cannot represent inf/-inf/nan: clamp to finite values
                if json_vals[k] == float("inf"):
                    json_vals[k] = 1e100
                if json_vals[k] == float("-inf"):
                    json_vals[k] = -1e100
                if isinstance(json_vals[k], float) and math.isnan(json_vals[k]):
                    json_vals[k] = 0

        from pydantic import create_model
        # .schema() is the pydantic v1 API (renamed model_json_schema() in v2)
        schema = create_model(type(self).__name__, **json_vals).schema()

        option_tags = defaultdict(list)

        for parser_name, current_parser in named_parsers.items():

            # _action_groups / _group_actions are argparse internals
            for action_group in current_parser._action_groups:
                for action in action_group._group_actions:
                    if isinstance(action, _HelpAction):
                        continue

                    if len(parser_name) > 0:
                        option_tags[action.dest].append(parser_name)

                    if action.dest in schema["properties"]:
                        field = schema["properties"][action.dest]
                        description = action.help if action.help is not None else ""
                        # escape characters that act as markup downstream
                        for c in "#*<>":
                            description = description.replace(c, "\\" + c)
                        field["description"] = description
                        if "title" in field:
                            del field["title"]

        for tagged in option_tags:
            tags = " | ".join(option_tags[tagged])
            field = schema["properties"][tagged]
            field["description"] = "[" + tags + "]\n\n" + field["description"]

        return schema
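
For reference, a short sketch of how the returned schema might be consumed; the TrainOptions class name and import path are assumptions for illustration, not confirmed by the snippet:

# hypothetical usage of get_schema(); class and module names are assumed
import json

from options.train_options import TrainOptions

schema = TrainOptions().get_schema(allow_nan=False)
print(json.dumps(schema, indent=2))  # the schema is plain JSON-serializable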
Code example #3
    def initialize(self, parser):
        """Define the common options that are used in both training and test."""
        # basic parameters
        parser.add_argument(
            '--dataroot',
            required=True,
            help=
            'path to images (should have subfolders trainA, trainB, valA, valB, etc)'
        )
        parser.add_argument(
            '--name',
            type=str,
            default='experiment_name',
            help=
            'name of the experiment. It decides where to store samples and models'
        )
        parser.add_argument('--gpu_ids',
                            type=str,
                            default='0',
                            help='gpu ids, e.g. 0 or 0,1,2 or 0,2; use -1 for CPU')
        parser.add_argument('--checkpoints_dir',
                            type=str,
                            default='./checkpoints',
                            help='models are saved here')
        # model parameters
        parser.add_argument('--model',
                            type=str,
                            default='cycle_gan',
                            help='chooses which model to use. [' +
                            " | ".join(models.get_models_names()) + ']')
        parser.add_argument(
            '--input_nc',
            type=int,
            default=3,
            help='# of input image channels: 3 for RGB and 1 for grayscale')
        parser.add_argument(
            '--output_nc',
            type=int,
            default=3,
            help='# of output image channels: 3 for RGB and 1 for grayscale')
        parser.add_argument('--ngf',
                            type=int,
                            default=64,
                            help='# of gen filters in the last conv layer')
        parser.add_argument(
            '--ndf',
            type=int,
            default=64,
            help='# of discrim filters in the first conv layer')
        parser.add_argument(
            '--netD',
            type=str,
            default='basic',
            help=
            'specify discriminator architecture [basic | n_layers | pixel] or any torchvision model [resnet18...]. The basic model is a 70x70 PatchGAN; n_layers lets you specify the number of layers in the discriminator'
        )
        parser.add_argument(
            '--netD_global',
            type=str,
            default='none',
            help=
            'specify discriminator architecture, any torchvision model can be used [resnet18...]. By default no global discriminator will be used.'
        )
        parser.add_argument(
            '--netG',
            type=str,
            default='resnet_9blocks',
            help=
            'specify generator architecture [resnet_9blocks | resnet_6blocks | resnet_attn | unet_256 | unet_128 | stylegan2 | smallstylegan2]'
        )
        parser.add_argument('--n_layers_D',
                            type=int,
                            default=3,
                            help='only used if netD==n_layers')
        parser.add_argument(
            '--norm',
            type=str,
            default='instance',
            help=
            'instance normalization or batch normalization [instance | batch | none]'
        )
        parser.add_argument(
            '--init_type',
            type=str,
            default='normal',
            help=
            'network initialization [normal | xavier | kaiming | orthogonal]')
        parser.add_argument(
            '--init_gain',
            type=float,
            default=0.02,
            help='scaling factor for normal, xavier and orthogonal.')
        parser.add_argument('--no_dropout',
                            action='store_true',
                            help='no dropout for the generator')
        parser.add_argument('--D_dropout',
                            action='store_true',
                            help='whether to use dropout in the discriminator')
        parser.add_argument(
            '--D_spectral',
            action='store_true',
            help='whether to use spectral norm in the discriminator')
        parser.add_argument(
            '--G_spectral',
            action='store_true',
            help='whether to use spectral norm in the generator')
        parser.add_argument(
            '--G_padding_type',
            type=str,
            help='padding type used in the generator: zeros or reflect',
            default='reflect')
        parser.add_argument(
            '--D_projected_interp',
            type=int,
            default=-1,
            help=
            'force the projected discriminator input interpolation to a value > 224; -1 means no interpolation'
        )
        parser.add_argument(
            '--G_ema',
            action='store_true',
            help='whether to build G via exponential moving average')
        parser.add_argument('--ema_beta',
                            type=float,
                            default=0.999,
                            help='exponential decay for ema')

        # dataset parameters
        parser.add_argument(
            '--dataset_mode',
            type=str,
            default='unaligned',
            help=
            'chooses how datasets are loaded. [unaligned | aligned | single | colorization]'
        )
        parser.add_argument('--direction',
                            type=str,
                            default='AtoB',
                            help='AtoB or BtoA')
        parser.add_argument(
            '--serial_batches',
            action='store_true',
            help=
            'if true, takes images in order to make batches, otherwise takes them randomly'
        )
        parser.add_argument('--num_threads',
                            default=4,
                            type=int,
                            help='# threads for loading data')
        parser.add_argument('--batch_size',
                            type=int,
                            default=1,
                            help='input batch size')
        parser.add_argument('--load_size',
                            type=int,
                            default=286,
                            help='scale images to this size')
        parser.add_argument('--crop_size',
                            type=int,
                            default=256,
                            help='then crop to this size')
        parser.add_argument(
            '--max_dataset_size',
            type=int,
            default=float("inf"),
            help=
            'Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.'
        )
        parser.add_argument(
            '--preprocess',
            type=str,
            default='resize_and_crop',
            help=
            'scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]'
        )
        parser.add_argument(
            '--no_flip',
            action='store_true',
            help='if specified, do not flip the images for data augmentation')
        parser.add_argument(
            '--no_rotate',
            action='store_true',
            help='if specified, do not rotate the images for data augmentation'
        )
        parser.add_argument(
            '--affine',
            type=float,
            default=0.0,
            help=
            'if specified, apply random affine transforms to the images for data augmentation'
        )
        parser.add_argument(
            '--affine_translate',
            type=float,
            default=0.2,
            help=
            'if random affine is enabled, translation range as a fraction of image size: (-value*img_size, +value*img_size)'
        )
        parser.add_argument(
            '--affine_scale_min',
            type=float,
            default=0.8,
            help='if random affine is enabled, minimum of the scale range')
        parser.add_argument(
            '--affine_scale_max',
            type=float,
            default=1.2,
            help='if random affine is enabled, maximum of the scale range')
        parser.add_argument(
            '--affine_shear',
            type=int,
            default=45,
            help='if random affine is enabled, shear range (0, value)')
        parser.add_argument('--imgaug',
                            action='store_true',
                            help='whether to apply random image augmentation')
        parser.add_argument(
            '--display_winsize',
            type=int,
            default=256,
            help='display window size for both visdom and HTML')
        # additional parameters
        parser.add_argument(
            '--epoch',
            type=str,
            default='latest',
            help='which epoch to load? set to latest to use latest cached model'
        )
        parser.add_argument(
            '--load_iter',
            type=int,
            default=0,
            help=
            'which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]'
        )
        parser.add_argument(
            '--verbose',
            action='store_true',
            help='if specified, print more debugging information')
        parser.add_argument(
            '--suffix',
            default='',
            type=str,
            help=
            'customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}'
        )
        parser.add_argument(
            '--semantic_nclasses',
            default=10,
            type=int,
            help='number of classes of the semantic loss classifier')
        parser.add_argument(
            '--semantic_threshold',
            default=1.0,
            type=float,
            help=
            'threshold of the semantic classifier loss below which the semantic loss is applied'
        )
        parser.add_argument(
            '--display_networks',
            action='store_true',
            help='Set True if you want to display networks on port 8000')
        # FID (Frechet Inception Distance) computation options
        parser.add_argument('--compute_fid', action='store_true')
        parser.add_argument('--fid_every', type=int, default=1000)

        # CUT options
        parser.add_argument(
            '--normG',
            type=str,
            default='instance',
            choices=['instance', 'batch', 'none'],
            help='instance normalization or batch normalization for G')
        parser.add_argument(
            '--normD',
            type=str,
            default='instance',
            choices=['instance', 'batch', 'none'],
            help='instance normalization or batch normalization for D')
        parser.add_argument(
            '--no_antialias',
            action='store_true',
            help=
            'if specified, use stride=2 convs instead of antialiased-downsampling (sad)'
        )
        parser.add_argument(
            '--no_antialias_up',
            action='store_true',
            help=
            'if specified, use [upconv(learned filter)] instead of [upconv(hard-coded [1,3,3,1] filter), conv]'
        )

        parser.add_argument(
            '--stylegan2_G_num_downsampling',
            default=1,
            type=int,
            help='Number of downsampling layers used by StyleGAN2Generator')

        parser.add_argument(
            '--D_label_smooth',
            action='store_true',
            help='whether to use one-sided label smoothing with discriminator')
        parser.add_argument(
            '--D_noise',
            type=float,
            default=0.0,
            help='amount of instance noise added to discriminator inputs (0 disables it)')

        # Online dataset creation options
        parser.add_argument(
            '--online_creation_crop_size_A',
            type=int,
            default=512,
            help=
            'crop to this size during online creation; it needs to be greater than the bbox size for domain A'
        )
        parser.add_argument(
            '--online_creation_crop_delta_A',
            type=int,
            default=50,
            help=
            'crop sizes are random, within online_creation_crop_size_A plus or minus online_creation_crop_delta_A, for domain A'
        )
        parser.add_argument(
            '--online_creation_mask_delta_A',
            type=int,
            default=0,
            help=
            'mask offset to allow generation of a bigger object in domain B (for semantic loss), for domain A'
        )
        parser.add_argument(
            '--online_creation_mask_square_A',
            action='store_true',
            help='whether masks should be squared for domain A')

        parser.add_argument(
            '--online_creation_crop_size_B',
            type=int,
            default=512,
            help=
            'crop to this size during online creation; it needs to be greater than the bbox size for domain B'
        )
        parser.add_argument(
            '--online_creation_crop_delta_B',
            type=int,
            default=50,
            help=
            'crop sizes are random, within online_creation_crop_size_B plus or minus online_creation_crop_delta_B, for domain B'
        )
        parser.add_argument(
            '--online_creation_mask_delta_B',
            type=int,
            default=0,
            help=
            'mask offset to allow generation of a bigger object in domain B (for semantic loss), for domain B'
        )
        parser.add_argument(
            '--online_creation_mask_square_B',
            action='store_true',
            help='whether masks should be squared for domain B')

        parser.add_argument(
            '--sanitize_paths',
            action='store_true',
            help=
            'if true, invalid image or label paths will be removed before training'
        )
        parser.add_argument(
            '--sanitize_paths_vebose',
            action='store_true',
            help='if true, invalid image or label paths will be printed')
        parser.add_argument(
            '--relative_paths',
            action='store_true',
            help='whether paths to images are relative to dataroot')

        # D accuracy
        parser.add_argument('--compute_D_accuracy', action='store_true')
        parser.add_argument('--D_accuracy_every', type=int, default=1000)

        self.initialized = True
        return parser
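
Finally, a hedged sketch of exercising initialize() on its own; the BaseOptions class name and module path are assumptions, and only --dataroot is required:

# hypothetical usage of initialize(); class and module names are assumed
import argparse

from options.base_options import BaseOptions

parser = BaseOptions().initialize(
    argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter))
opt, _ = parser.parse_known_args(["--dataroot", "/path/to/data"])
print(opt.model, opt.batch_size, opt.load_size)  # expected: cycle_gan 1 286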