Example #1
    def __init__(self, config):
        super(SRGANDiscriminator, self).__init__(config)

        ch = util.ConfigHandler(config)
        n = 1
        if config['global settings']['controller'] == 'pix2pix':
            n = 2  # for image pooling
        input_nc = ch.get_number_of_output_image_channels() * n
        self.conv1 = nn.Conv2d(input_nc, 64, 3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(64, 64, 3, stride=2, padding=1)
        self.bn2 = nn.BatchNorm2d(64)
        self.conv3 = nn.Conv2d(64, 128, 3, stride=1, padding=1)
        self.bn3 = nn.BatchNorm2d(128)
        self.conv4 = nn.Conv2d(128, 128, 3, stride=2, padding=1)
        self.bn4 = nn.BatchNorm2d(128)
        self.conv5 = nn.Conv2d(128, 256, 3, stride=1, padding=1)
        self.bn5 = nn.BatchNorm2d(256)
        self.conv6 = nn.Conv2d(256, 256, 3, stride=2, padding=1)
        self.bn6 = nn.BatchNorm2d(256)
        self.conv7 = nn.Conv2d(256, 512, 3, stride=1, padding=1)
        self.bn7 = nn.BatchNorm2d(512)
        self.conv8 = nn.Conv2d(512, 512, 3, stride=2, padding=1)
        self.bn8 = nn.BatchNorm2d(512)

        # Replaced original paper FC layers with FCN
        self.conv9 = nn.Conv2d(512, 1, 1, stride=1, padding=1)
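The snippet defines only the layers; no forward pass is shown. Below is a minimal sketch of how these layers would typically be chained, assuming the usual SRGAN-style LeakyReLU activations between blocks (the activation choice is an assumption, not taken from the original source).

    import torch.nn.functional as F

    def forward(self, x):
        # conv1 has no batch norm, as in the SRGAN discriminator
        x = F.leaky_relu(self.conv1(x), 0.2)
        x = F.leaky_relu(self.bn2(self.conv2(x)), 0.2)
        x = F.leaky_relu(self.bn3(self.conv3(x)), 0.2)
        x = F.leaky_relu(self.bn4(self.conv4(x)), 0.2)
        x = F.leaky_relu(self.bn5(self.conv5(x)), 0.2)
        x = F.leaky_relu(self.bn6(self.conv6(x)), 0.2)
        x = F.leaky_relu(self.bn7(self.conv7(x)), 0.2)
        x = F.leaky_relu(self.bn8(self.conv8(x)), 0.2)
        # 1x1 conv head replaces the original paper's fully connected layers
        return self.conv9(x)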
Example #2
    def __init__(self, config):
        super(ResNetForClassificationModel, self).__init__()
        ch = util.ConfigHandler(config)
        n_trim = int(config['ResNet settings']['nTrim'])
        self.net = ch.get_res_net()
        if config['ResNet settings'].getboolean('trimFullConnectedLayer'):
            self.net = nn.Sequential(*list(self.net.children())[:-n_trim])
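For reference, the settings this constructor reads could look like the following INI fragment. The key names are taken from the code above; the concrete values are hypothetical (nTrim = 1 would drop only the final fully connected layer of a torchvision ResNet).

    [ResNet settings]
    nTrim = 1
    trimFullConnectedLayer = True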
Example #3
    def __init__(self,
                 config):

        super(StarGANDiscriminatorModel, self).__init__(config)
        self.config = config
        ch = util.ConfigHandler(config)
        self.input_nc    = int(config['starGANDiscriminator settings']['numberOfInputImageChannels'])
        self.output_nc   = int(config['starGANDiscriminator settings']['numberOfOutputImageChannels'])
        image_size       = int(config['starGANDiscriminator settings']['imageSize'])
        self.c_dim       = len(self.config['starGAN settings']['typeNames'].strip().split(","))
        self.gpu_ids     = ch.get_GPU_ID()
        self.pad_factory = PadFactory(config)
        #self.norm_layer  = NormalizeFactory(config).create(
        #                        config['ResNetForGenerator settings']['normalizeLayer'])
        conv_dim        = int(config['starGANDiscriminator settings']['convDim'])
        self.repeat_num = int(config['starGANDiscriminator settings']['repeatNum'])

        layers = []
        layers.append(nn.Conv2d(self.input_nc,  conv_dim, kernel_size=4, stride=2, padding=1))
        layers.append(nn.LeakyReLU(0.01, inplace=True))

        curr_dim = conv_dim
        for i in range(1, self.repeat_num):
            layers.append(nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1))
            layers.append(nn.LeakyReLU(0.01, inplace=True))
            curr_dim = curr_dim * 2
        k_size = int(image_size / (2 ** self.repeat_num))
        self.main = nn.Sequential(*layers)
        self.conv1 = nn.Conv2d(curr_dim, 1, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv2 = nn.Conv2d(curr_dim, self.c_dim, kernel_size=k_size, bias=False)
        self.sigmoid = nn.Sigmoid()
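Only the constructor is shown. In the reference StarGAN discriminator, the two heads produce a patch-wise real/fake map and per-domain classification logits; the sketch below follows that reference implementation and is an assumption here (in particular, whether self.sigmoid is applied to out_src is not visible in the snippet).

    def forward(self, x):
        h = self.main(x)
        out_src = self.conv1(h)  # PatchGAN-style real/fake map
        out_cls = self.conv2(h)  # domain classification logits, spatial size 1x1
        return out_src, out_cls.view(out_cls.size(0), out_cls.size(1))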
Example #4
    def __init__(self, config_path="setting.ini"):
        config = configparser.ConfigParser()
        config.read(config_path)
        self.config = config

        self.ch = util.ConfigHandler(config)

        self.controller = self.ch.get_controller()
        self.input_transform = self.ch.get_input_transform()
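setting.ini is parsed with configparser, so it is a plain INI file. Based on keys that appear in the other examples here, a minimal fragment might look like the following; the values are hypothetical and the full set of sections ConfigHandler expects is not shown in the snippet.

    [global settings]
    controller = pix2pix
    numberOfInputImageChannels = 3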
Example #5
    def __init__(self, config):
        import aimaker.models.model_factory as mf
        import aimaker.loss.loss_factory as lf
        import aimaker.optimizers.optimizer_factory as of

        self.config          = config
        ch                   = util.ConfigHandler(config)
        self.gpu_ids         = ch.get_GPU_ID()
        self.checkpoints_dir = ch.get_check_points_dir()

        model_factory        = mf.ModelFactory(config)
        loss_factory         = lf.LossFactory(config)
        optimizer_factory    = of.OptimizerFactory(config)
        self.n_batch         = ch.get_batch_size()

        self.cDim            = len(self.config['starGAN settings']['typeNames'].strip().split(","))

        name                 = config['starGAN settings']['generatorModel']

        if not self.loadModels():
            self.generator       = model_factory.create(name) 
            if config['dataset settings'].getboolean('isTrain'):
                name = config['starGAN settings']['discriminatorModel']
                self.discriminator = model_factory.create(name) 

        self.reconstruct_criterion = loss_factory\
                                       .create(config['starGAN settings']['reconstructCriterion'])
        self.cls_criterion         = loss_factory\
                                       .create(config['starGAN settings']['clsCriterion'])
                                       
        self.lambda_rec = float(config['starGAN settings']['lambdaRec'])
        self.lambda_cls = float(config['starGAN settings']['lambdaCls'])
        self.lambda_gp  = float(config['starGAN settings']['lambdaGp'])

        if len(self.gpu_ids):
            self.generator = self.generator.cuda(self.gpu_ids[0])

        if config['dataset settings'].getboolean('isTrain'):
            if len(self.gpu_ids):
                self.discriminator = self.discriminator.cuda(self.gpu_ids[0])
                
            self.discriminator_criterion = loss_factory.create(config['starGAN settings']['discriminatorCriterion'])
            if len(self.gpu_ids):
                #self.generator_criterion     = self.generator_criterion.cuda(self.gpu_ids[0])   
                self.discriminator_criterion = self.discriminator_criterion.cuda(self.gpu_ids[0])   

            self.generator_optimizer     = optimizer_factory.create(\
                                               config['starGAN settings']['generatorOptimizer'])\
                                               (self.generator.parameters(), config)
            self.discriminator_optimizer = optimizer_factory.create(config['starGAN settings']['discriminatorOptimizer'])\
                                               (self.discriminator.parameters(), config)

        if config['ui settings'].getboolean('isShowModelInfo'):
            self.showModel()
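The three lambda_* weights correspond to the terms of the StarGAN objective: reconstruction, domain classification, and the WGAN-GP gradient penalty (lambda_gp, used on the discriminator side). As a hypothetical illustration of how such weights are typically combined, following the published StarGAN formulation rather than code from this repository:

    # Hypothetical illustration only; names are placeholders.
    def stargan_generator_loss(adv, cls, rec, lambda_cls, lambda_rec):
        # L_G = L_adv + lambda_cls * L_cls + lambda_rec * L_rec
        return adv + lambda_cls * cls + lambda_rec * rec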
Example #6
    def __init__(self, config):
        import aimaker.models.model_factory as mf
        import aimaker.loss.loss_factory as lf
        import aimaker.optimizers.optimizer_factory as of

        self.config = config
        self.ch = ch = util.ConfigHandler(config)
        self.gpu_ids = ch.get_GPU_ID()
        self.checkpoints_dir = ch.get_check_points_dir()
        self.pool = util.ImagePool(
            int(ch.settings['controller']['voxcelFlow']['imagePoolSize']))

        self.lambda_1 = float(config['controller']['voxcelFlow']['lambda1'])
        self.lambda_2 = float(config['controller']['voxcelFlow']['lambda2'])

        model_factory = mf.ModelFactory(config)
        loss_factory = lf.LossFactory(config)
        optimizer_factory = of.OptimizerFactory(config)

        name = config['controller']['voxcelFlow']['generatorModel']
        self.generator = model_factory.create(name)
        self.generator_criterion  = loss_factory.create(\
                                       config['controller']['voxcelFlow']['generatorCriterion'])

        if len(self.gpu_ids):
            self.generator = self.generator.cuda(self.gpu_ids[0])

        if config['data']['isTrain']:
            name = config['controller']['voxcelFlow']['discriminatorModel']
            self.discriminator = model_factory.create(name)
            if len(self.gpu_ids):
                self.discriminator = self.discriminator.cuda(self.gpu_ids[0])

        self.loadModels()

        if config['data']['isTrain']:

            self.discriminator_criterion = loss_factory.create(\
                                               config['controller']['voxcelFlow']\
                                                     ['discriminatorCriterion'])
            if len(self.gpu_ids):
                self.generator_criterion = self.generator_criterion.cuda(
                    self.gpu_ids[0])
                self.discriminator_criterion = self.discriminator_criterion.cuda(
                    self.gpu_ids[0])

            self.generator_optimizer     = optimizer_factory.create(\
                                               config['controller']['voxcelFlow']['generatorOptimizer'])\
                                               (self.generator.parameters(), config)
            self.discriminator_optimizer = optimizer_factory.create(config['controller']['voxcelFlow']['discriminatorOptimizer'])\
                                               (self.discriminator.parameters(), config)

        if config['ui settings']['isShowModelInfo']:
            self.showModel()
Example #7
    def __init__(self, settings: EasyDict):
        import aimaker.models.model_factory as mf
        import aimaker.loss.loss_factory as lf
        import aimaker.optimizers.optimizer_factory as of

        self.settings = settings
        ch = util.ConfigHandler(settings)
        self.gpu_ids = ch.get_GPU_ID()
        self.checkpoints_dir = ch.get_check_points_dir()
        self.pool = util.ImagePool(
            int(ch.settings['annotation']['imagePoolSize']))

        model_factory = mf.ModelFactory(settings)
        loss_factory = lf.LossFactory(settings)
        optimizer_factory = of.OptimizerFactory(settings)

        name = settings['annotation']['generatorModel']

        self.downsampler = nn.AvgPool2d(8)
        self.upsampler = nn.Upsample(scale_factor=8)

        if not self.load_models():
            self.generator = model_factory.create(name)
            if settings['dataset'].getboolean('isTrain'):
                name = settings['annotation']['discriminatorModel']
                self.discriminator = model_factory.create(name)

        self.generator_criterion = loss_factory \
            .create(settings['annotation']['generatorCriterion'])

        if len(self.gpu_ids):
            self.generator = self.generator.cuda(self.gpu_ids[0])

        if settings['dataset'].getboolean('isTrain'):
            if len(self.gpu_ids):
                self.discriminator = self.discriminator.cuda(self.gpu_ids[0])

            self.discriminator_criterion = loss_factory.create(
                settings['annotation']['discriminatorCriterion'])
            if len(self.gpu_ids):
                self.generator_criterion = self.generator_criterion.cuda(
                    self.gpu_ids[0])
                self.discriminator_criterion = self.discriminator_criterion.cuda(
                    self.gpu_ids[0])

            self.generator_optimizer = optimizer_factory.create( \
                settings['annotation']['generatorOptimizer']) \
                (self.generator.parameters(), settings)
            self.discriminator_optimizer = optimizer_factory.create(settings['annotation']['discriminatorOptimizer']) \
                (self.discriminator.parameters(), settings)

        if settings['ui'].getboolean('isShowModelInfo'):
            self.show_model()
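The 8x average-pool downsampler and the matching scale_factor=8 upsampler restore the original spatial resolution, provided height and width are divisible by 8. The quick check below illustrates this; it is not part of the original code.

    import torch
    import torch.nn as nn

    down = nn.AvgPool2d(8)
    up = nn.Upsample(scale_factor=8)
    x = torch.randn(1, 3, 256, 256)
    print(down(x).shape)      # torch.Size([1, 3, 32, 32])
    print(up(down(x)).shape)  # torch.Size([1, 3, 256, 256])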
Example #8
    def __init__(self, config):
        super(LongBottleModel, self).__init__()

        num_input_feature = int(
            config['longBottle settings']['numInputFeature'])
        num_feature = int(config['longBottle settings']['numFeature'])
        num_bottleneck = int(config['longBottle settings']['numBottleneck'])
        self.gpu_ids = util.ConfigHandler(config).get_GPU_ID()

        num_downsampled_feature = num_feature * 2
        self.downsample = nn.Sequential(
            torch.nn.ReflectionPad2d(3),
            torch.nn.Conv2d(num_input_feature, num_feature, 7, 1),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(num_feature),
            torch.nn.ReflectionPad2d(1),
            torch.nn.Conv2d(num_feature, num_feature * 2, 3, 2),
            torch.nn.ReLU(),  # div2
            torch.nn.ReflectionPad2d(1),
            torch.nn.Conv2d(num_feature * 2, num_downsampled_feature, 3, 2),
            torch.nn.ReLU())  # div 2

        bottlenecks = []
        num_layer = num_downsampled_feature
        for i in range(num_bottleneck):

            bottlenecks += [Bottleneck(config, num_layer)]

        self.bottlenecks = nn.Sequential(*bottlenecks)

        self.upsample = nn.Sequential(
            #torch.nn.ReflectionPad2d(1),
            torch.nn.ConvTranspose2d(num_downsampled_feature,
                                     num_feature * 2,
                                     3,
                                     2,
                                     padding=1,
                                     output_padding=1),
            torch.nn.ReLU(),  # mult 2
            torch.nn.BatchNorm2d(num_feature * 2),
            #torch.nn.ReflectionPad2d(1),
            torch.nn.ConvTranspose2d(num_feature * 2,
                                     num_feature,
                                     3,
                                     2,
                                     padding=1,
                                     output_padding=1),
            torch.nn.ReflectionPad2d(3),
            torch.nn.Conv2d(num_feature, num_input_feature, 7, 1),
            torch.nn.Tanh())  # div 2
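The forward pass is not part of the snippet, but given the three sequential blocks it would presumably be a straight pipeline; the sketch below is an assumption.

    def forward(self, x):
        # 4x spatial downsampling, bottleneck stack, then 4x upsampling back
        x = self.downsample(x)
        x = self.bottlenecks(x)
        return self.upsample(x)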
Example #9
    def __init__(self,
                 config_path,
                 source,
                 transform,
                 gpu_ids='',
                 port=1234,
                 divide_size=2):
        super(StarGANPredictor, self).__init__(source, transform, gpu_ids,
                                               port)
        config = configparser.ConfigParser()
        config.read(config_path)
        # keep the parsed config; it is used by ImageWriterFactory below
        self.config = config
        self.controller = util.ConfigHandler(config).get_controller()

        self.divide_image = util.DivideImage(divide_size)
        self.aggregate_image = util.AggregateImage(divide_size)
        self.to_pil_image = transforms.ToPILImage()
        self.image_writer_factory = iwf.ImageWriterFactory(self.config)
Example #10
    def __init__(self, config):

        super(BGModel, self).__init__(config)
        self.config = config
        ch = util.ConfigHandler(config)
        input_nc = int(config['global settings']['numberOfInputImageChannels'])
        ngf = int(config['ResNetForGenerator settings']['filterSize'])
        use_bias = config['ResNetForGenerator settings'].getboolean('useBias')
        self.gpu_ids = ch.get_GPU_ID()
        self.pad_factory = PadFactory(config)
        self.norm_layer = NormalizeFactory(config).create(
            config['ResNetForGenerator settings']['normalizeLayer'])

        model = [
            nn.ReflectionPad2d(3),
            nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
            self.norm_layer(ngf),
            nn.ReLU(True)
        ]

        n_downsampling = 2
        for i in range(n_downsampling):
            mult = 2**i
            model += [
                nn.Conv2d(ngf * mult,
                          ngf * mult * 2,
                          kernel_size=3,
                          stride=2,
                          padding=1,
                          bias=use_bias),
                self.norm_layer(ngf * mult * 2),
                nn.ReLU(True)
            ]
        mult = 2**n_downsampling

        for i in range(int(config['ResNetForGenerator settings']['nBlocks'])):
            model += [ResnetBlock(ngf * mult, config)]

        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            model += [
                nn.ConvTranspose2d(ngf * mult,
                                   int(ngf * mult / 2),
                                   kernel_size=3,
                                   stride=2,
                                   padding=1,
                                   output_padding=1,
                                   bias=use_bias),
                self.norm_layer(int(ngf * mult / 2)),
                nn.ReLU(True)
            ]

        model += [
            self.pad_factory.create(
                config['ResNetForGenerator settings']['paddingType'])(7)
        ]
        model += [
            nn.Conv2d(ngf,
                      ngf * 2,
                      kernel_size=(5, 1),
                      padding=0,
                      stride=(2, 2))
        ]
        model += [
            nn.Conv2d(ngf * 2,
                      ngf * 4,
                      kernel_size=(13, 1),
                      padding=0,
                      stride=(2, 2))
        ]
        model += [
            nn.Conv2d(ngf * 4,
                      ngf * 8,
                      kernel_size=(13, 7),
                      padding=0,
                      stride=(2, 1))
        ]
        model += [
            nn.Conv2d(ngf * 8,
                      ngf * 12,
                      kernel_size=(13, 15),
                      padding=0,
                      stride=(2, 1))
        ]
        #model += [nn.Conv2d(ngf*12, ngf*12, kernel_size=(3, 3), padding=0, stride=(2,2))]
        model += [nn.Conv2d(ngf * 12, 1, kernel_size=(23, 5), padding=0)]
        #model += [nn.Tanh()]
        #model += [nn.Sigmoid()]

        self.model = nn.Sequential(*model)
        self.act = nn.Sigmoid()
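As with the other model snippets, the forward pass is missing; presumably it applies self.model and then the sigmoid activation, as in the sketch below (an assumption, since commented-out Tanh/Sigmoid lines above suggest the output activation was being experimented with).

    def forward(self, x):
        # full convolutional stack, then squash the single-channel output to (0, 1)
        return self.act(self.model(x))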