Example #1
def get_style_model_and_losses(cnn, device, normalization_mean, normalization_std,
                               style_img, content_img,
                               content_layers=content_layers_default,
                               style_layers=style_layers_default):
    cnn = copy.deepcopy(cnn)

    # normalization module
    normalization = Normalization(normalization_mean, normalization_std).to(device)

    # keep iterable lists of the content/style losses
    content_losses = []
    style_losses = []

    # cnn is assumed to be an nn.Sequential, so we build a new nn.Sequential
    # to hold the modules that are meant to be activated sequentially
    model = nn.Sequential(normalization)

    i = 0  # increment every time we see a conv
    for layer in cnn.children():
        if isinstance(layer, nn.Conv2d):
            i += 1
            name = 'conv_{}'.format(i)
        elif isinstance(layer, nn.ReLU):
            name = 'relu_{}'.format(i)
            # The in-place version doesn't play very nicely with the ContentLoss
            # and StyleLoss we insert below. So we replace with out-of-place
            # ones here.
            layer = nn.ReLU(inplace=False)
        elif isinstance(layer, nn.MaxPool2d):
            name = 'pool_{}'.format(i)
        elif isinstance(layer, nn.BatchNorm2d):
            name = 'bn_{}'.format(i)
        else:
            raise RuntimeError('Unrecognized layer: {}'.format(layer.__class__.__name__))

        model.add_module(name, layer)

        if name in content_layers:
            # add content loss:
            target = model(content_img).detach()
            content_loss = ContentLoss(target)
            model.add_module("content_loss_{}".format(i), content_loss)
            content_losses.append(content_loss)

        if name in style_layers:
            # add style loss:
            target_feature = model(style_img).detach()
            style_loss = StyleLoss(target_feature)
            model.add_module("style_loss_{}".format(i), style_loss)
            style_losses.append(style_loss)

    # now we trim off the layers after the last content and style losses
    for i in range(len(model) - 1, -1, -1):
        if isinstance(model[i], ContentLoss) or isinstance(model[i], StyleLoss):
            break

    model = model[:(i + 1)]

    return model, style_losses, content_losses
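
This function assumes `ContentLoss` and `StyleLoss` are "transparent" modules that record their loss during the forward pass and return their input unchanged. A minimal sketch consistent with the PyTorch neural-style tutorial this example follows (an assumption; the actual definitions are not shown in the snippet):

import torch
import torch.nn as nn
import torch.nn.functional as F

class ContentLoss(nn.Module):
    def __init__(self, target):
        super().__init__()
        self.target = target.detach()  # fixed target; detached so it is not optimized

    def forward(self, input):
        self.loss = F.mse_loss(input, self.target)  # record the loss...
        return input  # ...then pass the input through unchanged

def gram_matrix(input):
    b, c, h, w = input.size()
    features = input.view(b * c, h * w)
    G = torch.mm(features, features.t())  # channel-wise feature correlations
    return G.div(b * c * h * w)  # normalize by the number of elements

class StyleLoss(nn.Module):
    def __init__(self, target_feature):
        super().__init__()
        self.target = gram_matrix(target_feature).detach()

    def forward(self, input):
        self.loss = F.mse_loss(gram_matrix(input), self.target)
        return input
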
Example #2
def _attempt_insert_content_loss(self):
    if self.layer_count in self.content_layer:
        self.content_layer.remove(self.layer_count)
        target_features = self._model(self.content_tensor).detach()
        layer = ContentLoss(target_features)
        self._model.add_module('content_loss_{}'.format(self.layer_count),
                               layer)
        self._content_losses.append(layer)
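
A matching helper for style layers would presumably look like the following (a hypothetical sketch: the `style_layer`, `style_tensor`, and `_style_losses` attributes are assumed by analogy and do not appear in the snippet):

def _attempt_insert_style_loss(self):
    # hypothetical counterpart; attribute names are assumed by analogy
    if self.layer_count in self.style_layer:
        self.style_layer.remove(self.layer_count)
        target_features = self._model(self.style_tensor).detach()
        layer = StyleLoss(target_features)
        self._model.add_module('style_loss_{}'.format(self.layer_count),
                               layer)
        self._style_losses.append(layer)
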
Example #3
def get_style_model_and_losses(
    cnn,
    cnn_normalization_mean,
    cnn_normalization_std,
    style_img,
    content_img,
):

    cnn = copy.deepcopy(cnn)

    content_layers = ['conv_4']
    style_layers = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']

    normalization = Normalization(cnn_normalization_mean,
                                  cnn_normalization_std).to(device)

    content_losses = []
    style_losses = []

    model = nn.Sequential(normalization)

    i = 0
    for layer in cnn.children():
        if isinstance(layer, nn.Conv2d):
            i += 1
            name = 'conv_{}'.format(i)
        elif isinstance(layer, nn.ReLU):
            name = 'relu_{}'.format(i)
            layer = nn.ReLU(inplace=False)
        elif isinstance(layer, nn.MaxPool2d):
            name = 'pool_{}'.format(i)
        elif isinstance(layer, nn.BatchNorm2d):
            name = 'bn_{}'.format(i)
        else:
            raise RuntimeError('Unrecognized layer: {}'.format(
                layer.__class__.__name__))

        model.add_module(name, layer)

        if name in content_layers:
            target = model(content_img).detach()
            content_loss = ContentLoss(target)
            model.add_module('content_loss_{}'.format(i), content_loss)
            content_losses.append(content_loss)

        if name in style_layers:
            target_feature = model(style_img).detach()
            style_loss = StyleLoss(target_feature)
            model.add_module('style_loss_{}'.format(i), style_loss)
            style_losses.append(style_loss)

    for i in range(len(model) - 1, -1, -1):
        if isinstance(model[i], ContentLoss) or isinstance(
                model[i], StyleLoss):
            break
    model = model[:(i + 1)]

    return model, style_losses, content_losses
Example #4
def get_style_model_and_losses(parent_model,
                               style_img, content_img,
                               content_layers=content_layers_default,
                               style_layers=style_layers_default):
    parent_model = copy.deepcopy(parent_model)

    content_losses = []
    style_losses = []

    # `normalization` is assumed to be a Normalization module defined at
    # module scope; this function does not construct it itself
    model = nn.Sequential(normalization)

    i = 0  # increment every time we see a conv
    for layer in parent_model.children():
        if isinstance(layer, nn.Conv2d):
            i += 1
            name = 'conv_{}'.format(i)
        elif isinstance(layer, nn.ReLU):
            name = 'relu_{}'.format(i)
            layer = nn.ReLU(inplace=False)
        elif isinstance(layer, nn.MaxPool2d):
            name = 'pool_{}'.format(i)
        elif isinstance(layer, nn.BatchNorm2d):
            name = 'bn_{}'.format(i)
        else:
            raise RuntimeError('Unrecognized layer: {}'
                               .format(layer.__class__.__name__))

        model.add_module(name, layer)

        if name in content_layers:
            # add content loss:
            target = model(content_img).detach()
            content_loss = ContentLoss(target)
            model.add_module("content_loss_{}".format(i), content_loss)
            content_losses.append(content_loss)

        if name in style_layers:
            # add style loss:
            target_feature = model(style_img).detach()
            style_loss = StyleLoss(target_feature)
            model.add_module("style_loss_{}".format(i), style_loss)
            style_losses.append(style_loss)

    for i in range(len(model) - 1, -1, -1):
        if isinstance(model[i], ContentLoss) or \
           isinstance(model[i], StyleLoss):
            break

    model = model[:(i + 1)]

    return model, style_losses, content_losses
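
The variants above all return the same (model, style_losses, content_losses) triple. A hedged sketch of the loop that typically consumes it (`model`, `style_losses`, `content_losses`, and `content_img` come from the snippets; the weights and step count are illustrative, not taken from them):

import torch

input_img = content_img.clone().requires_grad_(True)
optimizer = torch.optim.LBFGS([input_img])
style_weight, content_weight = 1_000_000, 1

for _ in range(30):  # LBFGS evaluates the closure several times per step
    def closure():
        with torch.no_grad():
            input_img.clamp_(0, 1)  # keep the image in a valid range
        optimizer.zero_grad()
        model(input_img)  # the forward pass populates .loss on the inserted modules
        loss = (style_weight * sum(sl.loss for sl in style_losses)
                + content_weight * sum(cl.loss for cl in content_losses))
        loss.backward()
        return loss
    optimizer.step(closure)
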
Example #5
def train_single_img(lr_img,
                     model,
                     upsample,
                     data_sampler,
                     trans,
                     optimizer,
                     num_batches,
                     device="cuda"):

    # `config` is assumed to be a module-level dict (or None)
    sr_factor = config["sr_factor"] if config is not None else 2
    model.train()
    avg_loss = 0
    loss_type = config["loss_type"] if config is not None else "l1"
    l1_loss = F.l1_loss

    if loss_type == "content" or loss_type == "hybrid":
        content_loss = ContentLoss()
        lr_downsampled = lr_img.resize(
            [lr_img.size[0] // sr_factor, lr_img.size[1] // sr_factor],
            resample=Image.BICUBIC)
        lr_upsampled = lr_downsampled.resize(
            [lr_downsampled.size[0] * sr_factor, lr_downsampled.size[1] * sr_factor],
            resample=Image.BICUBIC)
        lr_upsampled = transforms.ToTensor()(lr_upsampled).unsqueeze(0).to(device)
        lr_img_tensor = transforms.ToTensor()(lr_img).unsqueeze(0).to(device)

    for iter, (hr, lr) in enumerate(data_sampler):
        optimizer.zero_grad()
        scale = hr.size[0] // lr.size[0]
        lr = upsample(lr, scale)
        hr, lr = trans((hr, lr))
        hr, lr = hr.unsqueeze(0).to(device), lr.unsqueeze(0).to(device)
        hr_pred = model(lr)
        if loss_type == "content":
            loss = content_loss(model(lr_upsampled), lr_img_tensor)
        elif loss_type == "hybrid":
            loss = config["l1_loss_coff"] * l1_loss(hr_pred, hr)
            loss += config["content_loss_coff"] * content_loss(model(lr_upsampled), lr_img_tensor)
        else:
            loss = l1_loss(hr_pred, hr)


        avg_loss += loss.item()
        loss.backward()
        optimizer.step()

        if iter > num_batches:
            print('Done training.')
            avg_loss /= iter
            print(f'Avg training loss is {avg_loss}')
            break
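
Unlike the style-transfer examples, this snippet constructs `ContentLoss()` with no arguments and calls it with a prediction and a target, so here it acts as a perceptual loss. A plausible sketch under that assumption (the real definition is not shown; the VGG16 backbone and `layer_idx` cutoff are illustrative choices):

import torch.nn as nn
import torch.nn.functional as F
from torchvision import models

class ContentLoss(nn.Module):
    def __init__(self, layer_idx=16, device="cuda"):
        super().__init__()
        # frozen VGG16 feature extractor up to `layer_idx` (illustrative choice)
        self.features = models.vgg16(pretrained=True).features[:layer_idx].to(device).eval()
        for p in self.features.parameters():
            p.requires_grad_(False)

    def forward(self, pred, target):
        # MSE between the feature maps of prediction and target
        return F.mse_loss(self.features(pred), self.features(target))
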
Example #6
def build_model(pretrained,
                style,
                content,
                device,
                content_layers=[4],
                style_layers=[0, 1, 2, 3, 4, 5, 6, 7]):
    '''build style transfer model from pretrained model
    '''

    model = nn.Sequential()
    count = 0

    # use device
    style = style.to(device)
    content = content.to(device)
    pretrained = pretrained.to(device)

    # max cnn layer
    max_layer = max(max(content_layers), max(style_layers))

    # output shows odd color artifacts if the input is not normalized
    model.add_module('normalize', Normalize(device))

    # loop through pretrained model and add loss
    for idx, layer in enumerate(pretrained.children()):

        if isinstance(layer, nn.Conv2d):

            # skip conv layers beyond the deepest requested layer
            if count > max_layer: continue

            model.add_module(f"Conv2D_{count}", layer)
            count += 1

            # add content loss
            if count in content_layers:
                content_output = model(content).detach()
                model.add_module(f"Content_loss", ContentLoss(content_output))

            # add style loss
            if count in style_layers:
                style_output = model(style).detach()
                model.add_module(f"Loss_{count}", StyleLoss(style_output))

        else:
            model.add_module(f"{idx}", layer)

    return model
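
A hedged usage sketch for `build_model`, taking VGG19 features as the pretrained backbone (`style_img` and `content_img` are assumed to be preprocessed image tensors, and `Normalize`, `ContentLoss`, and `StyleLoss` to be defined alongside the function):

import torch
from torchvision import models

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
vgg = models.vgg19(pretrained=True).features.eval()
for p in vgg.parameters():
    p.requires_grad_(False)  # the backbone stays frozen
model = build_model(vgg, style_img, content_img, device)
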
Example #7
    def __init__(self, config):
        super().__init__('SRModel', config)

        # generator input: [gray(1) + edge(1)]
        # discriminator input: [gray(1)]
        generator = SRGenerator()
        discriminator = Discriminator(
            in_channels=1, use_sigmoid=config.GAN_LOSS != 'hinge')  # in_channels changed from 3 to 1 (grayscale)

        if len(config.GPU) > 1:
            generator = nn.DataParallel(generator, config.GPU)
            discriminator = nn.DataParallel(discriminator, config.GPU)

        l1_loss = nn.L1Loss()
        content_loss = ContentLoss()
        style_loss = StyleLoss()
        adversarial_loss = AdversarialLoss(type=config.GAN_LOSS)

        kernel = np.zeros((self.config.SCALE, self.config.SCALE))
        kernel[0, 0] = 1
        # kernel_weight = torch.tensor(np.tile(kernel, (3, 1, 1, 1))).float().to(config.DEVICE)
        # shape: (out_channels, in_channels/groups, height, width)
        # self.add_module('scale_kernel', kernel_weight)
        # self.scale_kernel = torch.tensor(np.tile(kernel, (1, 1, 1, 1))).float().to(config.DEVICE)  # channels: 3 -> 1

        self.add_module('generator', generator)
        self.add_module('discriminator', discriminator)

        self.add_module('l1_loss', l1_loss)
        self.add_module('content_loss', content_loss)
        self.add_module('style_loss', style_loss)
        self.add_module('adversarial_loss', adversarial_loss)

        self.gen_optimizer = optim.Adam(params=generator.parameters(),
                                        lr=float(config.LR),
                                        betas=(config.BETA1, config.BETA2))

        self.dis_optimizer = optim.Adam(params=discriminator.parameters(),
                                        lr=float(config.LR),
                                        betas=(config.BETA1, config.BETA2))
Example #8
def get_style_model_and_loss(base_model,
                             base_mean,
                             base_std,
                             style_img,
                             content_img,
                             content_layers=content_layers,
                             style_layers=style_layers):

    base_model = copy.deepcopy(base_model)  # separate copy so the original model is unaffected
    norm = Normalization(base_mean, base_std).to(device)

    content_losses, style_losses = [], []

    model = nn.Sequential(norm)  # First Layer = Normalization Layer

    i = 0  # Count CNN layers
    for layer in base_model.children():
        if isinstance(layer, nn.Conv2d):
            i += 1
            name = "conv_{}".format(i)
        elif isinstance(layer, nn.ReLU):
            name = "relu_{}".format(i)
            layer = nn.ReLU(inplace=False)
        elif isinstance(layer, nn.MaxPool2d):
            name = "pool_{}".format(i)
        elif isinstance(layer, nn.BatchNorm2d):
            name = "bn_{}".format(i)
        else:
            raise RuntimeError("Unrecognized layer: {}".format(
                layer.__class__.__name__))

        # Sequentially stack the VGG layers onto our new model: copy most of
        # them, inserting the loss modules in the right places.
        model.add_module(name, layer)

        if name in content_layers:
            target = model(content_img).detach()  # feature map of the content image so far
            content_loss = ContentLoss(target)  # the target is fed in directly
            # Add a layer that computes the loss and returns its original input
            # (like an identity operation, in a sense).
            model.add_module("content_loss_{}".format(i), content_loss)
            content_losses.append(content_loss)

        if name in style_layers:
            target_feature = model(style_img).detach()
            style_loss = StyleLoss(target_feature)
            # Again, computes the loss and returns the input as is.
            model.add_module("style_loss_{}".format(i), style_loss)
            style_losses.append(style_loss)

    # Get rid of the unnecessary layers after the last style/content loss.
    # Note: this block must sit outside the layer loop above; indented inside
    # it, the function would return after processing only the first layer of
    # the base model.
    for i in range(len(model) - 1, -1, -1):
        if isinstance(model[i], ContentLoss) or isinstance(
                model[i], StyleLoss):
            break

    model = model[:(i + 1)]

    return model, style_losses, content_losses
Example #9
def get_style_model_and_losses(cnn,
                               style_img,
                               content_img,
                               style_weight=1000,
                               content_weight=1,
                               content_layers=content_layers_default,
                               style_layers=style_layers_default):
    cnn = copy.deepcopy(cnn)

    # keep iterable lists of the content/style losses
    content_losses = []
    style_losses = []

    model = nn.Sequential()  # the new Sequential module network
    gram = GramMatrix()  # we need a gram module in order to compute style targets

    # move these modules to the GPU if possible:
    if use_cuda:
        model = model.cuda()
        gram = gram.cuda()

    i = 1
    for layer in list(cnn):
        if isinstance(layer, nn.Conv2d):
            name = "conv_" + str(i)
            model.add_module(name, layer)

            if name in content_layers:
                # add content loss:
                target = model(content_img).clone()
                content_loss = ContentLoss(target, content_weight)
                model.add_module("content_loss_" + str(i), content_loss)
                content_losses.append(content_loss)

            if name in style_layers:
                # add style loss:
                target_feature = model(style_img).clone()
                target_feature_gram = gram(target_feature)
                style_loss = StyleLoss(target_feature_gram, style_weight)
                model.add_module("style_loss_" + str(i), style_loss)
                style_losses.append(style_loss)

        if isinstance(layer, nn.ReLU):
            name = "relu_" + str(i)
            model.add_module(name, layer)

            if name in content_layers:
                # add content loss:
                target = model(content_img).clone()
                content_loss = ContentLoss(target, content_weight)
                model.add_module("content_loss_" + str(i), content_loss)
                content_losses.append(content_loss)

            if name in style_layers:
                # add style loss:
                target_feature = model(style_img).clone()
                target_feature_gram = gram(target_feature)
                style_loss = StyleLoss(target_feature_gram, style_weight)
                model.add_module("style_loss_" + str(i), style_loss)
                style_losses.append(style_loss)

            i += 1

        if isinstance(layer, nn.MaxPool2d):
            name = "pool_" + str(i)
            model.add_module(name, layer)

    return model, style_losses, content_losses
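
This older-tutorial variant relies on a `GramMatrix` module and weighted losses. A sketch of `GramMatrix` consistent with that tutorial (an assumption; the definition is not shown in the snippet):

import torch
import torch.nn as nn

class GramMatrix(nn.Module):
    def forward(self, input):
        a, b, c, d = input.size()  # batch, channels, height, width
        features = input.view(a * b, c * d)
        G = torch.mm(features, features.t())  # channel-wise feature correlations
        return G.div(a * b * c * d)  # normalize by the number of elements
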
Example #10
def get_style_model_and_losses(
        model,
        style_img,
        content_img,
        device,
        normalization_mean=[0.485, 0.456, 0.406],
        normalization_std=[0.229, 0.224, 0.225],
        content_layers=['conv_4'],
        style_layers=['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']):
    """Create a new model from pretrained model by adding content loss and style loss layers.
    Parameters
    ----------
    model(torchvision model): pretrained model. In NST paper VGG19 is used.
    style_img (tensor): style image
    content_img (tensor): content image
    device (str): device to run model 
    normalization_mean (list): default mean of VGG networks
    normalization_std (list): default standard deviation of VGG networks
    content_layers (list): add content loss after the convolutional layers are detected
    style_layers (list):  add style loss after the convolutional layers are detected
    """
    cnn = model.features.to(device).eval()
    # for background on shallow vs. deep copies, see
    # https://www.programiz.com/python-programming/shallow-deep-copy
    cnn = copy.deepcopy(cnn)

    # normalization module
    normalization_mean = torch.tensor(normalization_mean).to(device)
    normalization_std = torch.tensor(normalization_std).to(device)
    normalization = Normalization(normalization_mean,
                                  normalization_std).to(device)

    content_losses = []
    style_losses = []

    # cnn is assumed to be an nn.Sequential, so we build a new nn.Sequential
    # to hold the modules that are meant to be activated sequentially
    model = nn.Sequential(normalization)

    i = 0  # increment every time we see a conv
    for layer in cnn.children():
        if isinstance(layer, nn.Conv2d):
            i += 1
            name = 'conv_{}'.format(i)
        elif isinstance(layer, nn.ReLU):
            name = 'relu_{}'.format(i)
            # The in-place version doesn't play very nicely with the ContentLoss
            # and StyleLoss we insert below, so we replace it with an
            # out-of-place one here.
            layer = nn.ReLU(inplace=False)
        elif isinstance(layer, nn.MaxPool2d):
            name = 'pool_{}'.format(i)
        elif isinstance(layer, nn.BatchNorm2d):
            name = 'bn_{}'.format(i)
        else:
            raise RuntimeError('Unrecognized layer: {}'.format(
                layer.__class__.__name__))

        model.add_module(name, layer)

        if name in content_layers:
            # add content loss
            target = model(content_img).detach()
            content_loss = ContentLoss(target)
            model.add_module('content_loss_{}'.format(i), content_loss)
            content_losses.append(content_loss)

        if name in style_layers:
            # add style loss
            target = model(style_img).detach()
            style_loss = StyleLoss(target)
            model.add_module('style_loss_{}'.format(i), style_loss)
            style_losses.append(style_loss)

    # trim off the layers after the last content and style losses
    for i in range(len(model) - 1, -1, -1):
        if isinstance(model[i], ContentLoss) or isinstance(
                model[i], StyleLoss):
            break

    model = model[:(i + 1)]

    return model, style_losses, content_losses
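
A hedged usage sketch for the function above; `style_img` and `content_img` are assumed to be preprocessed (resized, batched) image tensors already on `device`:

import torch
from torchvision import models

device = "cuda" if torch.cuda.is_available() else "cpu"
vgg19 = models.vgg19(pretrained=True)  # the function extracts .features itself
model, style_losses, content_losses = get_style_model_and_losses(
    vgg19, style_img, content_img, device)
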
Example #11
                                   extract_layers=arguments.style_layers)
    if output_content is not None:
        content_response = model.forward(
            output_content, extract_layers=arguments.content_layers)
    else:
        content_response = None

    # initialize loss
    style_loss = StyleLoss(style_response,
                           input_map,
                           output_map,
                           arguments.style_layers,
                           arguments.map_channel_weight,
                           stride=1)
    if content_response is not None:
        content_loss = ContentLoss(content_response, arguments.content_layers)
    else:
        content_loss = None

    # setup optimizer
    optimizer = torch.optim.LBFGS([target], lr=1.0, history_size=100)

    # setup live plot
    if arguments.plot_interval is not None:
        _, _, height, width = input_style.size()
        live_plot = LivePlot(width, height)

    logging_info("Start generating the image.")

    # main loop
    t0 = time.time()
Example #12
def get_style_model_and_losses(cnn,
                               style_img,
                               content_img,
                               device=torch.device('cpu')):
    """
    We need to add our content loss and style loss layers immediately
    after the convolution layer they are detecting.
    To do this we must create a new Sequential module
    that has content loss and style loss modules correctly inserted.
    """

    # desired depth layers to compute style/content losses:
    content_layers = ['conv_4']
    style_layers = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']

    cnn = copy.deepcopy(cnn)
    normalization = Normalization().to(device)

    content_losses, style_losses = [], []

    model = nn.Sequential(normalization)

    n_conv = 0
    for layer in cnn.children():
        if isinstance(layer, nn.Conv2d):
            n_conv += 1
            name = 'conv_{}'.format(n_conv)
        elif isinstance(layer, nn.ReLU):
            name = 'relu_{}'.format(n_conv)
            layer = nn.ReLU(inplace=False)
        elif isinstance(layer, nn.MaxPool2d):
            name = 'pool_{}'.format(n_conv)
        elif isinstance(layer, nn.BatchNorm2d):
            name = 'bn_{}'.format(n_conv)
        else:
            raise RuntimeError('Unrecognized layer: {}'.format(
                layer.__class__.__name__))
        model.add_module(name, layer)

        if name in content_layers:
            # add content loss
            target_feature = model(content_img).detach()
            content_loss = ContentLoss(target_feature)
            model.add_module('content_loss_{}'.format(n_conv), content_loss)
            content_losses.append(content_loss)

        if name in style_layers:
            # add style loss
            target_feature = model(style_img).detach()
            style_loss = StyleLoss(target_feature)
            model.add_module('style_loss_{}'.format(n_conv), style_loss)
            style_losses.append(style_loss)

    for i in range(len(model) - 1, -1, -1):
        if isinstance(model[i], ContentLoss) or isinstance(
                model[i], StyleLoss):
            break

    model = model[:(i + 1)]

    return model, style_losses, content_losses
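
Here `Normalization()` is constructed with no arguments, so the ImageNet statistics are presumably baked in as defaults. A minimal sketch under that assumption (buffers are used so `.to(device)` moves the statistics along with the module):

import torch
import torch.nn as nn

class Normalization(nn.Module):
    def __init__(self,
                 mean=(0.485, 0.456, 0.406),
                 std=(0.229, 0.224, 0.225)):
        super().__init__()
        # register as buffers so .to(device) moves them with the module
        self.register_buffer('mean', torch.tensor(mean).view(-1, 1, 1))
        self.register_buffer('std', torch.tensor(std).view(-1, 1, 1))

    def forward(self, img):
        return (img - self.mean) / self.std
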
Example #13
photo = imgloader(args.photo_dir, ip_tfs, 512).type(dtype)
art = imgloader(args.art_dir, ip_tfs, 512).type(dtype)
print(photo.size())
print(art.size())
assert (photo.size() == art.size())
i = 1
# build the neural network to optimize
for layer in list(vgg19):
    if isinstance(layer, nn.Conv2d):
        conv = 'conv_' + str(i)
        model.add_module(conv, layer)

        if conv in content_layers:
            target = model.forward(photo).clone()
            cl = ContentLoss(args.w_content, target)
            model.add_module('content_loss_' + str(i), cl)
            cl_list.append(cl)

        if conv in style_layers:
            fm = model.forward(art).clone()
            target_fm = gm.forward(fm)
            sl = StyleLoss(args.w_style, target_fm)
            model.add_module('style_loss_' + str(i), sl)
            sl_list.append(sl)
        i += 1

    if isinstance(layer, nn.ReLU):
        relu = 'relu_' + str(i)
        model.add_module(relu, layer)
Example #14
def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
                               style_img, content_img, content_layers,
                               style_layers, device):
    cnn = copy.deepcopy(cnn)
    # normalization module
    normalization = Normalization(normalization_mean,
                                  normalization_std).to(device)
    # keep iterable lists of the content/style losses
    content_losses = []
    style_losses = []
    # cnn is assumed to be an nn.Sequential, so we build a new nn.Sequential
    # to hold the modules that are meant to be activated sequentially
    '''
    (1) We give our custom model an nn.Sequential container. It may come in handy
    later, and without an nn.Sequential() it is not clear that the
    model.add_module(name, layer) calls below would work at all, so for later
    convenience we need our own nn.Sequential. One question is how to customize it:
    how to create something like model.features and model.classifier without
    defining a class, the way this file's code does. The code below was personally
    tested and works, and lets you assemble any model you like:
    model = nn.Sequential()
    y = nn.Sequential( nn.Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
                        nn.Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)))
    y.add_module('haha', nn.ReLU())
    model.add_module('features', y)
    z = nn.Sequential(nn.Dropout(p=0.5), nn.Dropout(p=0.5))
    model.add_module('classifier', z)
    print(model)
    print(model.features)
    To run it, define the structure as above; there is no need to write a forward()
    method. Simply calling model(input) executes it. Just remember to put the
    parameters of every block (every layer) into the optimizer; everything else,
    backpropagation included, follows the same syntax and flow as any other model.
    (2) When defining a class:
    class LeNet(nn.Module):
        def __init__(self):
            super(LeNet, self).__init__()
            self.features = nn.Sequential(
                nn.Conv2d(3, 6, 5),
                nn.ReLU(),
                nn.MaxPool2d(2, 2),
                nn.Conv2d(6, 16, 5),
                nn.ReLU(),
                nn.MaxPool2d(2, 2)
            )
            # Reshaping is not an nn.Module layer, so an operation like this
            # (anything that is not an nn.Module) forces the model to be split
            # into several parts.
            self.classifiter = nn.Sequential(
                nn.Linear(16*5*5, 120),
                nn.ReLU(),
                nn.Linear(120, 84),
                nn.ReLU(),
                nn.Linear(84, 10)
            )
        def forward(self, x):
            x = self.features(x)
            x = x.view(-1, 16*5*5)
            x = self.classifiter(x)
            return x
    '''

    model = nn.Sequential(normalization)  # normalization is what model[0] actually is
    '''In effect I now have to build a model from scratch, piece by piece (one block, one layer, one part at a time).'''
    '''
    The idea is to take an existing model apart, add to it, and recombine it. Exactly
    how to do that depends on the structure of the model you are reusing; VGG, for
    instance, cycles through Conv, ReLU, BN and Pool layers, which is why the code
    below is written the way it is.
    '''
    i = 0  # increment every time we see a conv
    for layer in cnn.children():
        if isinstance(layer, nn.Conv2d):
            i += 1
            name = 'conv_%d' % i
        elif isinstance(layer, nn.ReLU):
            name = 'relu_%d' % i
            # The in-place version doesn't play very nicely with the ContentLoss
            # and StyleLoss we insert below. So we replace with out-of-place
            # ones here.
            layer = nn.ReLU(inplace=False)
        elif isinstance(layer, nn.BatchNorm2d):
            name = 'bn_%d' % i
        elif isinstance(layer, nn.MaxPool2d):
            name = 'pool_%d' % i
        else:
            raise RuntimeError('Unrecognized layer: %s' %
                               (layer.__class__.__name__))

        model.add_module(name, layer)  # add `layer` to the model under the name `name`

        if name in content_layers:
            # add content loss:
            # run the model as built so far to get the intermediate feature maps at this depth
            target = model(content_img).detach()
            # instantiate the ContentLoss module; nothing has been fed forward through it yet
            content_loss = ContentLoss(target)
            model.add_module("content_loss_%d" % i, content_loss)  # add the instance to the model
            content_losses.append(content_loss)  # and keep it in the list

        if name in style_layers:
            # add style loss:
            target_feature = model(style_img).detach()
            style_loss = StyleLoss(target_feature)
            model.add_module("style_loss_%d" % i, style_loss)
            style_losses.append(style_loss)

    # now we trim off the layers after the last content and style losses
    for i in range(len(model) - 1, -1, -1):  # walk from the last layer back to the first
        if isinstance(model[i], ContentLoss) or isinstance(
                model[i], StyleLoss):
            break
    model = model[:(i + 1)]  # the unused trailing layers can now be chopped off

    return model, style_losses, content_losses
Example #15
checkpoint_path = 'checkpoints'

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
traindata = TrainDataset(train_data_path, transform)
traindata_loader = DataLoader(traindata, batch_size=batchsize, shuffle=True)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
netG = Generator().to(device)
netD = Discriminator().to(device)
optimizerG = optim.Adam(netG.parameters(), lr=learning_rate)
optimizerD = optim.Adam(netD.parameters(), lr=learning_rate)
bce = nn.BCELoss()
contentLoss = ContentLoss().to(device)
adversarialLoss = AdversarialLoss()
# print(netG)
# print(netD)

if not os.path.exists(checkpoint_path):
    os.mkdir(checkpoint_path)

torch.save(netG, 'netG-epoch_000.pth')
for epoch in range(1, epochs + 1):
    for idx, (lr, hr) in enumerate(traindata_loader):
        lr = lr.to(device)
        hr = hr.to(device)

        # update the discriminator
        netD.zero_grad()