def train(epoch):
    """Run one training epoch combining pixel loss and VGG content loss.

    Args:
        epoch: Current epoch index, used only for logging.

    Returns:
        Tuple ``(avg_pixel_loss, avg_content_loss)`` averaged over the
        number of batches in ``dataloader``.
    """
    model.train()
    sum_content_loss = 0.0
    sum_pixel_loss = 0.0

    for iteration, sample in enumerate(dataloader):
        inputs, label = sample['inputs'], sample['label']

        # Wrap with torch Variable (legacy pre-0.4 API used throughout this file).
        inputs, label = wrap_variable(inputs, label, use_gpu)

        # Clear gradients accumulated from the previous step.
        optimizer.zero_grad()

        # Forward propagation.
        outputs = model(inputs)

        # Pixel-wise loss at the raw model output.
        pixel_loss = criterion(outputs, label)

        # Normalize both images before feeding them to VGG
        # (presumably ImageNet normalization — see myutils.normalize_batch).
        outputs = myutils.normalize_batch(outputs)
        label = myutils.normalize_batch(label)

        # vgg returns a namedtuple of intermediate activations:
        # (h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3).
        features_outputs = vgg(outputs)
        features_label = vgg(label)

        # Perceptual (content) loss measured at the relu2_2 layer.
        content_loss = criterion(features_outputs.relu2_2,
                                 features_label.relu2_2)

        # Weighted sum of both losses drives the backward pass.
        loss = args.pixelweight * pixel_loss + args.contentweight * content_loss

        # Backward propagation and parameter update.
        loss.backward()
        optimizer.step()

        if iteration % 100 == 0:
            # FIX: "pixle" -> "pixel" in the log message, and .item() replaces
            # the deprecated .data[0], which raises IndexError on 0-dim loss
            # tensors in PyTorch >= 0.5.
            print("===> Epoch[{}]({}/{}): pixel Loss: {:.4f} content loss: {:.4f}".format(
                epoch, iteration, len(dataloader),
                pixel_loss.item(), content_loss.item()))

        # Accumulate for the per-epoch averages.
        sum_content_loss += content_loss.item()
        sum_pixel_loss += pixel_loss.item()

    return sum_pixel_loss / len(dataloader), sum_content_loss / len(dataloader)
def test():
    """Evaluate the model on ``dataloader``, saving outputs and metrics.

    For every sample, computes PSNR/SSIM both between the raw input and the
    label (baseline) and between the model output and the label, saves each
    output image, and appends the averaged results to ``test_result.txt``.
    """
    model.eval()
    # Metrics for input vs. label (baseline before enhancement).
    avg_psnr1 = 0
    avg_ssim1 = 0
    # Metrics for model output vs. label.
    avg_psnr2 = 0
    avg_ssim2 = 0

    for i, sample in enumerate(dataloader):
        # sample['name'] is a one-element batch; [0] unwraps tuple -> str.
        input_image, label_image, name = sample['input_image'], sample[
            'label_image'], sample['name'][0]

        # Wrap with torch Variable (legacy pre-0.4 API used throughout this file).
        input_image, label_image = wrap_variable(input_image, label_image,
                                                 use_gpu)

        # The model is conditioned on VGG relu4_3 features of the
        # ImageNet-normalized input.
        vgg_feature = vgg(myutils.normalize_batch(input_image)).relu4_3
        output_image = model(input_image, vgg_feature)
        # Clamp the prediction into the valid image range [0, 1].
        output_image = output_image.clamp(0.0, 1.0)

        # PSNR: baseline (input vs. label) and prediction (output vs. label).
        psnr1 = myutils.psnr(input_image, label_image)
        psnr2 = myutils.psnr(output_image, label_image)

        # SSIM is computed per-sample and summed; the divisor is the batch
        # size (1.0 here — NOTE(review): hard-coded, assumes batch size 1).
        ssim1 = torch.sum(
            (myutils.ssim(input_image, label_image,
                          size_average=False)).data) / 1.0
        ssim2 = torch.sum((myutils.ssim(
            output_image, label_image, size_average=False)).data) / 1.0

        avg_ssim1 += ssim1
        avg_psnr1 += psnr1
        avg_ssim2 += ssim2
        avg_psnr2 += psnr2

        # Save the output image and record per-sample metrics.
        checkpoint(name, psnr1, psnr2, ssim1, ssim2)
        save(output_image, name)

    # Average over the number of batches.
    avg_psnr1 = avg_psnr1 / len(dataloader)
    avg_ssim1 = avg_ssim1 / len(dataloader)
    avg_psnr2 = avg_psnr2 / len(dataloader)
    avg_ssim2 = avg_ssim2 / len(dataloader)

    print('Avg. PSNR: {:.4f}->{:.4f} Avg. SSIM: {:.4f}->{:.4f}'.format(
        avg_psnr1, avg_psnr2, avg_ssim1, avg_ssim2))
    # FIX: use a context manager so the file handle is closed even if
    # write() raises (the original open()/close() pair leaked on error).
    with open(os.path.join(Image_folder, 'test_result.txt'), 'a+') as output:
        output.write('Avg. PSNR: {:.4f}->{:.4f} Avg. SSIM: {:.4f}->{:.4f}'.format(
            avg_psnr1, avg_psnr2, avg_ssim1, avg_ssim2) + '\r\n')
# ===== 示例 #3 (Example #3) =====
def train(epoch):
    """Run one training epoch of the VGG-feature-conditioned model.

    Args:
        epoch: Current epoch index, used only for logging.

    Returns:
        Average training loss over the batches in ``dataloader``.
    """
    model.train()
    sum_loss = 0.0

    for iteration, sample in enumerate(dataloader):
        inputs, label = sample['inputs'], sample['label']

        # Wrap with torch Variable (legacy pre-0.4 API; 4th arg False means
        # not volatile, so gradients are tracked for training).
        inputs, label = wrap_variable(inputs, label, use_gpu, False)

        # Clear gradients accumulated from the previous step.
        optimizer.zero_grad()

        # VGG relu4_3 features of the normalized input condition the model.
        vgg_feature = vgg(myutils.normalize_batch(inputs)).relu4_3

        # Forward propagation.
        outputs = model(inputs, vgg_feature)

        # Loss for the backward pass.
        loss = criterion(outputs, label)

        # Backward propagation and parameter update.
        loss.backward()
        optimizer.step()

        if iteration % 100 == 0:
            # FIX: .item() replaces the deprecated .data[0], which raises
            # IndexError on 0-dim loss tensors in PyTorch >= 0.5.
            print("===> Epoch[{}]({}/{}):loss: {:.6f}".format(
                epoch, iteration, len(dataloader), loss.item()))

        # Accumulate for the per-epoch average.
        sum_loss += loss.item()

    return sum_loss / len(dataloader)
def test():
    """Evaluate the model on ``test_dataloader``.

    Returns:
        Tuple ``(avg_psnr, avg_ssim, avg_mse)`` averaged over the number of
        batches in ``test_dataloader``.
    """
    model.eval()
    avg_psnr = 0
    avg_ssim = 0
    avg_mse = 0
    for iteration, sample in enumerate(test_dataloader):
        inputs, label = sample['inputs'], sample['label']
        # Wrap with torch Variable (legacy pre-0.4 API; 4th arg True marks the
        # Variables volatile, i.e. no gradient tracking during evaluation).
        inputs, label = wrap_variable(inputs, label, use_gpu, True)

        # VGG relu4_3 features of the normalized input condition the model.
        vgg_feature = vgg(myutils.normalize_batch(inputs)).relu4_3
        outputs = model(inputs, vgg_feature)

        # FIX: .item() replaces the deprecated .data[0], which raises
        # IndexError on 0-dim loss tensors in PyTorch >= 0.5.
        mse = criterion(outputs, label).item()
        psnr = myutils.psnr(outputs, label)
        # Per-sample SSIM summed, then divided by the batch size to average.
        ssim = torch.sum((myutils.ssim(
            outputs, label, size_average=False)).data) / args.testbatchsize
        avg_ssim += ssim
        avg_psnr += psnr
        avg_mse += mse
    return (avg_psnr / len(test_dataloader)), (
        avg_ssim / len(test_dataloader)), (avg_mse / len(test_dataloader))