コード例 #1
0
    def __init__(self, config=None):
        """Build the localization network.

        Consists of a down-sampling stem (``initialR3``: four
        ConvBNRelu + 2x2 max-pool stages), a U-Net-style contracting
        path (``Down1``..``Down4``), and a final 1x1 conv + BatchNorm +
        Sigmoid producing a per-pixel 2-channel localization map.

        Args:
            config: optional ``Encoder_Localizer_config``. When omitted a
                fresh instance is created per call — the original default
                ``config=Encoder_Localizer_config()`` was evaluated once at
                function-definition time and shared across all instances
                (mutable-default-argument pitfall).
        """
        super(LocalizeNetwork, self).__init__()
        self.config = Encoder_Localizer_config() if config is None else config
        # NOTE(review): the original computed an unused local
        #   channels = Width*Height/block_size**2
        # here; removed as dead code.
        dc = self.config.decoder_channels
        self.initialR3 = nn.Sequential(
            ConvBNRelu(3, dc),
            nn.MaxPool2d(2),
            ConvBNRelu(dc, dc),
            nn.MaxPool2d(2),
            ConvBNRelu(dc, dc),
            nn.MaxPool2d(2),
            ConvBNRelu(dc, dc),
            nn.MaxPool2d(2),
        )
        # Contracting path. Size: 256->128
        self.Down1_conv = DoubleConv(3, 64)
        self.Down1_pool = nn.MaxPool2d(2)
        # Size: 128->64
        self.Down2_conv = DoubleConv(64, 128)
        self.Down2_pool = nn.MaxPool2d(2)
        # Size: 64->32
        self.Down3_conv = DoubleConv(128, 256)
        self.Down3_pool = nn.MaxPool2d(2)
        # Size: 32->16
        self.Down4_conv = DoubleConv(256, 512)
        self.Down4_pool = nn.MaxPool2d(2)

        # 1x1 conv -> BN -> sigmoid: 2-channel map in [0, 1].
        self.last_conv = nn.Sequential(
            nn.Conv2d(512, 2, kernel_size=1, stride=1),
            nn.BatchNorm2d(2),
            nn.Sigmoid()
        )
コード例 #2
0
ファイル: cropout.py プロジェクト: xfcy6/ReversibleImage
 def __init__(self,
              shape,
              config=None,
              device=torch.device("cuda")):
     """Cropout noise layer constructor.

     Args:
         shape: 2-sequence ``(height_ratio_range, width_ratio_range)``
             giving the crop ratio ranges.
         config: optional ``Encoder_Localizer_config``; a fresh instance
             is created per call when omitted. The original default
             ``config=Encoder_Localizer_config()`` was a shared
             mutable-default-argument instance.
         device: torch device the layer operates on (defaults to CUDA).
     """
     super(Cropout, self).__init__()
     self.config = Encoder_Localizer_config() if config is None else config
     self.height_ratio_range, self.width_ratio_range = shape[0], shape[1]
     self.device = device
コード例 #3
0
    def __init__(self, config=None,
                 crop_size=(0.5, 0.5)):
        """Encoder + recovery pipeline constructor.

        Args:
            config: optional ``Encoder_Localizer_config``; a fresh instance
                is created per call when omitted. The original default
                ``config=Encoder_Localizer_config()`` was a shared
                mutable-default-argument instance.
            crop_size: kept for interface compatibility; not used by the
                visible body of this constructor.
        """
        super(Encoder_Recovery, self).__init__()
        self.config = Encoder_Localizer_config() if config is None else config
        device = self.config.device
        # Encoder runs without message embedding in this configuration.
        self.encoder = EncoderNetwork(is_embed_message=False,
                                      config=self.config).to(device)

        # Additional noise layers: identity passthrough, simulated JPEG
        # compression, and quantization (built as one literal instead of
        # repeated appends).
        self.other_noise_layers = [Identity(),
                                   JpegCompression(device),
                                   Quantization(device)]
コード例 #4
0
ファイル: encoder.py プロジェクト: xfcy6/ReversibleImage
    def __init__(self,
                 is_embed_message=True,
                 config=None):
        """U-Net-shaped encoder network constructor.

        Contracting path ``Down1``..``Down4`` (256->16 spatial), a
        bottleneck ``Conv5`` of four DoubleConv blocks at 1024 channels,
        and an expanding path ``Up4``..``Up1`` with transposed convs whose
        inputs are sized for skip-connection concatenation (e.g.
        ``Up4_conv`` takes 1024 = 512 upsampled + 512 skip channels).

        Args:
            is_embed_message: whether watermark features are concatenated
                at the bottleneck (consumed by ``after_concat_layer``).
            config: optional ``Encoder_Localizer_config``; a fresh instance
                is created per call when omitted. The original default
                ``config=Encoder_Localizer_config()`` was a shared
                mutable-default-argument instance.
        """
        super(EncoderNetwork, self).__init__()
        self.config = Encoder_Localizer_config() if config is None else config
        self.is_embed_message = is_embed_message
        # Size: 256->128
        self.Down1_conv = DoubleConv(3, 64)
        self.Down1_pool = nn.MaxPool2d(2)
        # Size: 128->64
        self.Down2_conv = DoubleConv(64, 128)
        self.Down2_pool = nn.MaxPool2d(2)
        # Size: 64->32
        self.Down3_conv = DoubleConv(128, 256)
        self.Down3_pool = nn.MaxPool2d(2)
        # Size: 32->16
        self.Down4_conv = DoubleConv(256, 512)
        self.Down4_pool = nn.MaxPool2d(2)
        # Bottleneck at 16x16.
        self.Conv5 = nn.Sequential(
            DoubleConv(512, 1024),
            DoubleConv(1024, 1024),
            DoubleConv(1024, 1024),
            DoubleConv(1024, 1024),
        )
        # Size: 16->32
        self.Up4_convT = nn.ConvTranspose2d(1024, 512, 2, stride=2)
        self.Up4_conv = DoubleConv(1024, 512)

        # Size: 32->64
        self.Up3_convT = nn.ConvTranspose2d(512, 256, 2, stride=2)
        self.Up3_conv = DoubleConv(512, 256)
        # Size: 64->128
        self.Up2_convT = nn.ConvTranspose2d(256, 128, 2, stride=2)
        self.Up2_conv = DoubleConv(256, 128)
        # Size: 128->256
        self.Up1_convT = nn.ConvTranspose2d(128, 64, 2, stride=2)
        self.Up1_conv = DoubleConv(128, 64)
        # Final 1x1 conv producing the 3-channel output image.
        self.final_conv = nn.Conv2d(64, 3, 1)
        # Mixes the concatenated watermark features back down to the
        # bottleneck width (1024 + water_features -> 1024).
        self.after_concat_layer = ConvBNRelu(1024 + self.config.water_features,
                                             1024)
コード例 #5
0
            torch.save(net.state_dict(), MODELS_PATH + 'Epoch N{}.pkl'.format(epoch + 1))

            mean_train_loss = np.mean(train_losses)

            # Prints epoch average loss
            print('Epoch [{0}/{1}], Average_loss: {2:.4f}'.format(
                epoch + 1, num_epochs, mean_train_loss))

            # Debug
            # imshow(utils.make_grid(train_covers), 0, learning_rate=learning_rate, beta=beta)
            # imshow(utils.make_grid(train_hidden), 0, learning_rate=learning_rate, beta=beta)
        return net, mean_train_loss, loss_history


    # Setting
    config = Encoder_Localizer_config()
    isSelfRecovery = True
    skipTraining = False
    # Creates net object
    net = Encoder_Localizer(config).to(device)

    # Creates training set
    train_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(
            TRAIN_PATH,
            transforms.Compose([
                transforms.Scale(512),
                transforms.RandomCrop(512),
                transforms.ToTensor(),
                transforms.Normalize(mean=mean,
                                     std=std)
コード例 #6
0
 def __init__(self, config=None):
     """Full prep/hide/reveal pipeline constructor.

     Args:
         config: accepted for interface compatibility but not otherwise
             used by this constructor. ``None`` replaces the original
             default ``Encoder_Localizer_config()``, which was a single
             shared instance created at function-definition time
             (mutable-default-argument pitfall).
     """
     super(Encoder_Decoder, self).__init__()
     if config is None:
         config = Encoder_Localizer_config()
     # NOTE(review): ``device`` is a module-level global in this file.
     self.encoder = PrepNetwork().to(device)
     self.hiding = HidingNetwork().to(device)
     self.extract = RevealNetwork().to(device)