Example #1
    def __init__(self, config=Encoder_Localizer_config(),
                 crop_size=(0.5, 0.5)):
        super(Encoder_Recovery, self).__init__()
        self.config = config
        device = config.device
        self.encoder = EncoderNetwork(is_embed_message=False,
                                      config=config).to(device)

        self.other_noise_layers = [Identity()]
        self.other_noise_layers.append(JpegCompression(device))
        self.other_noise_layers.append(Quantization(device))
    def __call__(self, parser, namespace, values, option_string=None):

        layers = []
        split_commands = values[0].split('+')

        for command in split_commands:
            # remove all whitespace
            command = command.replace(' ', '')
            if command[:len('cropout')] == 'cropout':
                layers.append(parse_cropout(command))
            elif command[:len('diff_qf_jpeg2')] == 'diff_qf_jpeg2':
                layers.append(DiffQFJpegCompression2())
            elif command[:len('diff_corruptions')] == 'diff_corruptions':
                layers.append(DiffCorruptions())
            elif command[:len('crop')] == 'crop':
                layers.append(parse_crop(command))
            elif command[:len('dropout')] == 'dropout':
                layers.append(parse_dropout(command))
            elif command[:len('resize')] == 'resize':
                layers.append(parse_resize(command))
            elif command[:len('jpeg2000')] == 'jpeg2000':
                layers.append(JpegCompression2000())
            elif command[:len('jpeg2')] == 'jpeg2':
                layers.append(JpegCompression2())
            elif command[:len('jpeg')] == 'jpeg':
                layers.append('JpegPlaceholder')
            elif command[:len('quant')] == 'quant':
                layers.append('QuantizationPlaceholder')
            elif command[:len('combined2')] == 'combined2':
                layers.append('Combined2Placeholder')
            elif command[:len('identity')] == 'identity':
                layers.append(Identity())
            elif command[:len('gaussian4')] == 'gaussian4':
                layers.append(Gaussian(3, 4, 3))
            elif command[:len('gaussian')] == 'gaussian':
                layers.append(Gaussian())
            elif command[:len('diff_jpeg')] == 'diff_jpeg':
                layers.append(DiffJPEG(128, 128))
            elif command[:len('webp')] == 'webp':
                layers.append(WebP())
            elif command[:len('mpeg4')] == 'mpeg4':
                layers.append(MPEG4())
            elif command[:len('h264')] == 'h264':
                layers.append(H264())
            elif command[:len('xvid')] == 'xvid':
                layers.append(XVID())
            else:
                raise ValueError(
                    'Command not recognized: \n{}'.format(command))
        setattr(namespace, self.dest, layers)
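A minimal sketch of how an argparse.Action like the one above might be wired into a parser. The Action subclass name (here NoiseArgParser) and the exact command syntax inside the string are assumptions, since neither is shown in this snippet:

import argparse

# Hypothetical wiring; the Action subclass name and command syntax are assumptions.
parser = argparse.ArgumentParser()
parser.add_argument('--noise', nargs='*', action=NoiseArgParser, default=[],
                    help="'+'-separated noise pipeline, e.g. 'crop((0.4,0.55),(0.4,0.55))+jpeg()'")

args = parser.parse_args(['--noise', 'cropout((0.55,0.6),(0.55,0.6))+jpeg()'])
print(args.noise)  # e.g. [<parsed cropout layer>, 'JpegPlaceholder']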
Example #3
    def __init__(self, noise_layers: list, device):
        super(Noiser, self).__init__()
        self.noise_layers = [Identity()]
        for layer in noise_layers:
            if type(layer) is str:
                if layer == 'JpegPlaceholder':
                    # self.noise_layers.append(JpegCompression(device))
                    self.noise_layers.append(Jpeg(device, [70, 95]))
                elif layer == 'QuantizationPlaceholder':
                    self.noise_layers.append(Quantization(device))
                else:
                    raise ValueError(f'Wrong layer placeholder string in Noiser.__init__().'
                                     f' Expected "JpegPlaceholder" or "QuantizationPlaceholder" but got {layer} instead')
            else:
                self.noise_layers.append(layer)
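A brief usage sketch, assuming the layer classes above are importable and that Noiser subclasses nn.Module as the super() call implies (the import paths are assumptions):

import torch

# Hypothetical usage; Noiser and Gaussian come from the noise-layer module shown in this listing.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
noiser = Noiser(['JpegPlaceholder', 'QuantizationPlaceholder', Gaussian()], device)
# Identity() is always prepended, so noiser.noise_layers holds four layers here.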
Example #4
    def __init__(self, device, jpeg_type='jpeg2'):
        super(Combined2, self).__init__()
        self.identity = Identity()
        if jpeg_type == 'jpeg2':
            self.jpeg = JpegCompression2()
        elif jpeg_type == 'jpeg':
            self.jpeg = JpegCompression(device)
        elif jpeg_type == 'diff_jpeg':
            self.jpeg = DiffJPEG()
        else:
            self.jpeg = Quantization()
        self.dropout = Dropout([0.3, 0.3])
        self.gaussian = Gaussian()
        self.crop2 = Crop2(
            [0.187, 0.187],
            [0.187, 0.187])  # Crop2([0.547, 0.547], [0.547, 0.547])
        self.cropout = Cropout([0.547, 0.547], [0.547, 0.547])
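A construction sketch for the branch logic above; any jpeg_type other than 'jpeg2', 'jpeg' or 'diff_jpeg' falls through to Quantization(). The usage is hypothetical, only the constructor shown above is assumed:

import torch

# Hypothetical usage of the Combined2 constructor shown above.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
combined_diff = Combined2(device, jpeg_type='diff_jpeg')  # self.jpeg becomes DiffJPEG()
combined_default = Combined2(device)                      # default 'jpeg2' -> JpegCompression2()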
Example #5
    def __init__(self, noise_config: list, device: torch.device):
        super(Noiser, self).__init__()

        self.noise_config = noise_config
        noise_layers = []

        for noise_layer_config in noise_config:
            layer_type = noise_layer_config['type'].lower()
            if layer_type == 'jpeg_compression':
                # TODO: Add jpeg compression level as a config option
                noise_layers.append(JpegCompression(device))
            elif layer_type == 'crop':
                noise_layers.append(
                    Crop(noise_layer_config['height_ratios'],
                         noise_layer_config['width_ratios']))
            elif layer_type == 'cropout':
                noise_layers.append(
                    Cropout(noise_layer_config['height_ratios'],
                            noise_layer_config['width_ratios']))
            elif layer_type == 'dropout':
                noise_layers.append(
                    Dropout(noise_layer_config['keep_ratio_range']))
            elif layer_type == 'resize':
                if 'interpolation_method' in noise_layer_config:
                    noise_layers.append(
                        Resize(noise_layer_config['resize_ratio_range'],
                               noise_layer_config['interpolation_method']))
                else:
                    noise_layers.append(
                        Resize(noise_layer_config['resize_ratio_range']))
            elif layer_type == 'rotate':
                pass
            elif layer_type == 'identity':
                noise_layers.append(Identity())
            elif layer_type == 'quantization':
                noise_layers.append(Quantization())
            else:
                raise ValueError('Noise layer of {} not supported'.format(
                    noise_layer_config['type']))

        self.noise_layers = nn.Sequential(*noise_layers)
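For reference, a noise_config that exercises every supported branch might look like the following; the concrete ratio values are illustrative assumptions, not taken from the project's config files:

import torch

# Hypothetical config list for the constructor above; values are placeholders.
noise_config = [
    {'type': 'jpeg_compression'},
    {'type': 'crop', 'height_ratios': (0.4, 0.55), 'width_ratios': (0.4, 0.55)},
    {'type': 'cropout', 'height_ratios': (0.25, 0.35), 'width_ratios': (0.25, 0.35)},
    {'type': 'dropout', 'keep_ratio_range': (0.55, 0.6)},
    {'type': 'resize', 'resize_ratio_range': (0.5, 0.7), 'interpolation_method': 'nearest'},
    {'type': 'identity'},
    {'type': 'quantization'},
]
noiser = Noiser(noise_config, torch.device('cuda'))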
    def __init__(self, config=GlobalConfig()):
        super(LinJingZhiNet, self).__init__()
        self.config = config
        """ Settings """
        self.criterionGAN = GANLoss().cuda()
        self.text_encoder = MLP_encode().cuda()
        if torch.cuda.device_count() > 1:
            self.text_encoder = torch.nn.DataParallel(self.text_encoder)
        self.text_decoder = MLP_decode().cuda()
        if torch.cuda.device_count() > 1:
            self.text_decoder = torch.nn.DataParallel(self.text_decoder)

        self.encoder = UnetInception(config=config).cuda()
        self.decoder = Prep_pureUnet(config=config).cuda()
        if torch.cuda.device_count() > 1:
            self.encoder = torch.nn.DataParallel(self.encoder)
        if torch.cuda.device_count() > 1:
            self.decoder = torch.nn.DataParallel(self.decoder)
        # print(self.encoder)
        self.optimizer_encoder = torch.optim.Adam(self.encoder.parameters())
        self.optimizer_decoder = torch.optim.Adam(self.decoder.parameters())
        """ Noise Layers """
        self.noise_layers = [Identity().cuda()]
        self.jpeg_layer_80 = DiffJPEG(256,
                                      256,
                                      quality=80,
                                      differentiable=True).cuda()
        self.jpeg_layer_90 = DiffJPEG(256,
                                      256,
                                      quality=90,
                                      differentiable=True).cuda()
        self.jpeg_layer_70 = DiffJPEG(256,
                                      256,
                                      quality=70,
                                      differentiable=True).cuda()
        self.jpeg_layer_60 = DiffJPEG(256,
                                      256,
                                      quality=60,
                                      differentiable=True).cuda()
        self.jpeg_layer_50 = DiffJPEG(256,
                                      256,
                                      quality=50,
                                      differentiable=True).cuda()
        self.gaussian = Gaussian().cuda()
        self.gaussian_blur = GaussianBlur().cuda()
        self.dropout = Dropout().cuda()
        self.resize = Resize().cuda()
        self.cropout_layer = Cropout().cuda()
        self.crop_layer = Crop().cuda()
        self.noise_layers.append(self.jpeg_layer_80)
        self.noise_layers.append(self.jpeg_layer_90)
        self.noise_layers.append(self.jpeg_layer_70)
        self.noise_layers.append(self.jpeg_layer_60)
        self.noise_layers.append(self.jpeg_layer_50)
        self.noise_layers.append(self.gaussian)
        self.noise_layers.append(self.resize)
        self.noise_layers.append(self.dropout)
        self.noise_layers.append(self.gaussian_blur)
        self.noise_layers.append(self.cropout_layer)
        self.noise_layers.append(self.crop_layer)

        # self.discriminator = Discriminator(self.config).cuda()
        # if torch.cuda.device_count() > 1:
        #     self.discriminator = torch.nn.DataParallel(self.discriminator)
        # self.discriminator_B = Discriminator(self.config).cuda()
        # if torch.cuda.device_count() > 1:
        #     self.discriminator_B = torch.nn.DataParallel(self.discriminator_B)
        self.discriminator_patchHidden = NLayerDiscriminator(input_nc=3).cuda()
        if torch.cuda.device_count() > 1:
            self.discriminator_patchHidden = torch.nn.DataParallel(
                self.discriminator_patchHidden)
        # self.discriminator_patchRecovery = NLayerDiscriminator(input_nc=1).cuda()
        # if torch.cuda.device_count() > 1:
        #     self.discriminator_patchRecovery = torch.nn.DataParallel(self.discriminator_patchRecovery)

        # self.optimizer_discrim = torch.optim.Adam(self.discriminator.parameters())
        # self.optimizer_discrim_B = torch.optim.Adam(self.discriminator.parameters())
        self.optimizer_discrim_patchHiddem = torch.optim.Adam(
            self.discriminator_patchHidden.parameters())
        # self.optimizer_discrim_patchRecovery = torch.optim.Adam(self.discriminator_patchRecovery.parameters())
        self.optimizer_text_encoder = torch.optim.Adam(
            self.text_encoder.parameters())
        self.optimizer_text_decoder = torch.optim.Adam(
            self.text_decoder.parameters())

        # self.downsample_layer = PureUpsampling(scale=64 / 256).cuda()
        # self.upsample_layer = PureUpsampling(scale=256 / 64).cuda()
        self.bce_with_logits_loss = nn.BCEWithLogitsLoss().cuda()
        self.mse_loss = nn.MSELoss().cuda()
        self.ssim_loss = pytorch_ssim.SSIM().cuda()
        self.vgg_loss = VGGLoss(3, 1, False).cuda()
        if torch.cuda.device_count() > 1:
            self.vgg_loss = torch.nn.DataParallel(self.vgg_loss)
        # Define the labels used for training the discriminator/adversarial loss
        self.cover_label = 1
        self.encoded_label = 0
        self.roundCount = 1.0
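noise_layers is kept as a plain Python list rather than an nn.Sequential, which suggests one distortion is sampled per forward pass; the forward itself is not part of this listing, so the fragment below is only an assumed sketch of that pattern:

import random

# Hypothetical training-step fragment; 'model' is a constructed LinJingZhiNet and
# 'marked_image' is the encoder output tensor. The real attack logic is not shown here.
noise_layer = random.choice(model.noise_layers)
attacked_image = noise_layer(marked_image)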
Example #7
def main():
    ############### define global parameters ###############
    global opt, optimizer, optimizerR, writer, logPath, scheduler, schedulerR, val_loader, smallestLoss, DATA_DIR, noiser_dropout, noiser_gaussian, noiser_identity

    opt = parser.parse_args()
    opt.ngpu = torch.cuda.device_count()
    if torch.cuda.is_available() and not opt.cuda:
        print("WARNING: You have a CUDA device, "
              "so you should probably run with --cuda")

    cudnn.benchmark = True

    if opt.hostname == 'DL178':
        DATA_DIR = '/media/user/SSD1TB-2/ImageNet' 
    elif opt.hostname == 'amax':
        # (DCMMC) server 199
        DATA_DIR = '/data/xwt/Universal-Deep-Hiding/ImageNet'
    assert DATA_DIR


    ############  create the dirs to save the result #############
    if not opt.debug:
        try:
            cur_time = time.strftime('%Y-%m-%d_H%H-%M-%S', time.localtime())
            if opt.test == '':
                secret_comment = 'color' if opt.channel_secret == 3 else 'gray'
                cover_comment = 'color' if opt.channel_cover == 3 else 'gray'
                comment = str(opt.num_secret) + secret_comment + 'In' + str(opt.num_cover) + cover_comment
                experiment_dir = (opt.hostname + "_" + cur_time + "_" + str(opt.imageSize) + "_"
                                  + str(opt.num_secret) + "_" + str(opt.num_training) + "_"
                                  + str(opt.bs_secret) + "_" + str(opt.ngpu) + "_" + opt.norm + "_"
                                  + opt.loss + "_" + str(opt.beta) + "_" + comment + "_" + opt.remark)
                opt.outckpts += experiment_dir + "/checkPoints"
                opt.trainpics += experiment_dir + "/trainPics"
                opt.validationpics += experiment_dir + "/validationPics"
                opt.outlogs += experiment_dir + "/trainingLogs"
                opt.outcodes += experiment_dir + "/codes"
                if not os.path.exists(opt.outckpts):
                    os.makedirs(opt.outckpts)
                if not os.path.exists(opt.trainpics):
                    os.makedirs(opt.trainpics)
                if not os.path.exists(opt.validationpics):
                    os.makedirs(opt.validationpics)
                if not os.path.exists(opt.outlogs):
                    os.makedirs(opt.outlogs)
                if not os.path.exists(opt.outcodes):
                    os.makedirs(opt.outcodes)
                save_current_codes(opt.outcodes)
            else:
                experiment_dir = opt.test
                opt.testPics += experiment_dir + "/testPics"
                opt.validationpics = opt.testPics
                opt.outlogs += experiment_dir + "/testLogs"
                if (not os.path.exists(opt.testPics)) and opt.test != '':
                    os.makedirs(opt.testPics)
                if not os.path.exists(opt.outlogs):
                    os.makedirs(opt.outlogs)
        except OSError:
            print("mkdir failed; continuing without output directories")  # ignore and keep going

    logPath = opt.outlogs + '/%s_%d_log.txt' % (opt.dataset, opt.bs_secret)
    if opt.debug:
        logPath = './debug/debug_logs/debug.txt'
    print_log(str(opt), logPath)


    ##################  Datasets  #################
    traindir = os.path.join(DATA_DIR, 'train')
    valdir = os.path.join(DATA_DIR, 'val')

    transforms_color = transforms.Compose([
        transforms.Resize([opt.imageSize, opt.imageSize]),
        transforms.ToTensor(),
    ])

    transforms_gray = transforms.Compose([
        transforms.Grayscale(num_output_channels=1),
        transforms.Resize([opt.imageSize, opt.imageSize]),
        transforms.ToTensor(),
    ])
    if opt.channel_cover == 1:
        transforms_cover = transforms_gray
    else:
        transforms_cover = transforms_color

    if opt.channel_secret == 1:
        transforms_secret = transforms_gray
    else:
        transforms_secret = transforms_color

    if opt.test == '':
        train_dataset_cover = ImageFolder(
            traindir, 
            transforms_cover)

        train_dataset_secret = ImageFolder(
            traindir, 
            transforms_secret)

        val_dataset_cover = ImageFolder(
            valdir, 
            transforms_cover)
        val_dataset_secret = ImageFolder(
            valdir, 
            transforms_secret)

        assert train_dataset_cover; assert train_dataset_secret
        assert val_dataset_cover; assert val_dataset_secret
    else:
        opt.checkpoint = "./training/" + opt.test + "/checkPoints/" + "best_checkpoint.pth.tar"
        if opt.test_diff != '':
            opt.checkpoint_diff = "./training/" + opt.test_diff + "/checkPoints/" + "best_checkpoint.pth.tar"
        testdir = valdir
        test_dataset_cover = ImageFolder(
            testdir,  
            transforms_cover)
        test_dataset_secret = ImageFolder(
            testdir,  
            transforms_secret)
        assert test_dataset_cover; assert test_dataset_secret

    ##################  Hiding and Reveal  #################
    assert opt.imageSize % 32 == 0 
    num_downs = 5 
    if opt.norm == 'instance':
        norm_layer = nn.InstanceNorm2d
    if opt.norm == 'batch':
        norm_layer = nn.BatchNorm2d
    if opt.norm == 'none':
        norm_layer = None
    if opt.cover_dependent:
        Hnet = UnetGenerator(input_nc=opt.channel_secret * opt.num_secret + opt.channel_cover * opt.num_cover,
                             output_nc=opt.channel_cover * opt.num_cover, num_downs=num_downs,
                             norm_layer=norm_layer, output_function=nn.Sigmoid)
    else:
        Hnet = UnetGenerator(input_nc=opt.channel_secret * opt.num_secret,
                             output_nc=opt.channel_cover * opt.num_cover, num_downs=num_downs,
                             norm_layer=norm_layer, output_function=nn.Tanh)
    Rnet = RevealNet(input_nc=opt.channel_cover * opt.num_cover,
                     output_nc=opt.channel_secret * opt.num_secret, nhf=64,
                     norm_layer=norm_layer, output_function=nn.Sigmoid)

    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

    p = 0.3
    noiser_dropout = Dropout([p, p])
    noiser_gaussian = gaussian_kernel()
    noiser_identity = Identity()

    if opt.cover_dependent:
        assert opt.num_training == 1
        assert opt.no_cover == False

    ##### We used kaiming normalization #####
    Hnet.apply(weights_init)
    Rnet.apply(weights_init)

    ##### Always set to multiple GPU mode  #####
    Hnet = torch.nn.DataParallel(Hnet).cuda()
    Rnet = torch.nn.DataParallel(Rnet).cuda()

    noiser_dropout = torch.nn.DataParallel(noiser_dropout).cuda()
    noiser_gaussian = torch.nn.DataParallel(noiser_gaussian).cuda()
    noiser_identity = torch.nn.DataParallel(noiser_identity).cuda()

    if opt.checkpoint != "":
        if opt.checkpoint_diff == "":
            checkpoint = torch.load(opt.checkpoint)
            Hnet.load_state_dict(checkpoint['H_state_dict'])
            Rnet.load_state_dict(checkpoint['R_state_dict'])
        else:
            checkpoint = torch.load(opt.checkpoint)
            checkpoint_diff = torch.load(opt.checkpoint_diff)
            Hnet.load_state_dict(checkpoint_diff['H_state_dict'])
            Rnet.load_state_dict(checkpoint['R_state_dict'])            

    print_network(Hnet)
    print_network(Rnet)

    # Loss and Metric
    if opt.loss == 'l1':
        criterion = nn.L1Loss().cuda()
    if opt.loss == 'l2':
        criterion = nn.MSELoss().cuda()

    # Train the networks when opt.test is empty
    if opt.test == '':
        # tensorboardX writer
        if not opt.debug:
            writer = SummaryWriter(log_dir='runs/' + experiment_dir)

        params = list(Hnet.parameters())+list(Rnet.parameters())
        optimizer = optim.Adam(params, lr=opt.lr, betas=(opt.beta1, 0.999))
        scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.2, patience=8, verbose=True)        

        train_loader_secret = DataLoader(train_dataset_secret, batch_size=opt.bs_secret*opt.num_secret,
                                  shuffle=True, num_workers=int(opt.workers))
        train_loader_cover = DataLoader(train_dataset_cover, batch_size=opt.bs_secret*opt.num_cover*opt.num_training,
                                  shuffle=True, num_workers=int(opt.workers))
        val_loader_secret = DataLoader(val_dataset_secret, batch_size=opt.bs_secret*opt.num_secret,
                                shuffle=False, num_workers=int(opt.workers))
        val_loader_cover = DataLoader(val_dataset_cover, batch_size=opt.bs_secret*opt.num_cover*opt.num_training,
                                shuffle=True, num_workers=int(opt.workers))

        smallestLoss = 10000
        print_log("training is beginning .......................................................", logPath)
        for epoch in range(opt.epochs):
            ##### get a new zipped data loader each epoch to avoid unnecessary handling #####
            adjust_learning_rate(optimizer, epoch)
            train_loader = zip(train_loader_secret, train_loader_cover)
            val_loader = zip(val_loader_secret, val_loader_cover)
            ######################## train ##########################################
            train(train_loader, epoch, Hnet=Hnet, Rnet=Rnet, criterion=criterion)

            ####################### validation  #####################################
            val_hloss, val_rloss, val_hdiff, val_rdiff = validation(val_loader, epoch, Hnet=Hnet, Rnet=Rnet, criterion=criterion)

            ####################### adjust learning rate ############################
            scheduler.step(val_rloss)

            # save the best model parameters
            sum_diff = val_hdiff + val_rdiff
            is_best = sum_diff < globals()["smallestLoss"]
            # only update the running best when this epoch actually improves on it
            if is_best:
                globals()["smallestLoss"] = sum_diff

            save_checkpoint({
                'epoch': epoch + 1,
                'H_state_dict': Hnet.state_dict(),
                'R_state_dict': Rnet.state_dict(),
                'optimizer' : optimizer.state_dict(),
            }, is_best, epoch, '%s/epoch_%d_Hloss_%.4f_Rloss_%.4f_Hdiff_%.4f_Rdiff_%.4f' % (opt.outckpts, epoch, val_hloss, val_rloss, val_hdiff, val_rdiff))

        if not opt.debug:
            writer.close()

    # For testing the trained network
    else:
        test_loader_secret = DataLoader(test_dataset_secret, batch_size=opt.bs_secret*opt.num_secret,
                                 shuffle=False, num_workers=int(opt.workers))
        test_loader_cover = DataLoader(test_dataset_cover, batch_size=opt.bs_secret*opt.num_cover*opt.num_training,
                                 shuffle=True, num_workers=int(opt.workers))
        test_loader = zip(test_loader_secret, test_loader_cover)
        #validation(test_loader, 0, Hnet=Hnet, Rnet=Rnet, criterion=criterion)
        analysis(test_loader, 0, Hnet=Hnet, Rnet=Rnet, criterion=criterion)
        analysis_img_save(test_loader, 0, Hnet=Hnet, Rnet=Rnet, criterion=criterion)
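The checkpoint dictionary written by save_checkpoint() above fixes the key names, so reloading it elsewhere would look roughly like this; the path is a placeholder and Hnet, Rnet and optimizer are assumed to be already constructed:

import torch

# Minimal reload sketch based on the keys used in save_checkpoint() above.
checkpoint = torch.load('./training/<experiment_dir>/checkPoints/best_checkpoint.pth.tar')
Hnet.load_state_dict(checkpoint['H_state_dict'])
Rnet.load_state_dict(checkpoint['R_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
start_epoch = checkpoint['epoch']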
    def __init__(self, config=GlobalConfig()):
        super(HighQualityNet, self).__init__()
        self.config = config
        """ Settings """
        if self.config.architecture == 'AlexNet':
            self.classification_net = models.alexnet(pretrained=True).cuda()
            print(self.classification_net)
        elif self.config.architecture == 'ResNet':
            self.classification_net = models.resnet50(pretrained=True).cuda()
            print(self.classification_net)
        elif self.config.architecture == 'VGG':
            self.classification_net = models.vgg19(pretrained=True).cuda()
            print(self.classification_net)
        elif self.config.architecture == 'DenseNet':
            self.classification_net = models.densenet121(
                pretrained=True).cuda()
            print(self.classification_net)
        elif self.config.architecture == 'ResNet152':
            # NOTE: this was a second, unreachable 'ResNet' branch; renamed so resnet152 can actually be selected
            self.classification_net = models.resnet152(pretrained=True).cuda()
            print(self.classification_net)
        elif self.config.architecture == 'GoogleNet':
            self.classification_net = models.googlenet(pretrained=True).cuda()
            print(self.classification_net)
        else:
            self.classification_net = models.mobilenet_v2(
                pretrained=True).cuda()
            print(self.classification_net)
        if torch.cuda.device_count() > 1:
            self.classification_net = torch.nn.DataParallel(
                self.classification_net)
        self.criterion = nn.CrossEntropyLoss().cuda()

        self.encoder = Prep_pureUnet(config=config).cuda()
        if torch.cuda.device_count() > 1:
            self.encoder = torch.nn.DataParallel(self.encoder)
        print(self.encoder)
        self.optimizer = torch.optim.Adam(self.encoder.parameters())
        """ Noise Layers """
        self.noise_layers = [Identity()]
        # self.cropout_layer = Cropout(config).cuda()
        self.jpeg_layer_80 = DiffJPEG(256,
                                      256,
                                      quality=80,
                                      differentiable=True).cuda()
        self.jpeg_layer_90 = DiffJPEG(256,
                                      256,
                                      quality=90,
                                      differentiable=True).cuda()
        self.jpeg_layer_70 = DiffJPEG(256,
                                      256,
                                      quality=70,
                                      differentiable=True).cuda()
        self.jpeg_layer_60 = DiffJPEG(256,
                                      256,
                                      quality=60,
                                      differentiable=True).cuda()
        self.jpeg_layer_50 = DiffJPEG(256,
                                      256,
                                      quality=50,
                                      differentiable=True).cuda()
        # self.gaussian = Gaussian().cuda()
        # self.dropout = Dropout(self.config,keep_ratio_range=(0.5,0.75)).cuda()
        # self.resize = Resize().cuda()
        # self.crop_layer = Crop((0.2, 0.5), (0.2, 0.5)).cuda()
        self.noise_layers.append(self.jpeg_layer_80)
        self.noise_layers.append(self.jpeg_layer_90)
        self.noise_layers.append(self.jpeg_layer_70)
        self.noise_layers.append(self.jpeg_layer_60)
        self.noise_layers.append(self.jpeg_layer_50)
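A tiny configuration sketch for the architecture branch at the top of this example, assuming GlobalConfig exposes an architecture attribute as the elif chain implies (the rest of GlobalConfig is not shown here):

# Hypothetical: only the 'architecture' field is implied by the code above.
config = GlobalConfig()
config.architecture = 'DenseNet'      # -> models.densenet121(pretrained=True)
net = HighQualityNet(config=config)   # any unrecognized value falls back to mobilenet_v2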