def __init__(self, height, width, differentiable=True, quality=80, config=GlobalConfig()):
    '''
    Initialize the DiffJPEG layer.
    Inputs:
        height(int): Original image height
        width(int): Original image width
        differentiable(bool): If True, uses the custom differentiable rounding
            function; if False, uses standard torch.round
        quality(float): Quality factor for the JPEG compression scheme.
    '''
    super(DiffJPEG, self).__init__()
    self.config = config
    self.quality = quality
    if differentiable:
        rounding = diff_round
    else:
        rounding = torch.round
    factor = quality_to_factor(quality)
    self.compress = compress_jpeg(rounding=rounding, factor=factor)
    self.decompress = decompress_jpeg(height, width, rounding=rounding, factor=factor)
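# Hedged usage sketch (not part of the original source): how a DiffJPEG layer
# like the one above is typically driven as a differentiable noise layer.
# Assumes the layer's forward runs compress then decompress and that inputs
# are float image batches of shape (N, 3, 256, 256); names are illustrative.
import torch

def jpeg_roundtrip_example(encoded_images: torch.Tensor) -> torch.Tensor:
    # quality=80 mirrors the constructor default above.
    jpeg = DiffJPEG(height=256, width=256, differentiable=True, quality=80)
    degraded = jpeg(encoded_images)  # differentiable, so gradients reach the encoder
    return degraded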
def __init__(self):
    self.config = GlobalConfig()
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
    print("torch.distributed.is_available: " + str(torch.distributed.is_available()))
    print("Device Count: {0}".format(torch.cuda.device_count()))
    transform = transforms.Compose([
        transforms.Resize(self.config.Width),
        transforms.RandomCrop(self.config.Width),
        transforms.ToTensor(),
        transforms.Normalize(mean=self.config.mean, std=self.config.std)
    ])
    # Creates training set. Note: this ImageFolder loader is immediately
    # overwritten by the MyDataset loader below, so it is effectively dead code.
    self.train_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(self.config.TRAIN_PATH, transform),
        batch_size=self.config.train_batch_size,
        num_workers=4, pin_memory=True, shuffle=True, drop_last=True)
    self.train_dataset = MyDataset(root='F:\\ILSVRC2012_img_val\\', filename='./val.txt')
    print(len(self.train_dataset))
    self.train_loader = data.DataLoader(
        dataset=self.train_dataset,
        batch_size=self.config.train_batch_size,
        shuffle=True, num_workers=4)
    self.net = HighQualityNet(config=self.config)
def test_write_to_log(self):
    config = GlobalConfig()
    config.set_log_path(f"{os.getcwd()}/test.txt")
    config.write_to_log("TESTING")
    with open(f"{os.getcwd()}/test.txt", "r") as f:
        # Time-sensitive: this assumes datetime.now() here renders the same
        # timestamp that write_to_log produced a moment earlier.
        self.assertEqual(f.readline(), f"{datetime.now()}: TESTING \n")
    os.remove(f"{os.getcwd()}/test.txt")
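# Hedged variant (an assumption, not from the source): the assertion above can
# flake when the clock ticks between write and read. One common fix is to
# freeze the clock with unittest.mock; "config_module" below stands in for
# whichever module write_to_log imports datetime from.
from datetime import datetime
from unittest import mock

def test_write_to_log_frozen_clock(self):
    fixed = datetime(2020, 1, 1, 12, 0, 0)
    with mock.patch("config_module.datetime") as fake_dt:
        fake_dt.now.return_value = fixed  # every now() call returns `fixed`
        config = GlobalConfig()
        config.set_log_path(f"{os.getcwd()}/test.txt")
        config.write_to_log("TESTING")
    with open(f"{os.getcwd()}/test.txt", "r") as f:
        self.assertEqual(f.readline(), f"{fixed}: TESTING \n")
    os.remove(f"{os.getcwd()}/test.txt")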
def __init__(self, config=GlobalConfig()):
    super(Net, self).__init__()
    self.config = config
    self.device = config.device
    self.m1 = PrepNetwork().cuda()
    self.m2 = HidingNetwork().cuda()
    self.m3 = RevealNetwork().cuda()
def __init__(self, config=GlobalConfig(), resize_ratio_range=(0.5, 2), interpolation_method='nearest'):
    super(Resize, self).__init__()
    self.config = config
    self.device = config.device
    self.resize_ratio_min = resize_ratio_range[0]
    self.resize_ratio_max = resize_ratio_range[1]
    self.interpolation_method = interpolation_method
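# Hedged sketch (assumption, not the project's actual forward): a Resize noise
# layer with the fields above typically samples a ratio in
# [resize_ratio_min, resize_ratio_max] per call and rescales with F.interpolate.
import random
import torch.nn.functional as F

def resize_forward_sketch(self, noised_image):
    # noised_image: (N, C, H, W) float tensor
    ratio = random.uniform(self.resize_ratio_min, self.resize_ratio_max)
    return F.interpolate(noised_image, scale_factor=ratio,
                         mode=self.interpolation_method)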
def initData(self):
    self.initDir()
    self.tempconfig = GlobalConfig(TEMP_FILE)
    if os.path.exists(TEMP_FILE):
        self.current_cluster_index = int(self.tempconfig.get("TEMP", "cluster_index"))
    else:
        self.current_cluster_index = 1
    self.init_cluster_data(self.current_cluster_index)
def initDir(self):
    if not os.path.exists(ROOT_DIR):
        os.mkdir(ROOT_DIR)
    if not os.path.exists(CLUSTER_DIR):
        os.mkdir(CLUSTER_DIR)
    if not os.path.exists(TEMP_FILE):
        self.tempconfig = GlobalConfig(TEMP_FILE)
        self.tempconfig.add_section("TEMP")
        self.tempconfig.set("TEMP", "cluster_index", "1")
        self.tempconfig.save(TEMP_FILE)
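# Hedged sketch (assumption): from its usage here -- GlobalConfig(path) plus
# get/getboolean/set/add_section/has_section and save(path) -- this flavor of
# GlobalConfig looks like a thin configparser wrapper, roughly:
import configparser

class GlobalConfigSketch(configparser.ConfigParser):
    def __init__(self, path=None):
        super().__init__()
        if path is not None:
            # Reading a missing file is a no-op, matching the callers that
            # construct the config before checking os.path.exists.
            self.read(path, encoding="utf-8")

    def save(self, path):
        # Persist all sections back to the ini file.
        with open(path, "w", encoding="utf-8") as f:
            self.write(f)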
def test_type_assertation(self):
    def _test(obj, type_):
        try:
            # Bug fix: the original always called _type_assertation(str, str),
            # ignoring the arguments, so four of the five checks were vacuous.
            config._type_assertation(obj, type_)
        except AssertionError:
            return True
        return False

    config = GlobalConfig()
    self.assertFalse(_test(str, str))
    self.assertFalse(_test(float, float))
    self.assertFalse(_test(int, int))
    self.assertTrue(_test(float, int))
    self.assertTrue(_test(str, int))
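# Hedged sketch (assumption): the contract exercised above -- no error when the
# two type objects match, AssertionError when they differ -- suggests
# _type_assertation is roughly this:
def _type_assertation_sketch(actual_type, expected_type):
    # The test passes type objects (str, int, float), so compare types directly.
    assert actual_type == expected_type, (
        f"expected {expected_type}, got {actual_type}")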
def main():
    global option
    option = GlobalConfig()
    click.echo("Loading the news...")
    story_list = newsfeeds.feeder()
    global exit_now
    exit_now = False
    click.clear()
    global mixed_story_list
    mixed_story_list = mixer(story_list, option.article_limit)
    default_display(mixed_story_list)
def __init__(self, height_ratio_range=(0.5, 1), width_ratio_range=(0.5, 1), config=GlobalConfig()):
    """
    :param height_ratio_range: (min, max) fraction of the image height kept by the crop
    :param width_ratio_range: (min, max) fraction of the image width kept by the crop
    """
    super(Crop, self).__init__()
    self.config = config
    self.height_ratio_range = height_ratio_range
    self.width_ratio_range = width_ratio_range
    self.h_start, self.h_end, self.w_start, self.w_end = None, None, None, None
    self.bool = False
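# Hedged sketch (assumption, HiDDeN-style): a Crop layer with these ranges
# usually samples one random rectangle per batch and stores the corners in
# h_start/h_end/w_start/w_end before slicing the image tensor.
import random

def sample_crop_sketch(self, image_height, image_width):
    h_ratio = random.uniform(*self.height_ratio_range)
    w_ratio = random.uniform(*self.width_ratio_range)
    crop_h = int(image_height * h_ratio)
    crop_w = int(image_width * w_ratio)
    self.h_start = random.randint(0, image_height - crop_h)
    self.w_start = random.randint(0, image_width - crop_w)
    self.h_end = self.h_start + crop_h
    self.w_end = self.w_start + crop_w
    # The forward would then return image[:, :, self.h_start:self.h_end,
    #                                    self.w_start:self.w_end]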
def test_set_log_path(self):
    config = GlobalConfig()
    error = False
    for x in [123, "aaaaaaaaabbbbbbbbbccccccccccc/aaaaaaaaaabbbbbbbccccccc/test.txt"]:
        try:
            config.set_log_path(x)
        except AssertionError:
            error = True
    self.assertTrue(error)
    try:
        config.set_log_path(f"{os.getcwd()}/test.txt")
        error = False
    except AssertionError:
        error = True
    self.assertFalse(error)
def loadServerIni(self, wtype, wid):
    file = os.path.join(CLUSTER_DIR, "Cluster_" + self.getCurrentCluster(),
                        wtype + "_" + str(wid), "server.ini")
    if not os.path.exists(file):
        file = os.path.join(CONFIG_DIR, "server.ini")
    self.serverconfig[wid] = GlobalConfig(file)
    self.serverconfig[wid].server_port = self.serverconfig[wid].get("NETWORK", "server_port")
    self.serverconfig[wid].is_master = self.serverconfig[wid].getboolean("SHARD", "is_master")
    self.serverconfig[wid].name = self.serverconfig[wid].get("SHARD", "name")
    self.serverconfig[wid].id = self.serverconfig[wid].get("SHARD", "id")
    self.serverconfig[wid].master_server_port = self.serverconfig[wid].get("STEAM", "master_server_port")
    self.serverconfig[wid].authentication_port = self.serverconfig[wid].get("STEAM", "authentication_port")
    if self.serverconfig[wid].has_section("SERVER"):
        self.serverconfig[wid].ip = self.serverconfig[wid].get("SERVER", "ip")
        self.serverconfig[wid].alias = self.serverconfig[wid].get("SERVER", "alias")
    else:
        self.serverconfig[wid].add_section("SERVER")
        self.serverconfig[wid].set("SERVER", "ip", "127.0.0.1")
        self.serverconfig[wid].set("SERVER", "alias", wtype + "_" + str(wid))
        # Bug fix: also mirror the freshly written defaults onto the attributes,
        # so the alias check below never reads an attribute that was only set
        # in the has_section branch.
        self.serverconfig[wid].ip = "127.0.0.1"
        self.serverconfig[wid].alias = wtype + "_" + str(wid)
    # Backfill any empty values with defaults (randomized ports avoid clashes).
    if self.serverconfig[wid].server_port == "":
        self.serverconfig[wid].set("NETWORK", "server_port", str(10998 + random.randint(1, 100)))
    if self.serverconfig[wid].name == "":
        self.serverconfig[wid].set("SHARD", "name", wtype + str(wid))
    if self.serverconfig[wid].id == "":
        self.serverconfig[wid].set("SHARD", "id", str(wid))
    if self.serverconfig[wid].master_server_port == "":
        self.serverconfig[wid].set("STEAM", "master_server_port", str(27016 + random.randint(1, 100)))
    if self.serverconfig[wid].authentication_port == "":
        self.serverconfig[wid].set("STEAM", "authentication_port", str(8766 + random.randint(1, 100)))
    if self.serverconfig[wid].alias == "":
        self.serverconfig[wid].set("SERVER", "alias", wtype + "_" + str(wid))
def read_cluster_data(self, file):
    if not os.path.exists(file):
        file = os.path.join(CONFIG_DIR, "cluster.ini")
    self.cluster_config = GlobalConfig(file)
    self.steam_group_id.setText(self.cluster_config.get("STEAM", "steam_group_id"))
    self.steam_group_only.setChecked(self.cluster_config.getboolean("STEAM", "steam_group_only"))
    self.steam_group_admin.setChecked(self.cluster_config.getboolean("STEAM", "steam_group_admins"))
    self.pvp.setChecked(self.cluster_config.getboolean("GAMEPLAY", "pvp"))
    self.game_mode.setCurrentIndex(
        self.game_mode_value.index(self.cluster_config.get("GAMEPLAY", "game_mode")))
    self.pause_when_empty.setChecked(self.cluster_config.getboolean("GAMEPLAY", "pause_when_empty"))
    self.vote.setChecked(self.cluster_config.getboolean("GAMEPLAY", "vote_enabled"))
    self.max_players.setText(self.cluster_config.get("GAMEPLAY", "max_players"))
    self.cluster_name.setText(self.cluster_config.get("NETWORK", "cluster_name"))
    self.cluster_description.setText(self.cluster_config.get("NETWORK", "cluster_description"))
    self.cluster_intention.setCurrentIndex(
        self.cluster_intention_value.index(self.cluster_config.get("NETWORK", "cluster_intention")))
    if self.cluster_config.get("NETWORK", "cluster_language") == "zh":
        self.zh_rbtn.setChecked(True)
    else:
        self.en_rbtn.setChecked(True)
    self.white_players.setText(self.cluster_config.get("NETWORK", "whitelist_slots"))
    self.password.setText(self.cluster_config.get("NETWORK", "cluster_password"))
    self.setServerIP(self.masterip, self.cluster_config.get("SHARD", "master_ip"))
def __init__(self, config=GlobalConfig()):
    super(UnetInception, self).__init__()
    self.config = config
    # input channel: 3, output channel: 96
    # Features with kernel size 7 ----> channel: 128
    self.downsample_8_Cover = nn.Sequential(
        nn.Conv2d(3, 32, kernel_size=3, stride=1, dilation=1, padding=1),
        nn.ELU(inplace=True),
        SingleConv(32, out_channels=32, kernel_size=3, stride=1, dilation=1, padding=1),
    )
    # 128
    self.downsample_7_Cover = nn.Sequential(
        PureUpsampling(scale=1 / 2),
        SingleConv(32, out_channels=64, kernel_size=3, stride=1, dilation=1, padding=1),
        SingleConv(64, out_channels=64, kernel_size=3, stride=1, dilation=1, padding=1))
    # 64
    self.downsample_6_Cover = nn.Sequential(
        PureUpsampling(scale=1 / 2),
        SingleConv(64, out_channels=128, kernel_size=3, stride=1, dilation=1, padding=1),
        SingleConv(128, out_channels=128, kernel_size=3, stride=1, dilation=1, padding=1))
    self.downsample_6_Secret = nn.Sequential(
        nn.Conv2d(1, 32, kernel_size=3, stride=1, dilation=1, padding=1),
        nn.ELU(inplace=True),
        SingleConv(32, out_channels=32, kernel_size=3, stride=1, dilation=1, padding=1),
    )
    # 32
    self.downsample_5_Cover = nn.Sequential(
        PureUpsampling(scale=1 / 2),
        SingleConv(128, out_channels=256, kernel_size=3, stride=1, dilation=1, padding=1),
        SingleConv(256, out_channels=256, kernel_size=3, stride=1, dilation=1, padding=1))
    self.downsample_5_Secret = nn.Sequential(
        PureUpsampling(scale=1 / 2),
        SingleConv(32, out_channels=64, kernel_size=3, stride=1, dilation=1, padding=1),
        SingleConv(64, out_channels=64, kernel_size=3, stride=1, dilation=1, padding=1))
    # 16
    self.downsample_4_Cover = nn.Sequential(
        PureUpsampling(scale=1 / 2),
        SingleConv(256, out_channels=256, kernel_size=3, stride=1, dilation=1, padding=1),
        SingleConv(256, out_channels=256, kernel_size=3, stride=1, dilation=1, padding=1))
    self.downsample_4_Secret = nn.Sequential(
        PureUpsampling(scale=1 / 2),
        SingleConv(64, out_channels=64, kernel_size=3, stride=1, dilation=1, padding=1),
        SingleConv(64, out_channels=64, kernel_size=3, stride=1, dilation=1, padding=1))
    # Use four 5x5 conv layers at resolutions of 16 and below
    self.fullConv = nn.Sequential(
        SingleConv(256 + 64, out_channels=256 + 64, kernel_size=5, stride=1, dilation=1, padding=2),
        SingleConv(256 + 64, out_channels=256 + 64, kernel_size=5, stride=1, dilation=1, padding=2),
        SingleConv(256 + 64, out_channels=256, kernel_size=5, stride=1, dilation=1, padding=2),
        SingleConv(256, out_channels=256, kernel_size=5, stride=1, dilation=1, padding=2))
    self.pureUpsamle = PureUpsampling(scale=2)
    # 32
    self.upsample4_3 = nn.Sequential(
        SingleConv(256 * 2 + 64, out_channels=256, kernel_size=3, stride=1, dilation=1, padding=1),
        SingleConv(256, out_channels=128, kernel_size=3, stride=1, dilation=1, padding=1))
    # 64
    self.upsample3_3 = nn.Sequential(
        SingleConv(128 * 2 + 32, out_channels=128, kernel_size=3, stride=1, dilation=1, padding=1),
        SingleConv(128, out_channels=64, kernel_size=3, stride=1, dilation=1, padding=1))
    # 128
    self.upsample2_3 = nn.Sequential(
        SingleConv(64 * 2, out_channels=64, kernel_size=3, stride=1, dilation=1, padding=1),
        SingleConv(64, out_channels=32, kernel_size=3, stride=1, dilation=1, padding=1))
    # 256
    self.upsample1_3 = nn.Sequential(
        SingleConv(32 * 2, out_channels=32, kernel_size=3, stride=1, dilation=1, padding=1),
        SingleConv(32, out_channels=32, kernel_size=3, stride=1, dilation=1, padding=1))
    self.final256 = nn.Sequential(
        nn.Conv2d(32, 3, kernel_size=1, padding=0),
        # nn.Tanh()
    )
def __init__(self, config=GlobalConfig()):
    super(HighQualityNet, self).__init__()
    self.config = config
    """ Settings """
    if self.config.architecture == 'AlexNet':
        self.classification_net = models.alexnet(pretrained=True).cuda()
    elif self.config.architecture == 'ResNet':
        self.classification_net = models.resnet50(pretrained=True).cuda()
    elif self.config.architecture == 'VGG':
        self.classification_net = models.vgg19(pretrained=True).cuda()
    elif self.config.architecture == 'DenseNet':
        self.classification_net = models.densenet121(pretrained=True).cuda()
    elif self.config.architecture == 'ResNet152':
        # The original had a second 'ResNet' branch here, which was unreachable
        # because the first 'ResNet' branch always matched. Renamed to
        # 'ResNet152' (an assumption about the intended config key) so
        # resnet152 can actually be selected.
        self.classification_net = models.resnet152(pretrained=True).cuda()
    elif self.config.architecture == 'GoogleNet':
        self.classification_net = models.googlenet(pretrained=True).cuda()
    else:
        self.classification_net = models.mobilenet_v2(pretrained=True).cuda()
    print(self.classification_net)
    if torch.cuda.device_count() > 1:
        self.classification_net = torch.nn.DataParallel(self.classification_net)
    self.criterion = nn.CrossEntropyLoss().cuda()
    self.encoder = Prep_pureUnet(config=config).cuda()
    if torch.cuda.device_count() > 1:
        self.encoder = torch.nn.DataParallel(self.encoder)
    print(self.encoder)
    self.optimizer = torch.optim.Adam(self.encoder.parameters())
    """ Noise Layers """
    self.noise_layers = [Identity()]
    # self.cropout_layer = Cropout(config).cuda()
    self.jpeg_layer_80 = DiffJPEG(256, 256, quality=80, differentiable=True).cuda()
    self.jpeg_layer_90 = DiffJPEG(256, 256, quality=90, differentiable=True).cuda()
    self.jpeg_layer_70 = DiffJPEG(256, 256, quality=70, differentiable=True).cuda()
    self.jpeg_layer_60 = DiffJPEG(256, 256, quality=60, differentiable=True).cuda()
    self.jpeg_layer_50 = DiffJPEG(256, 256, quality=50, differentiable=True).cuda()
    # self.gaussian = Gaussian().cuda()
    # self.dropout = Dropout(self.config, keep_ratio_range=(0.5, 0.75)).cuda()
    # self.resize = Resize().cuda()
    # self.crop_layer = Crop((0.2, 0.5), (0.2, 0.5)).cuda()
    self.noise_layers.append(self.jpeg_layer_80)
    self.noise_layers.append(self.jpeg_layer_90)
    self.noise_layers.append(self.jpeg_layer_70)
    self.noise_layers.append(self.jpeg_layer_60)
    self.noise_layers.append(self.jpeg_layer_50)
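# Hedged usage sketch (assumption, not from the source): with noise_layers
# populated as above, a robustness-training step typically picks one layer at
# random per batch so the encoder learns to survive all of them.
import random

def apply_random_noise_sketch(self, encoded_images):
    noise_layer = random.choice(self.noise_layers)
    return noise_layer(encoded_images)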
import asyncio
import json
import re

# from src.io import Document
from config import GlobalConfig
from genericio import DatabaseIOManager
from asyncio.streams import StreamReader, StreamWriter

gConfig = GlobalConfig()
IOManager = DatabaseIOManager


class RequestHandler:
    def __init__(self, dbTitle: str) -> None:
        self.dbTitle = dbTitle

    async def on_data_fetch(self, collectionTitle: str, clusterTitle: str, query: str):
        matches = []
        # Iterate over each cell in the cluster
        cells = IOManager.getClusterElements(self.dbTitle, collectionTitle, clusterTitle)
        cells = tuple(map(json.loads, cells))
        # Search each cell for the query element.
        # If it's a match, append its ID to the "matches" list.
        # Return the "matches" list.
        pass

    async def on_data_update(self, tableID: str, cellID: str, entry):
        pass

    async def on_data_entry(self, tableID: str, cellID: str, entry):
        pass
def __init__(self, config=GlobalConfig()):
    super(LinJingZhiNet, self).__init__()
    self.config = config
    """ Settings """
    self.criterionGAN = GANLoss().cuda()
    self.text_encoder = MLP_encode().cuda()
    if torch.cuda.device_count() > 1:
        self.text_encoder = torch.nn.DataParallel(self.text_encoder)
    self.text_decoder = MLP_decode().cuda()
    if torch.cuda.device_count() > 1:
        self.text_decoder = torch.nn.DataParallel(self.text_decoder)
    self.encoder = UnetInception(config=config).cuda()
    self.decoder = Prep_pureUnet(config=config).cuda()
    if torch.cuda.device_count() > 1:
        self.encoder = torch.nn.DataParallel(self.encoder)
        self.decoder = torch.nn.DataParallel(self.decoder)
    # print(self.encoder)
    self.optimizer_encoder = torch.optim.Adam(self.encoder.parameters())
    self.optimizer_decoder = torch.optim.Adam(self.decoder.parameters())
    """ Noise Layers """
    self.noise_layers = [Identity().cuda()]
    self.jpeg_layer_80 = DiffJPEG(256, 256, quality=80, differentiable=True).cuda()
    self.jpeg_layer_90 = DiffJPEG(256, 256, quality=90, differentiable=True).cuda()
    self.jpeg_layer_70 = DiffJPEG(256, 256, quality=70, differentiable=True).cuda()
    self.jpeg_layer_60 = DiffJPEG(256, 256, quality=60, differentiable=True).cuda()
    self.jpeg_layer_50 = DiffJPEG(256, 256, quality=50, differentiable=True).cuda()
    self.gaussian = Gaussian().cuda()
    self.gaussian_blur = GaussianBlur().cuda()
    self.dropout = Dropout().cuda()
    self.resize = Resize().cuda()
    self.cropout_layer = Cropout().cuda()
    self.crop_layer = Crop().cuda()
    self.noise_layers.extend([
        self.jpeg_layer_80, self.jpeg_layer_90, self.jpeg_layer_70,
        self.jpeg_layer_60, self.jpeg_layer_50,
        self.gaussian, self.resize, self.dropout, self.gaussian_blur,
        self.cropout_layer, self.crop_layer,
    ])
    self.discriminator_patchHidden = NLayerDiscriminator(input_nc=3).cuda()
    if torch.cuda.device_count() > 1:
        self.discriminator_patchHidden = torch.nn.DataParallel(self.discriminator_patchHidden)
    self.optimizer_discrim_patchHiddem = torch.optim.Adam(
        self.discriminator_patchHidden.parameters())
    self.optimizer_text_encoder = torch.optim.Adam(self.text_encoder.parameters())
    self.optimizer_text_decoder = torch.optim.Adam(self.text_decoder.parameters())
    self.bce_with_logits_loss = nn.BCEWithLogitsLoss().cuda()
    self.mse_loss = nn.MSELoss().cuda()
    self.ssim_loss = pytorch_ssim.SSIM().cuda()
    self.vgg_loss = VGGLoss(3, 1, False).cuda()
    if torch.cuda.device_count() > 1:
        self.vgg_loss = torch.nn.DataParallel(self.vgg_loss)
    # Labels used for training the discriminator / adversarial loss
    self.cover_label = 1
    self.encoded_label = 0
    self.roundCount = 1.0
# coding=utf-8
import datetime
import os
import sys

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.metrics import classification_report

import data_helper
from config import GlobalConfig
from text_cnn import *

cf = GlobalConfig()


def print_result(x_list, y_prediction_list, y_true_list, ivocab, label_ivocab, i):
    writer = pd.ExcelWriter("./result/result_" + str(i) + ".xlsx")
    length = len(x_list)
    x_strs = []
    # Loop variable renamed from `i` to avoid shadowing the `i` parameter
    # used for the result filename above.
    for idx in range(length):
        x = x_list[idx]
        x_str = ""
        for w in x:
            if w != 0:
                x_str += ivocab[w]
            else:
                continue
        x_strs.append(x_str)
def __init__(self, config=GlobalConfig()):
    super(Gaussian, self).__init__()
    self.config = config
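# Hedged sketch (assumption): an additive-Gaussian noise layer like the one
# above commonly perturbs the image with zero-mean noise; the 0.01 std here is
# illustrative, not taken from the source.
import torch

def gaussian_forward_sketch(self, noised_image, std=0.01):
    return noised_image + std * torch.randn_like(noised_image)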
def getCurrentCluster(self):
    if os.path.exists(TEMP_FILE):
        tc = GlobalConfig(TEMP_FILE)
        return tc.get("TEMP", "cluster_index")
    else:
        return "1"
def getCurrentCluster(self):
    tc = GlobalConfig(TEMP_FILE)
    return tc.get("TEMP", "cluster_index")
def __init__(self):
    self.config = GlobalConfig()
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
    print("torch.distributed.is_available: " + str(torch.distributed.is_available()))
    print("Device Count: {0}".format(torch.cuda.device_count()))
    # Creates training set
    train_transform = transforms.Compose([
        transforms.Resize(self.config.Width),
        transforms.RandomCrop(self.config.Width),
        transforms.ToTensor(),
        transforms.Normalize(mean=self.config.mean, std=self.config.std)
    ])
    self.train_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(self.config.TRAIN_PATH, train_transform),
        batch_size=self.config.train_batch_size,
        num_workers=4, pin_memory=True, shuffle=True, drop_last=True)
    # Creates water set
    train_water_transform = transforms.Compose([
        transforms.Resize(self.config.Width),
        transforms.RandomCrop(self.config.Width),
        # transforms.Grayscale(),
        transforms.ToTensor(),
        transforms.Normalize(mean=self.config.mean[0], std=self.config.std[0])
    ])
    self.train_water_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(self.config.TRAIN_PATH, train_water_transform),
        batch_size=self.config.train_batch_size,
        num_workers=4, pin_memory=True, shuffle=True, drop_last=True)
    # Creates test set
    test_transform = transforms.Compose([
        transforms.Resize(self.config.Width),
        transforms.RandomCrop(self.config.Width),
        transforms.ToTensor(),
        transforms.Normalize(mean=self.config.mean, std=self.config.std)
    ])
    self.test_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(self.config.TEST_PATH, test_transform),
        batch_size=1, num_workers=4, pin_memory=True, shuffle=True, drop_last=True)
    # Creates water test set
    test_water_transform = transforms.Compose([
        transforms.Resize(self.config.Water_Width),
        transforms.RandomCrop(self.config.Water_Width),
        transforms.Grayscale(),
        transforms.ToTensor(),
        transforms.Normalize(mean=self.config.mean[0], std=self.config.std[0])
    ])
    self.test_water_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(self.config.TEST_PATH, test_water_transform),
        batch_size=1, num_workers=4, pin_memory=True, shuffle=True, drop_last=True)
    self.net = Net()
    self.train_cover, self.train_water = None, None
    self.test_cover, self.test_water = None, None
def __init__(self, config=GlobalConfig()):
    super(Prep_pureUnet, self).__init__()
    self.config = config
    # input channel: 3, output channel: 96
    # Features with kernel size 7 ----> channel: 64
    self.downsample_8 = nn.Sequential(
        nn.Conv2d(3, 32, kernel_size=3, stride=1, dilation=1, padding=1),
        nn.ELU(inplace=True),
        SingleConv(32, out_channels=32, kernel_size=3, stride=1, dilation=1, padding=1),
    )
    # 64
    self.downsample_7 = nn.Sequential(
        PureUpsampling(scale=1 / 2),
        SingleConv(32, out_channels=64, kernel_size=3, stride=1, dilation=1, padding=1),
        SingleConv(64, out_channels=64, kernel_size=3, stride=1, dilation=1, padding=1),
    )
    # 32
    self.downsample_6 = nn.Sequential(
        PureUpsampling(scale=1 / 2),
        SingleConv(64, out_channels=128, kernel_size=3, stride=1, dilation=1, padding=1),
        SingleConv(128, out_channels=128, kernel_size=3, stride=1, dilation=1, padding=1),
    )
    # 32
    self.downsample_5 = nn.Sequential(
        PureUpsampling(scale=1 / 2),
        SingleConv(128, out_channels=256, kernel_size=3, stride=1, dilation=1, padding=1),
        SingleConv(256, out_channels=256, kernel_size=3, stride=1, dilation=1, padding=1),
    )
    # 16
    self.downsample_4 = nn.Sequential(
        PureUpsampling(scale=1 / 2),
        SingleConv(256, out_channels=256, kernel_size=3, stride=1, dilation=1, padding=1),
        SingleConv(256, out_channels=256, kernel_size=3, stride=1, dilation=1, padding=1),
    )
    # Use four 5x5 conv layers at resolutions of 16 and below
    self.fullConv = nn.Sequential(
        SingleConv(256, out_channels=256, kernel_size=5, stride=1, dilation=1, padding=2),
        SingleConv(256, out_channels=256, kernel_size=5, stride=1, dilation=1, padding=2),
        SingleConv(256, out_channels=256, kernel_size=5, stride=1, dilation=1, padding=2),
        SingleConv(256, out_channels=256, kernel_size=5, stride=1, dilation=1, padding=2))
    self.pureUpsamle = PureUpsampling(scale=128 / 64)
    self.pureUpsamle4 = PureUpsampling(scale=128 / 32)
    # 32
    self.upsample4_3 = nn.Sequential(
        SingleConv(512, out_channels=256, kernel_size=3, stride=1, dilation=1, padding=1),
        SingleConv(256, out_channels=128, kernel_size=3, stride=1, dilation=1, padding=1),
    )
    # 32
    self.upsample3_3 = nn.Sequential(
        SingleConv(256, out_channels=128, kernel_size=3, stride=1, dilation=1, padding=1),
        SingleConv(128, out_channels=64, kernel_size=3, stride=1, dilation=1, padding=1),
    )
    self.final64 = nn.Sequential(
        nn.Conv2d(64, 3, kernel_size=1, padding=0),
        nn.Tanh())
    # 64
    self.upsample2_3 = nn.Sequential(
        SingleConv(128, out_channels=64, kernel_size=3, stride=1, dilation=1, padding=1),
        SingleConv(64, out_channels=32, kernel_size=3, stride=1, dilation=1, padding=1),
    )
    self.final128 = nn.Sequential(
        nn.Conv2d(32, 3, kernel_size=1, padding=0),
        nn.Tanh())
    # 128
    self.upsample1_3 = nn.Sequential(
        SingleConv(64, out_channels=32, kernel_size=3, stride=1, dilation=1, padding=1),
        SingleConv(32, out_channels=32, kernel_size=3, stride=1, dilation=1, padding=1),
    )
    self.final256 = nn.Sequential(
        nn.Conv2d(32, 3, kernel_size=1, padding=0),
        nn.Tanh())
def __init__(self):
    self.config = GlobalConfig()
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
    print("torch.distributed.is_available: " + str(torch.distributed.is_available()))
    print("Device Count: {0}".format(torch.cuda.device_count()))
    # Creates training set
    train_transform = transforms.Compose([
        transforms.Resize(self.config.Width),
        transforms.RandomCrop(self.config.Width),
        transforms.ToTensor(),
        transforms.Normalize(mean=self.config.mean, std=self.config.std)
    ])
    self.train_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(self.config.TRAIN_PATH, train_transform),
        batch_size=self.config.train_batch_size,
        num_workers=4, pin_memory=True, shuffle=True, drop_last=True)
    # Creates water set
    train_water_transform = transforms.Compose([
        transforms.Resize(self.config.Water_Width),
        transforms.RandomCrop(self.config.Water_Width),
        transforms.Grayscale(),
        transforms.ToTensor(),
        transforms.Normalize(mean=self.config.mean[0], std=self.config.std[0])
    ])
    self.train_water_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(self.config.TRAIN_PATH, train_water_transform),
        batch_size=self.config.train_batch_size,
        num_workers=4, pin_memory=True, shuffle=True, drop_last=True)
    # Creates test set
    test_transform = transforms.Compose([
        transforms.Resize(self.config.Width),
        transforms.RandomCrop(self.config.Width),
        transforms.ToTensor(),
        transforms.Normalize(mean=self.config.mean, std=self.config.std)
    ])
    self.test_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(self.config.TEST_PATH, test_transform),
        batch_size=1, num_workers=4, pin_memory=True, shuffle=True, drop_last=True)
    # Creates water test set
    test_water_transform = transforms.Compose([
        transforms.Resize(self.config.Water_Width),
        transforms.RandomCrop(self.config.Water_Width),
        transforms.Grayscale(),
        transforms.ToTensor(),
        transforms.Normalize(mean=self.config.mean[0], std=self.config.std[0])
    ])
    self.test_water_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(self.config.TEST_PATH, test_water_transform),
        batch_size=1, num_workers=4, pin_memory=True, shuffle=True, drop_last=True)
    self.net = LinJingZhiNet(config=self.config)
    self.train_cover, self.train_water = None, None
    self.test_cover, self.test_water = None, None
    self.pseudo = np.random.choice(
        [0, 1],
        (self.config.message_length,
         self.config.message_length * self.config.message_length))
def __init__(self, config=GlobalConfig()):
    super(GaussianBlur, self).__init__()
    self.config = config
    self.device = config.device
def __init__(self, config=GlobalConfig(), keep_ratio_range=(0.7, 1)):
    super(Dropout, self).__init__()
    self.config = config
    self.device = config.device
    self.keep_min = keep_ratio_range[0]
    self.keep_max = keep_ratio_range[1]
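# Hedged sketch (assumption, HiDDeN-style): a Dropout noise layer with a keep
# ratio usually mixes encoded and cover pixels through a random binary mask,
# keeping roughly `keep` of the encoded pixels.
import random
import torch

def dropout_forward_sketch(self, encoded_image, cover_image):
    keep = random.uniform(self.keep_min, self.keep_max)
    # Single-channel mask, broadcast across the channel dimension.
    mask = (torch.rand_like(encoded_image[:, :1]) < keep).float()
    return encoded_image * mask + cover_image * (1 - mask)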
#!/usr/bin/python
# coding: utf8
#########################################################################
# Author: Xinyu.Xiang(John)
# email: [email protected]
# Created Time: Tuesday, May 26, 2009, 03:01:09
# File Name: global.py
# Description:
#########################################################################
from config import GlobalConfig

chmFile = None
chmpath = None
mainWindow = None
currentwebview = None
tabs = None
encoding = None

globalcfg = GlobalConfig()