Example #1
from torch.utils.data import ConcatDataset, DataLoader
from torchvision import transforms


def get_cat_loader(batch_size):
    # Use a different name for the composed pipeline so the torchvision
    # `transforms` module is not shadowed inside the function.
    transform = transforms.Compose(
        [transforms.Resize(64), transforms.ToTensor()])
    ds64 = DatasetFromFolder("data/cats_bigger_than_64x64",
                             transform=transform)
    ds128 = DatasetFromFolder("data/cats_bigger_than_128x128",
                              transform=transform)
    # Merge both folders and wrap them in a single shuffled loader.
    ds_concat = ConcatDataset((ds64, ds128))
    data_loader = DataLoader(ds_concat, batch_size=batch_size, shuffle=True)
    return data_loader
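
Every example on this page relies on a project-specific DatasetFromFolder class whose definition is not shown and whose constructor arguments differ from project to project. As a rough point of reference only, a minimal sketch of such a dataset (assuming it simply lists the image files in one folder and applies an optional transform) could look like the following:

import os
from PIL import Image
from torch.utils.data import Dataset

IMG_EXTENSIONS = ('.png', '.jpg', '.jpeg', '.bmp')

class DatasetFromFolder(Dataset):
    """Minimal sketch: yields transformed images from a single folder."""
    def __init__(self, image_dir, transform=None):
        self.image_paths = [os.path.join(image_dir, f)
                            for f in sorted(os.listdir(image_dir))
                            if f.lower().endswith(IMG_EXTENSIONS)]
        self.transform = transform

    def __getitem__(self, index):
        img = Image.open(self.image_paths[index]).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        return img

    def __len__(self):
        return len(self.image_paths)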
Example #2
def __get_test_set(self):
    root_dir = self.__dir_exist(self.data_dir)
    test_dir = self.__dir_exist(join(root_dir, "test"))
    return DatasetFromFolder(test_dir,
                             colordim=self.colordim,
                             size=self.size,
                             _input_transform=self.input_transform,
                             _target_transform=self.target_transform,
                             suffix=self.suffix)
Example #3
def init_weights(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        # Kaiming (He) initialization for convolutional layers
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif classname.find('BatchNorm') != -1:
        # Initialize the BatchNorm scale and bias
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)


# Main entry point
if __name__ == '__main__':
    '''
    Load the folder containing the images and crop each image to 256x256.
    The image data is wrapped into a dataset object: the first half (400) of
    the images in the folder are used as secret images and the second half
    (400) as cover images, giving 400 secret/cover pairs in total.
    '''
    dataset = DatasetFromFolder('./data', crop_size=256)
    # Then use torch.utils.data.DataLoader to split the dataset into batches
    # (here one pair per batch, shuffled, loaded with 4 worker processes).
    dataloader = DataLoader(dataset, 1, shuffle=True, num_workers=4)
    # Instantiate the hiding network
    hide_net = Hide()
    # Initialize its weights
    '''
    apply() recursively walks every module in the network and calls the given
    function on each one, so all the layers of the model are initialized by
    this single call (see the short sketch after this example).
    '''
    hide_net.apply(init_weights)  # recursively calls init_weights on every submodule of hide_net
    # Instantiate the reveal network
    reveal_net = Reveal()
    # Initialize its weights
    reveal_net.apply(init_weights)
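
The recursive behaviour of apply() described in the docstring above is easy to check on a toy model. The following is only an illustrative sketch (the Sequential model and its layers are placeholders, not part of the project above): apply() visits every submodule, including nested containers, and runs init_weights on each of them.

import torch.nn as nn
from torch.nn import init

def init_weights(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)

# Toy model with a nested container; apply() reaches the inner Conv2d as well.
net = nn.Sequential(
    nn.Conv2d(3, 16, 3, padding=1),
    nn.BatchNorm2d(16),
    nn.Sequential(nn.Conv2d(16, 3, 3, padding=1)),
)
net.apply(init_weights)  # every Conv2d and BatchNorm2d above is now initialized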
Example #4
parser.add_argument('--wavenet_model', '-w', help='Trained WaveNet model')
parser.add_argument('--encoder_model', '-e', help='Trained Encoder model')
parser.add_argument('--input', '-i', help='Input file name')
parser.add_argument('--output',
                    '-o',
                    default='result.wav',
                    help='Output file name')

args = parser.parse_args()

if args.use_cuda and not torch.cuda.is_available():
    raise Exception('No GPU found, please run without --cuda')
device = torch.device('cuda' if args.use_cuda else 'cpu')

dataset = DatasetFromFolder(args.input, 'file', params.sr, params.length,
                            params.frame_length, params.hop, params.n_mels,
                            'valid', None)

data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=1)

encoder = Encoder(params.upscale_factors,
                  params.n_wavenets * params.n_layers * params.n_loops,
                  params.r, params.n_mels)
wavenet = UniWaveNet(params.n_wavenets, params.n_layers, params.n_loops,
                     params.a, params.r, params.s)
encoder = encoder.to(device)
wavenet = wavenet.to(device)

encoder.load_state_dict(torch.load(args.encoder_model, map_location=device))
wavenet.load_state_dict(torch.load(args.wavenet_model, map_location=device))
Example #5
parser.add_argument('--start_iteration', '-i', type=int, default=1,
                    help='Start iteration when resuming training')
parser.add_argument('--encoder_path', '-e', default=None,
                    help='Trained encoder path for resuming')
parser.add_argument('--wavenet_path', '-w', default=None,
                    help='Trained wavenet path for resuming')
parser.add_argument('--optimizer_path', '-o', default=None,
                    help='Optimizer state path for resuming')
args = parser.parse_args()

if args.use_cuda and not torch.cuda.is_available():
    raise Exception('No GPU found, please run without --cuda')
device = torch.device('cuda' if args.use_cuda else 'cpu')

train_dataset = DatasetFromFolder(
    params.root, params.dataset_type, params.sr, params.length,
    params.frame_length, params.hop, params.n_mels, 'train',
    params.seed)
valid_dataset = DatasetFromFolder(
    params.root, params.dataset_type, params.sr, params.length,
    params.frame_length, params.hop, params.n_mels, 'valid',
    params.seed)
train_data_loader = torch.utils.data.DataLoader(
    dataset=train_dataset,
    batch_size=params.batch_size,
    shuffle=True)
valid_data_loader = torch.utils.data.DataLoader(
    dataset=valid_dataset,
    batch_size=params.batch_size,
    shuffle=False)

encoder = Encoder(
Example #6
def train():
    args = parser.parse_args()
    print("Number of GPUS available" + str(torch.cuda.device_count()))
    model = Net().cuda()
    #model.load_state_dict(torch.load("/home/yzm/pyproject/hw4/chkpt/guassian/model_ffinal_epoch.state"))
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=0.9,
                          weight_decay=0.0001)

    # learning-rate scheduler: decay lr by 10x every 10 epochs (stepped once per epoch below)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=10,
                                                gamma=0.1)

    dataset = DatasetFromFolder()

    dataloader = DataLoader(dataset=dataset,
                            batch_size=args.batch_size,
                            shuffle=True)
    print(
        f'start with dataloader: {len(dataloader)} batches, batch size {args.batch_size}'
    )
    model.train()
    iterationnum = 0
    losslist = []
    iteration = []
    for ei in range(0, args.Epochs):
        train_loss = 0

        for x, target in dataloader:
            iterationnum = iterationnum + 1
            x = x.cuda()
            target = target.cuda()
            y = model(x)
            loss = 100 - PSNR(y, target)  # PSNR helper not shown here; see the sketch after this example
            iteration.append(iterationnum)
            losslist.append(loss.item())  # store a Python float, not a CUDA tensor
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            print("{%3d}/{%3d} -------loss:{%3f}" %
                  (ei, args.Epochs - 1, loss))

            train_loss += loss.item() / len(dataloader)
        scheduler.step()
        print(f"----------------{ei}epoch's loss is{train_loss}")

        if ei % args.save_every == args.save_every - 1:
            print(f"save model model_{ei}_epoch")
            torch.save(model.state_dict(),
                       f"../chkpt/{args.exp_name}/model_{ei}_epoch.state")
    torch.save(model.state_dict(),
               f"../chkpt/{args.exp_name}/model_final_epoch.state")
    plt.figure()
    plt.plot(iteration, losslist, label='loss')
    plt.legend()
    # Save the figure before showing it; some backends clear it after show().
    plt.savefig("/home/yzm/pyproject/hw4/experience/loss.jpg")
    plt.show()
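
The PSNR helper used as the training objective above is not defined in this snippet. A minimal PyTorch sketch of peak signal-to-noise ratio for images in the [0, 1] range might look like the following; the function name matches the call above, but the max_val argument and the exact formulation are assumptions:

import torch

def PSNR(output, target, max_val=1.0):
    # Peak signal-to-noise ratio in dB: 10 * log10(MAX^2 / MSE).
    mse = torch.mean((output - target) ** 2)
    return 10.0 * torch.log10(max_val ** 2 / mse)

With inputs normalized to [0, 1], the quantity 100 - PSNR(y, target) then acts as a loss that decreases as the reconstruction improves.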