def set_data():
    """Build train/valid/test DataLoaders from ImageFolder directory trees.

    Reads options via ``options.train_options()`` and expects
    ``<dataroot>/{training,validation,testing}`` to each contain a
    class-subfolder layout as required by ``torchvision.datasets.ImageFolder``.

    Returns:
        dict: split name ('train' / 'valid' / 'test') -> DataLoader.
    """
    opt = options.train_options()
    root = str(opt.dataroot)
    split_dirs = {
        'train': os.path.join(root, "training"),
        'valid': os.path.join(root, "validation"),
        'test': os.path.join(root, "testing"),
    }

    # Resize -> random crop -> tensor -> map pixels from [0, 1] to [-1, 1].
    transform = transforms.Compose([
        transforms.Resize(opt.isize),
        transforms.RandomCrop(opt.cropsize),
        transforms.ToTensor(),
        transforms.Normalize((0.5, ), (0.5, )),
    ])

    # NOTE(review): shuffle=True and drop_last=True on the *test* split is
    # unusual — it randomises evaluation order and discards the final partial
    # batch. Kept as-is to preserve existing behaviour; confirm intent.
    dataloader = {
        split: DataLoader(dataset=torchvision.datasets.ImageFolder(
                              root=path, transform=transform),
                          batch_size=opt.batch,
                          shuffle=True,
                          num_workers=opt.workers,
                          drop_last=True)
        for split, path in split_dirs.items()
    }
    print("data loading complete")
    return dataloader
Beispiel #2
0
    inputs = inputs.cpu().data
    inputs = inputs / 2 + 0.5
    if not os.path.exists('./result'):
        os.mkdir('./result')
    if not os.path.isdir(os.path.join("result", exp, "test_real")):
        utility.mkdir_p(os.path.join("result", exp, "test_real"))
    if not os.path.isdir(os.path.join("result", exp, "test_fake")):
        utility.mkdir_p(os.path.join("result", exp, "test_fake"))
    t_fake_name = 'fake' + exp + '_' + str(epoch) + '.png'
    t_real_name = 'real' + exp + '_' + str(epoch) + '.png'
    save_image(recon, os.path.join(".", "result", exp, "test_fake",
                                   t_fake_name))
    save_image(inputs,
               os.path.join(".", "result", exp, "test_real", t_real_name))

    return top1.avg


def save_checkpoint(state, is_best, checkpoint, filename):
    """Serialize *state* to ``checkpoint/filename`` via ``torch.save``.

    When *is_best* is truthy, the freshly written file is additionally
    copied to ``checkpoint/model_best.pth.tar`` so the best snapshot is
    always available under a fixed name.
    """
    target = os.path.join(checkpoint, filename)
    torch.save(state, target)
    if not is_best:
        return
    best_path = os.path.join(checkpoint, 'model_best.pth.tar')
    shutil.copyfile(target, best_path)


if __name__ == '__main__':
    # Parse training options from the command line and launch training.
    opt = options.train_options()
    # Best-metric tracker, initialised high; presumably lower is better
    # (a loss/error rather than an accuracy) — TODO confirm against main().
    best_acc = 1000
    main(opt)
Beispiel #3
0
                axs[i, j].axis('off')
                cnt += 1
        fig.savefig("images/%s/%d_%d.png" %
                    (self.dataset_name, epoch, batch_i))
        plt.close()

    def save_model(self, epochs, exp_dir):
        """Dump HDF5 weight snapshots of every sub-network into *exp_dir*.

        Each file is named ``<tag>_weights_<epochs>.h5``; the CTC model is
        only saved when ``self.args.ctc_condition`` is truthy.
        """
        def _dump(model, tag):
            # One snapshot per sub-network, tagged with the epoch count.
            model.save_weights(
                join(exp_dir, tag + '_weights_{}.h5').format(epochs))

        if self.args.ctc_condition:
            _dump(self.ctc_model, 'ctc')
        _dump(self.d_A, 'd_A')
        _dump(self.d_B, 'd_B')
        _dump(self.g_AB, 'g_AB')
        _dump(self.g_BA, 'g_BA')
        _dump(self.combined, 'combined')


if __name__ == '__main__':
    # Parse training options from the command line.
    args = train_options()

    # Ensure the experiment output directory exists before training starts.
    if not os.path.exists(args.exp_dir):
        os.makedirs(args.exp_dir)

    # Build the CycleGAN and run the full training loop.
    gan = CycleGAN(args)
    gan.train(epochs=args.epoch, batch_size=args.batch)
Beispiel #4
0
def dataspread():
    """Rebuild the dataset tree by splitting source class folders.

    Reads options via ``options.train_options()``. Normal-class images are
    shuffled and split 80/20 into train+valid vs. test, with 10% of the
    80% used for validation. Each abnormal class contributes an equal share
    of test images so the abnormal test set roughly matches the normal test
    set in size. Copied files are renamed to a global running counter
    ("<n>.png"). The existing ``dataroot`` tree is deleted and recreated.

    Returns:
        0 when a requested class folder is missing from the data source;
        None on success.
    """
    opt = options.train_options()
    normal = str(opt.normal_class).replace(" ", "").split(",")
    abnormal = str(opt.abnormal_class).replace(" ", "").split(",")
    dataroot = opt.dataroot
    datasource = opt.datasource
    print("Spread dataset")

    # Every requested class must exist as a subfolder of the data source.
    available = os.listdir(datasource)
    for cls in normal + abnormal:
        if cls not in available:
            print("no class in data source")
            return 0

    # Rebuild the output tree from scratch; makedirs creates parents, so
    # the intermediate training/validation/testing dirs come for free.
    if os.path.isdir(dataroot):
        shutil.rmtree(dataroot, ignore_errors=True)
    for path in (dataroot,
                 os.path.join(dataroot, "training", "1"),
                 os.path.join(dataroot, "validation", "1"),
                 os.path.join(dataroot, "testing", "1"),
                 os.path.join(dataroot, "testing", "0")):
        os.makedirs(path, exist_ok=True)

    # Collect absolute paths of all normal-class images, then shuffle.
    abs_normal_list = []
    for cls in normal:
        cls_dir = os.path.join(datasource, cls)
        for fname in os.listdir(cls_dir):
            abs_normal_list.append(os.path.join(cls_dir, fname))
    random.shuffle(abs_normal_list)

    cut = int(len(abs_normal_list) * 0.8)
    tv_list = abs_normal_list[:cut]
    test_list = abs_normal_list[cut:]
    valid_cut = int(len(tv_list) * 0.1)
    valid_list = tv_list[:valid_cut]
    train_list = tv_list[valid_cut:]

    def _copy_numbered(files, dst_dir, start):
        # Copy each file into dst_dir as "<counter>.png"; return next counter.
        counter = start
        for src in files:
            shutil.copy(src, os.path.join(dst_dir, str(counter) + ".png"))
            counter += 1
        return counter

    nrm = 0
    nrm = _copy_numbered(train_list,
                         os.path.join(dataroot, "training", "1"), nrm)
    nrm = _copy_numbered(valid_list,
                         os.path.join(dataroot, "validation", "1"), nrm)
    nrm = _copy_numbered(test_list,
                         os.path.join(dataroot, "testing", "1"), nrm)

    # Abnormal test images: an equal per-class share, totalling roughly the
    # size of the normal test set.
    per_class = len(test_list) // len(abnormal)
    ab_root = os.path.join(dataroot, "testing", "0")
    for cls in abnormal:
        files = os.listdir(os.path.join(datasource, cls))
        random.shuffle(files)
        for fname in files[:per_class]:
            shutil.copy(os.path.join(datasource, cls, fname),
                        os.path.join(ab_root, str(nrm) + ".png"))
            nrm += 1
    print("complete data split")