Example 1
def train_model(input_name):
    parser = get_arguments()
    parser.add_argument('--input_dir',
                        help='input image dir',
                        default='Input/Images')
    parser.add_argument('--input_name', help='input image name', required=True)
    parser.add_argument('--mode', help='task to be done', default='train')
    opt = parser.parse_args("")
    opt = functions.post_config(opt)
    Gs = []
    Zs = []
    reals = []
    NoiseAmp = []
    dir2save = functions.generate_dir2save(opt)

    if (os.path.exists(dir2save)):
        print('trained model already exists')
    else:
        try:
            os.makedirs(dir2save)
        except OSError:
            pass
        real = functions.read_image(opt)
        functions.adjust_scales2image(real, opt)
        train(opt, Gs, Zs, reals, NoiseAmp)
        SinGAN_generate(Gs, Zs, reals, NoiseAmp, opt)
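Every snippet in this listing builds on a get_arguments() helper from the project's config module. In the SinGAN-style examples it returns an argparse parser that the caller extends and then parses; elsewhere (Examples 15 and 21) it appears to return the parsed arguments directly. A minimal sketch of the parser-returning variant, with illustrative options and defaults:

import argparse

def get_arguments():
    # Minimal sketch of a SinGAN-style config.get_arguments(); the real parser
    # defines many more hyperparameters. Option names and defaults here are
    # illustrative, not the project's exact set.
    parser = argparse.ArgumentParser()
    parser.add_argument('--not_cuda', action='store_true', help='disable cuda')
    parser.add_argument('--scale_factor', type=float, default=0.75, help='pyramid scale factor')
    parser.add_argument('--ker_size', type=int, default=3, help='conv kernel size')
    parser.add_argument('--num_layer', type=int, default=5, help='layers per scale')
    return parser  # callers add script-specific options, then call parse_args()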
Example 2
def main():
    # Prepare arguments
    opt = get_arguments().parse_args()
    if opt.dataset == "mnist" or opt.dataset == "cifar10":
        opt.num_classes = 10
    elif opt.dataset == "gtsrb":
        opt.num_classes = 43
    else:
        raise Exception("Invalid Dataset")
    if opt.dataset == "cifar10":
        opt.input_height = 32
        opt.input_width = 32
        opt.input_channel = 3
    elif opt.dataset == "gtsrb":
        opt.input_height = 32
        opt.input_width = 32
        opt.input_channel = 3
    elif opt.dataset == "mnist":
        opt.input_height = 28
        opt.input_width = 28
        opt.input_channel = 1
    else:
        raise Exception("Invalid Dataset")

    # Load models and masks
    if opt.dataset == "cifar10":
        netC = PreActResNet18().to(opt.device)
    elif opt.dataset == "gtsrb":
        netC = PreActResNet18(num_classes=43).to(opt.device)
    elif opt.dataset == "mnist":
        netC = NetC_MNIST().to(opt.device)
    else:
        raise Exception("Invalid dataset")

    path_model = os.path.join(
        opt.checkpoints, opt.dataset, opt.attack_mode, "{}_{}_ckpt.pth.tar".format(opt.attack_mode, opt.dataset)
    )
    state_dict = torch.load(path_model)
    print("load C")
    netC.load_state_dict(state_dict["netC"])
    netC.to(opt.device)
    netC.eval()
    netC.requires_grad_(False)
    print("load G")
    netG = Generator(opt)
    netG.load_state_dict(state_dict["netG"])
    netG.to(opt.device)
    netG.eval()
    netG.requires_grad_(False)
    print("load M")
    netM = Generator(opt, out_channels=1)
    netM.load_state_dict(state_dict["netM"])
    netM.to(opt.device)
    netM.eval()
    netM.requires_grad_(False)

    # Prepare dataloader
    test_dl = get_dataloader(opt, train=False)
    test_dl2 = get_dataloader(opt, train=False)
    eval(netC, netG, netM, test_dl, test_dl2, opt)
Example 3
def main():
    opt = get_arguments().parse_args()
    ds_train = torchvision.datasets.MNIST(opt.dataroot,
                                          train=True,
                                          download=True)
    ds_test = torchvision.datasets.MNIST(opt.dataroot,
                                         train=False,
                                         download=False)
    dir_train = os.path.join(opt.dataroot, 'train')
    dir_test = os.path.join(opt.dataroot, 'test')

    try:
        os.mkdir(dir_train)
        os.mkdir(dir_test)
        for i in range(10):
            os.mkdir(os.path.join(dir_train, str(i)))
    except OSError:
        pass

    # Process train data
    with open(os.path.join(dir_train, 'annotation_train.txt'), 'w+') as f:
        for idx, (image, target) in enumerate(ds_train):
            image = np.asarray(image)
            image_path = os.path.join(dir_train, str(target),
                                      'image_{}.png'.format(idx))
            cv2.imwrite(image_path, image)
            f.write(image_path + ',' + str(target) + '\n')

    # Process test data
    with open(os.path.join(dir_test, 'annotation_test.txt'), 'w+') as f:
        for idx, (image, target) in enumerate(ds_test):
            image = np.asarray(image)
            image_path = os.path.join(dir_test, 'image_{}.png'.format(idx))
            cv2.imwrite(image_path, image)
            f.write(image_path + ',' + str(target) + '\n')
Example 4
def main():
    opt = config.get_arguments().parse_args()
    if opt.dataset == "mnist" or opt.dataset == "cifar10":
        opt.num_classes = 10
    elif opt.dataset == "gtsrb":
        opt.num_classes = 43
    elif opt.dataset == "celeba":
        opt.num_classes = 8
    else:
        raise Exception("Invalid Dataset")

    if opt.dataset == "cifar10":
        opt.input_height = 32
        opt.input_width = 32
        opt.input_channel = 3
    elif opt.dataset == "gtsrb":
        opt.input_height = 32
        opt.input_width = 32
        opt.input_channel = 3
    elif opt.dataset == "mnist":
        opt.input_height = 28
        opt.input_width = 28
        opt.input_channel = 1
    else:
        raise Exception("Invalid Dataset")
    train(opt)
Example 5
def generate(model_name,
             anchor_image=None,
             direction=None,
             transfer=None,
             noise_solutions=None,
             factor=0.25,
             base=None,
             insert_limit=4):
    #direction = 'L, R, T, B'

    parser = get_arguments()
    parser.add_argument('--input_dir',
                        help='input image dir',
                        default='Input/Images')
    parser.add_argument('--mode',
                        help='random_samples | random_samples_arbitrary_sizes',
                        default='random_samples')
    # for random_samples:
    parser.add_argument('--gen_start_scale',
                        type=int,
                        help='generation start scale',
                        default=0)
    opt = parser.parse_args([])
    opt.input_name = model_name

    if model_name == 'islands2_basis_2.jpg':  #HARDCODED
        opt.scale_factor = 0.6

    opt = functions.post_config(opt)
    Gs = []
    Zs = []
    reals = []
    NoiseAmp = []

    real = functions.read_image(opt)
    #opt.input_name = anchor #CHANGE TO ANCHOR HERE
    anchor = functions.read_image(opt)

    functions.adjust_scales2image(real, opt)
    Gs, Zs, reals, NoiseAmp = functions.load_trained_pyramid(opt)
    in_s = functions.generate_in2coarsest(reals, 1, 1, opt)

    array = SinGAN_anchor_generate(Gs,
                                   Zs,
                                   reals,
                                   NoiseAmp,
                                   opt,
                                   gen_start_scale=opt.gen_start_scale,
                                   anchor_image=anchor_image,
                                   direction=direction,
                                   transfer=transfer,
                                   noise_solutions=noise_solutions,
                                   factor=factor,
                                   base=base,
                                   insert_limit=insert_limit)
    return array
Example 6
def cli():
    parser = get_arguments()
    parser.add_argument("--input_dir",
                        help="input image dir",
                        default="Input/Images")
    parser.add_argument("--input_name", help="input image name", required=True)
    parser.add_argument("--mode", help="task to be done", default="train")
    opt = parser.parse_args()
    opt = functions.post_config(opt)
    main(opt)
Example 7
def main():
    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
    logger = config.get_logging()

    arguments = config.get_arguments()

    Path(arguments['json_path']).mkdir(parents=True, exist_ok=True)
    index_url = 'https://tululu.org/'
    txt_url = 'https://tululu.org/txt.php'
    json_filename = 'JSON'

    books_urls = parse_tululu_category.fetch_all_page_urls(arguments['start_page'], arguments['end_page'])

    books_json = []
    for url in books_urls:
        book_id = url.split('b')[1].replace('/', '')
        book_url = f'https://tululu.org/b{book_id}/'
        book_response = get_book_response(txt_url, book_id)

        try:
            check_for_redirect(book_response)
            book_page = parse_book.parse_book_page(book_url, index_url)
            image_link = book_page['image_link']
            img_src = parse_book.download_book_cover(image_link, book_id, arguments['dest_folder'], arguments['skip_img'])
            title = book_page['title']
            filename = f'{book_id}-{title}.txt'
            book_path = parse_book.save_book(filename, book_response, arguments['dest_folder'], arguments['skip_txt'])
            author = book_page['author']
            soup = parse_book.get_soup(book_url)
            comments = parse_book.get_comments(soup)
            genres = parse_book.get_genres(soup)
            books_json.append({'title': title,
                               'author': author,
                               'img_src': img_src,
                               'book_path': book_path,
                               'comments': comments,
                               'genres': genres})
        except requests.HTTPError:
            logger.error(f'Book id-{book_id} not found on the site!')

    save_json(books_json, json_filename, arguments['json_path'])
Example 8
def main():
    opt = get_arguments().parse_args()

    # prepare model
    classifier = MNIST_classifier()

    # prepare optimizer
    optimizer = torch.optim.SGD(classifier.parameters(), opt.lr, momentum=0.9)

    # prepare scheduler
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                     opt.scheduler_milestones,
                                                     opt.scheduler_lambda)

    # prepare dataloader
    dl_train = get_dataloader(opt, train=True)
    dl_test = get_dataloader(opt, train=False)

    # continue training ?
    create_dir(opt.checkpoint)
    path_model = os.path.join(opt.checkpoint, 'model_ckpt.pth.tar')
    if (os.path.exists(path_model)):
        print('Continue Training')
        state_dict = torch.load(path_model)
        classifier.load_state_dict(state_dict['classifier'])
        optimizer.load_state_dict(state_dict['optimizer'])
        scheduler.load_state_dict(state_dict['scheduler'])
        best_acc = state_dict['best_acc']
        epoch = state_dict['epoch']
    else:
        print('Train from scratch!!')
        best_acc = 0.
        epoch = 0

    # Resume from the loaded epoch instead of always running opt.n_iters more epochs.
    for epoch in range(epoch, opt.n_iters):
        print('Epoch {}:'.format(epoch))
        train(classifier, optimizer, scheduler, dl_train, opt)
        best_acc = evaluate(classifier, optimizer, scheduler, dl_test,
                            best_acc, epoch, opt)
Example 9
def test_pyramid(images):
    parser = get_arguments()
    parser.add_argument('--input_dir',
                        help='input image dir',
                        default='Input/Images')
    #parser.add_argument('--input_name', help='input image name', required=True)
    parser.add_argument('--mode', help='task to be done', default='train')
    opt = parser.parse_args([])
    opt.input_name = 'blank'
    opt = functions.post_config(opt)

    real = functions.np2torch(images[0], opt)
    functions.adjust_scales2image(real, opt)

    all_reals = []
    for image in images:
        reals = []
        real_ = functions.np2torch(image, opt)
        real = imresize(real_, opt.scale1, opt)
        reals = functions.creat_reals_pyramid(real, reals, opt)
        all_reals.append(reals)

    return np.array(all_reals).T
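The loop above mirrors SinGAN's multi-scale preprocessing: adjust_scales2image decides how many scales are needed, and creat_reals_pyramid (the misspelling is SinGAN's own) downscales the image once per scale, coarsest first. A hedged sketch of the pyramid construction, with resize standing in for SinGAN's imresize(img, scale, opt):

def reals_pyramid(real, scale_factor, stop_scale, resize):
    # One entry per scale, coarsest first: scale_factor is in (0, 1), so the
    # exponent stop_scale - i shrinks the image most at i = 0.
    return [resize(real, scale_factor ** (stop_scale - i))
            for i in range(stop_scale + 1)]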
Example 10
def main():
    """ Main Training funtion. Parses inputs, inits logger, trains, and then generates some samples. """

    # torch.autograd.set_detect_anomaly(True)

    # Logger init
    logger.remove()
    logger.add(sys.stdout,
               colorize=True,
               format="<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | " +
               "<level>{level}</level> | " +
               "<light-black>{file.path}:{line}</light-black> | " +
               "{message}")

    # Parse arguments
    opt = get_arguments().parse_args()
    opt = post_config(opt)

    # Init wandb
    run = wandb.init(project="mario",
                     tags=get_tags(opt),
                     config=opt,
                     dir=opt.out)
    opt.out_ = run.dir

    # Init game specific inputs
    replace_tokens = {}
    sprite_path = opt.game + '/sprites'
    if opt.game == 'mario':
        opt.ImgGen = MarioLevelGen(sprite_path)
        replace_tokens = MARIO_REPLACE_TOKENS
        downsample = special_mario_downsampling
    elif opt.game == 'mariokart':
        opt.ImgGen = MariokartLevelGen(sprite_path)
        replace_tokens = MARIOKART_REPLACE_TOKENS
        downsample = special_mariokart_downsampling
    else:
        raise NameError("name of --game not recognized. Supported: mario, mariokart")

    # Read level according to input arguments
    real = read_level(opt, None, replace_tokens).to(opt.device)

    # Train!
    generators, noise_maps, reals, noise_amplitudes = train(real, opt)

    # Generate Samples of same size as level
    logger.info("Finished training! Generating random samples...")
    in_s = None
    generate_samples(generators,
                     noise_maps,
                     reals,
                     noise_amplitudes,
                     opt,
                     in_s=in_s)

    # Generate samples of smaller size than level
    logger.info("Generating arbitrary sized random samples...")
    scale_v = 0.8  # Arbitrarily chosen scales
    scale_h = 0.4
    real_down = downsample(1, [[scale_v, scale_h]], real, opt.token_list)
    real_down = real_down[0]
    # necessary for correct input shape
    in_s = torch.zeros(real_down.shape, device=opt.device)
    generate_samples(generators,
                     noise_maps,
                     reals,
                     noise_amplitudes,
                     opt,
                     in_s=in_s,
                     scale_v=scale_v,
                     scale_h=scale_h,
                     save_dir="arbitrary_random_samples")
Example 11
from ToadVAE import LevelPatchesDataset, ToadPatchVAE, ToadLevelVAE

################################################################################
# TOAD-GAN

# Logger init
logger.remove()
logger.add(sys.stdout,
           colorize=True,
           format="<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | " +
           "<level>{level}</level> | " +
           "<light-black>{file.path}:{line}</light-black> | " + "{message}")

# Parse arguments
opt = get_arguments().parse_args()
opt = post_config(opt)

# Init game specific inputs
replace_tokens = {}
sprite_path = opt.game + '/sprites'
if opt.game == 'mario':
    opt.ImgGen = MarioLevelGen(sprite_path)
    replace_tokens = MARIO_REPLACE_TOKENS
    downsample = special_mario_downsampling
elif opt.game == 'mariokart':
    opt.ImgGen = MariokartLevelGen(sprite_path)
    replace_tokens = MARIOKART_REPLACE_TOKENS
    downsample = special_mariokart_downsampling
else:
    raise NameError("name of --game not recognized. Supported: mario, mariokart")
Example 12
    writer.add_scalar('behavior 1', bc1, itr)
    writer.add_scalar("completed",
                      100 * float(sum(completed)) / float(len(end_states)),
                      itr)
    writer.add_scalar("timeouts",
                      100 * float(sum(timeouts)) / float(len(end_states)), itr)
    writer.add_scalar("killed",
                      100 * float(sum(killed)) / float(len(end_states)), itr)
    writer.add_scalar('seconds/generation', elapsed_time, itr)


if __name__ == '__main__':
    # NOTICE: As in main.py, the "output" dir is where the generator is located, even though it serves as the "input" here

    # Parse arguments
    parse = get_arguments()
    parse.add_argument("--out_",
                       help="folder containing generator files",
                       default="output/wandb/latest-run/files/")
    parse.add_argument("--scale_v",
                       type=float,
                       help="vertical scale factor",
                       default=1.0)
    parse.add_argument("--scale_h",
                       type=float,
                       help="horizontal scale factor",
                       default=1.0)
    parse.add_argument("--gen_start_scale",
                       type=int,
                       help="scale to start generating in",
                       default=0)
Example 13
def main():
    opt = config.get_arguments().parse_args()

    if opt.dataset in ["mnist", "cifar10"]:
        opt.num_classes = 10
    elif opt.dataset == "gtsrb":
        opt.num_classes = 43
    elif opt.dataset == "celeba":
        opt.num_classes = 8
    else:
        raise Exception("Invalid Dataset")

    if opt.dataset == "cifar10":
        opt.input_height = 32
        opt.input_width = 32
        opt.input_channel = 3
    elif opt.dataset == "gtsrb":
        opt.input_height = 32
        opt.input_width = 32
        opt.input_channel = 3
    elif opt.dataset == "mnist":
        opt.input_height = 28
        opt.input_width = 28
        opt.input_channel = 1
    elif opt.dataset == "celeba":
        opt.input_height = 64
        opt.input_width = 64
        opt.input_channel = 3
    else:
        raise Exception("Invalid Dataset")

    # Dataset
    test_dl = get_dataloader(opt, False)

    # prepare model
    netC, optimizerC, schedulerC = get_model(opt)

    # Load pretrained model
    mode = opt.attack_mode
    opt.ckpt_folder = os.path.join(opt.checkpoints, opt.dataset)
    opt.ckpt_path = os.path.join(
        opt.ckpt_folder, "{}_{}_morph.pth.tar".format(opt.dataset, mode))
    opt.log_dir = os.path.join(opt.ckpt_folder, "log_dir")

    if os.path.exists(opt.ckpt_path):
        state_dict = torch.load(opt.ckpt_path)
        netC.load_state_dict(state_dict["netC"])
        identity_grid = state_dict["identity_grid"]
        noise_grid = state_dict["noise_grid"]
    else:
        print("Pretrained model doesnt exist")
        exit()

    eval(
        netC,
        optimizerC,
        schedulerC,
        test_dl,
        noise_grid,
        identity_grid,
        opt,
    )
Example 14
File: main.py Project: Milkigit/NAD
def main():
    # Prepare arguments
    opt = get_arguments().parse_args()
    train(opt)
Example 15
import numpy as np  # needed for np.random.seed below
import torch
import random


def setup_seed(seed):
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
    torch.backends.cudnn.deterministic = True


setup_seed(1234)

config = get_arguments()

config.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

print(config)


def train(dataset):

    train_x, train_y, train_adj, test_x, test_y, test_adj, index = dataset

    seq_len, fea_dim = np.shape(train_x)[1], np.shape(train_x)[2]

    train_x = torch.FloatTensor(train_x).to(config.device)
    train_y = torch.LongTensor(train_y).to(config.device)
    train_adj = torch.FloatTensor(train_adj).to(config.device)
Example 16
def test_generate(model_name,
                  anchor_image=None,
                  direction=None,
                  transfer=None,
                  noise_solutions=None,
                  factor=0.25,
                  base=None,
                  insert_limit=4):
    #direction = 'L, R, T, B'

    parser = get_arguments()
    parser.add_argument('--input_dir',
                        help='input image dir',
                        default='Input/Images')
    parser.add_argument('--mode',
                        help='random_samples | random_samples_arbitrary_sizes',
                        default='random_samples')
    # for random_samples:
    parser.add_argument('--gen_start_scale',
                        type=int,
                        help='generation start scale',
                        default=0)
    opt = parser.parse_args([])
    opt.input_name = model_name

    opt = functions.post_config(opt)
    Gs = []
    Zs = []
    reals = []
    NoiseAmp = []

    opt.input_name = 'island_basis_0.jpg'  #grabbing image that exists...
    real = functions.read_image(opt)
    #opt.input_name = anchor #CHANGE TO ANCHOR HERE
    #anchor = functions.read_image(opt)
    functions.adjust_scales2image(real, opt)

    opt.input_name = 'test1.jpg'  #grabbing model that we want
    Gs, Zs, reals, NoiseAmp = functions.load_trained_pyramid(opt)

    #dummy stuff for dimensions
    reals = []
    real_ = real
    real = imresize(real_, opt.scale1, opt)
    reals = functions.creat_reals_pyramid(real, reals, opt)
    in_s = functions.generate_in2coarsest(reals, 1, 1, opt)

    array = SinGAN_anchor_generate(Gs,
                                   Zs,
                                   reals,
                                   NoiseAmp,
                                   opt,
                                   gen_start_scale=opt.gen_start_scale,
                                   anchor_image=anchor_image,
                                   direction=direction,
                                   transfer=transfer,
                                   noise_solutions=noise_solutions,
                                   factor=factor,
                                   base=base,
                                   insert_limit=insert_limit)
    return array
Example 17
def main():
    opt = config.get_arguments().parse_args()

    if opt.dataset in ["mnist", "cifar10"]:
        opt.num_classes = 10
    elif opt.dataset == "gtsrb":
        opt.num_classes = 43
    elif opt.dataset == "celeba":
        opt.num_classes = 8
    else:
        raise Exception("Invalid Dataset")

    if opt.dataset == "cifar10":
        opt.input_height = 32
        opt.input_width = 32
        opt.input_channel = 3
    elif opt.dataset == "gtsrb":
        opt.input_height = 32
        opt.input_width = 32
        opt.input_channel = 3
    elif opt.dataset == "mnist":
        opt.input_height = 28
        opt.input_width = 28
        opt.input_channel = 1
    elif opt.dataset == "celeba":
        opt.input_height = 64
        opt.input_width = 64
        opt.input_channel = 3
    else:
        raise Exception("Invalid Dataset")

    # Dataset
    train_dl = get_dataloader(opt, True)
    test_dl = get_dataloader(opt, False)

    # prepare model
    netC, optimizerC, schedulerC = get_model(opt)

    # Load pretrained model
    mode = opt.attack_mode
    opt.ckpt_folder = os.path.join(opt.checkpoints, opt.dataset)
    opt.ckpt_path = os.path.join(
        opt.ckpt_folder, "{}_{}_morph.pth.tar".format(opt.dataset, mode))
    opt.log_dir = os.path.join(opt.ckpt_folder, "log_dir")
    if not os.path.exists(opt.log_dir):
        os.makedirs(opt.log_dir)

    if opt.continue_training:
        if os.path.exists(opt.ckpt_path):
            print("Continue training!!")
            state_dict = torch.load(opt.ckpt_path)
            netC.load_state_dict(state_dict["netC"])
            optimizerC.load_state_dict(state_dict["optimizerC"])
            schedulerC.load_state_dict(state_dict["schedulerC"])
            best_clean_acc = state_dict["best_clean_acc"]
            best_bd_acc = state_dict["best_bd_acc"]
            best_cross_acc = state_dict["best_cross_acc"]
            epoch_current = state_dict["epoch_current"]
            identity_grid = state_dict["identity_grid"]
            noise_grid = state_dict["noise_grid"]
            tf_writer = SummaryWriter(log_dir=opt.log_dir)
        else:
            print("Pretrained model doesnt exist")
            exit()
    else:
        print("Train from scratch!!!")
        best_clean_acc = 0.0
        best_bd_acc = 0.0
        best_cross_acc = 0.0
        epoch_current = 0

        # Prepare grid
        ins = torch.rand(1, 2, opt.k, opt.k) * 2 - 1
        ins = ins / torch.mean(torch.abs(ins))
        # F.upsample is deprecated; F.interpolate is the equivalent call.
        noise_grid = (F.interpolate(ins,
                                    size=opt.input_height,
                                    mode="bicubic",
                                    align_corners=True).permute(0, 2, 3, 1).to(opt.device))
        array1d = torch.linspace(-1, 1, steps=opt.input_height)
        x, y = torch.meshgrid(array1d, array1d)
        identity_grid = torch.stack((y, x), 2)[None, ...].to(opt.device)

        shutil.rmtree(opt.ckpt_folder, ignore_errors=True)
        os.makedirs(opt.log_dir)
        with open(os.path.join(opt.ckpt_folder, "opt.json"), "w+") as f:
            json.dump(opt.__dict__, f, indent=2)
        tf_writer = SummaryWriter(log_dir=opt.log_dir)

    for epoch in range(epoch_current, opt.n_iters):
        print("Epoch {}:".format(epoch + 1))
        train(netC, optimizerC, schedulerC, train_dl, noise_grid,
              identity_grid, tf_writer, epoch, opt)
        best_clean_acc, best_bd_acc, best_cross_acc = eval(
            netC,
            optimizerC,
            schedulerC,
            test_dl,
            noise_grid,
            identity_grid,
            best_clean_acc,
            best_bd_acc,
            best_cross_acc,
            tf_writer,
            epoch,
            opt,
        )
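The identity_grid/noise_grid pair prepared in the train-from-scratch branch parameterizes the warp that poisons an input: the image is resampled along the identity grid perturbed by the scaled noise field. A minimal sketch of that application step, assuming the grid_sample coordinate convention the construction above implies (s and grid_rescale are hyperparameters; the values here are illustrative):

import torch
import torch.nn.functional as F

def warp_inputs(inputs, identity_grid, noise_grid, s=0.5, grid_rescale=1.0):
    # inputs: (N, C, H, W); grids: (1, H, W, 2) with coordinates in [-1, 1],
    # as expected by grid_sample. Clamping keeps samples inside the image.
    h = identity_grid.shape[1]
    grid = torch.clamp((identity_grid + s * noise_grid / h) * grid_rescale, -1, 1)
    return F.grid_sample(inputs, grid.repeat(inputs.shape[0], 1, 1, 1),
                         align_corners=True)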
Example 18
from config import get_arguments
from SinGAN.manipulate import *
from SinGAN.training import *
import SinGAN.functions as functions

if __name__ == '__main__':
    parser = get_arguments()
    parser.add_argument('--input_dir',
                        help='input image dir',
                        default='Input/Images')
    parser.add_argument('--model_name',
                        help='input image name -1',
                        required=True)
    parser.add_argument('--mode', help='task to be done', default='train')
    opt = parser.parse_args()
    opt = functions.post_config(opt)
    Gs = []
    Zs = []
    reals = []
    NoiseAmp = []
    dir2save = functions.generate_dir2save(opt)

    if (os.path.exists(dir2save)):
        print('trained model already exists')
    else:
        try:
            os.makedirs(dir2save)
        except OSError:
            pass
        real = functions.read_images(opt)
        functions.adjust_scales2image(real, opt)
Example 19
def main():
    # Prepare arguments
    opt = get_arguments().parse_args()
    if (opt.dataset == 'cifar10'):
        opt.num_classes = 10
    elif (opt.dataset == 'gtsrb'):
        opt.num_classes = 43
    else:
        raise Exception("Invalid Dataset")
    if (opt.dataset == 'cifar10'):
        opt.input_height = 32
        opt.input_width = 32
        opt.input_channel = 3
    elif (opt.dataset == 'gtsrb'):
        opt.input_height = 32
        opt.input_width = 32
        opt.input_channel = 3
    else:
        raise Exception("Invalid Dataset")

    # Load models
    if (opt.dataset == 'cifar10'):
        netC = PreActResNet18().to(opt.device)
    elif (opt.dataset == 'gtsrb'):
        netC = PreActResNet18(num_classes=43).to(opt.device)
    else:
        raise Exception("Invalid dataset")

    path_model = os.path.join(
        opt.checkpoints, opt.dataset, opt.attack_mode,
        '{}_{}_ckpt.pth.tar'.format(opt.attack_mode, opt.dataset))
    state_dict = torch.load(path_model)
    print('load C')
    netC.load_state_dict(state_dict['netC'])
    netC.to(opt.device)
    netC.eval()
    netC.requires_grad_(False)
    print('load G')
    netG = Generator(opt)
    netG.load_state_dict(state_dict['netG'])
    netG.to(opt.device)
    netG.eval()
    netG.requires_grad_(False)

    netM = Generator(opt, out_channels=1)
    netM.load_state_dict(state_dict['netM'])
    netM.to(opt.device)
    netM.eval()
    netM.requires_grad_(False)

    # Prepare dataloader
    test_dl = get_dataloader(opt, train=False)

    # Forward hook for getting layer's output
    container = []

    def forward_hook(module, input, output):
        container.append(output)

    hook = netC.layer4.register_forward_hook(forward_hook)

    # Forwarding all the validation set
    print("Forwarding all the validation dataset:")
    for batch_idx, (inputs, _) in enumerate(test_dl):
        inputs = inputs.to(opt.device)
        netC(inputs)
        progress_bar(batch_idx, len(test_dl))

    # Processing to get the "more important mask"
    container = torch.cat(container, dim=0)
    activation = torch.mean(container, dim=[0, 2, 3])
    seq_sort = torch.argsort(activation)
    pruning_mask = torch.ones(seq_sort.shape[0], dtype=bool)
    hook.remove()

    # Pruning times - no-tuning after pruning a channel!!!
    acc_clean = []
    acc_bd = []
    with open(opt.outfile, 'w') as outs:
        for index in range(pruning_mask.shape[0]):
            net_pruned = copy.deepcopy(netC)
            num_pruned = index
            if (index):
                channel = seq_sort[index - 1]
                pruning_mask[channel] = False
            print("Pruned {} filters".format(num_pruned))

            net_pruned.layer4[1].conv2 = nn.Conv2d(pruning_mask.shape[0],
                                                   pruning_mask.shape[0] -
                                                   num_pruned, (3, 3),
                                                   stride=1,
                                                   padding=1,
                                                   bias=False)
            net_pruned.linear = nn.Linear(pruning_mask.shape[0] - num_pruned,
                                          10)

            #Re-assigning weight to the pruned net
            for name, module in net_pruned._modules.items():
                if ('layer4' in name):
                    module[1].conv2.weight.data = netC.layer4[
                        1].conv2.weight.data[pruning_mask]
                    module[1].ind = pruning_mask
                elif ('linear' == name):
                    module.weight.data = netC.linear.weight.data[:,
                                                                 pruning_mask]
                    module.bias.data = netC.linear.bias.data
                else:
                    continue
            net_pruned.to(opt.device)
            clean, bd = eval(net_pruned, netG, netM, test_dl, opt)
            outs.write('%d %0.4f %0.4f\n' % (index, clean, bd))
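Examples 19, 25, and 26 all follow the same pruning recipe: capture a layer's activations with a forward hook, rank channels by their mean activation over the validation set, and disable the least active ones. A self-contained toy distillation of that recipe (the two-layer model is hypothetical, and it zeroes filters where the real code rebuilds the conv/linear layers):

import torch
import torch.nn as nn

model = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU())  # stand-in for netC
data = torch.randn(16, 3, 32, 32)  # stand-in for the validation loader

container = []
hook = model[0].register_forward_hook(lambda mod, inp, out: container.append(out))
with torch.no_grad():
    model(data)
hook.remove()

activation = torch.cat(container, dim=0).mean(dim=[0, 2, 3])  # mean per channel
seq_sort = torch.argsort(activation)  # channel indices, least active first

k = 2  # prune the k least active channels by zeroing their filters
pruning_mask = torch.ones(activation.shape[0], dtype=torch.bool)
pruning_mask[seq_sort[:k]] = False
model[0].weight.data[~pruning_mask] = 0
model[0].bias.data[~pruning_mask] = 0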
Example 20
def main():
    opt = config.get_arguments().parse_args()
    transforms = get_transform(opt, False)
    dataloader = get_dataloader(opt, False)
    for item in dataloader:
        images, labels = item
Example 21
import os
import time
from datetime import timedelta

from config import get_arguments
from train import distributed_device_train
from evaluate import single_device_evaluate

if __name__ == '__main__':
    args = get_arguments()

    tictoc = time.time()
    if args.train_flag:
        distributed_device_train(args)
    else:
        single_device_evaluate(args)
    print('%s: process finished in %s' % (time.ctime(), timedelta(seconds=time.time() - tictoc)))
Example 22
def main():
    opt = get_arguments().parse_args()
    transforms = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])
    dl = get_dataloader(opt, False)
    inputs, targets = next(iter(dl))
    print(inputs.shape)
Example 23
def main():
    # Prepare arguments
    opt = get_arguments().parse_args()
    if (opt.dataset == 'mnist' or opt.dataset == 'cifar10'):
        opt.num_classes = 10
    elif (opt.dataset == 'gtsrb'):
        opt.num_classes = 43
    else:
        raise Exception("Invalid Dataset")
    if (opt.dataset == 'cifar10'):
        opt.input_height = 32
        opt.input_width = 32
        opt.input_channel = 3
    elif (opt.dataset == 'gtsrb'):
        opt.input_height = 32
        opt.input_width = 32
        opt.input_channel = 3
    elif (opt.dataset == 'mnist'):
        opt.input_height = 28
        opt.input_width = 28
        opt.input_channel = 1
    else:
        raise Exception("Invalid Dataset")

    # Load models and masks
    if (opt.dataset == 'cifar10'):
        netC = PreActResNet18().to(opt.device)
    elif (opt.dataset == 'gtsrb'):
        netC = PreActResNet18(num_classes=43).to(opt.device)
    elif (opt.dataset == 'mnist'):
        netC = NetC_MNIST().to(opt.device)
    else:
        raise Exception("Invalid dataset")

    path_model = os.path.join(
        opt.checkpoints, opt.dataset, opt.attack_mode,
        '{}_{}_ckpt.pth.tar'.format(opt.attack_mode, opt.dataset))
    state_dict = torch.load(path_model)
    print('load C')
    netC.load_state_dict(state_dict['netC'])
    netC.to(opt.device)
    netC.eval()
    netC.requires_grad_(False)
    print('load G')
    netG = Generator(opt)
    netG.load_state_dict(state_dict['netG'])
    netG.to(opt.device)
    netG.eval()
    netG.requires_grad_(False)

    netM = Generator(opt, out_channels=1)
    netM.load_state_dict(state_dict['netM'])
    netM.to(opt.device)
    netM.eval()
    netM.requires_grad_(False)

    # Prepare dataloader
    test_dl = get_dataloader(opt, train=False)

    print('Original')
    eval(netC, netG, netM, test_dl, opt)
    print('Smoothing')
    for k in [3, 5]:
        print('k = ', k)
        test_dl2 = get_dataloader(opt, train=False, k=k)
        eval(netC, netG, netM, test_dl2, opt)

    print('Color-depth shrinking')
    for cc in range(3):
        c = cc + 1
        print('c = ', c)
        test_dl2 = get_dataloader(opt, train=False, c=c)
        eval(netC, netG, netM, test_dl2, opt)
Example 24
def invert_model(test_image,
                 model_name,
                 scales2invert=None,
                 penalty=1e-3,
                 show=True):
    '''test_image is an array, model_name is a name'''
    Noise_Solutions = []

    parser = get_arguments()
    parser.add_argument('--input_dir',
                        help='input image dir',
                        default='Input/Images')

    parser.add_argument('--mode', default='RandomSamples')
    opt = parser.parse_args([])
    opt.input_name = model_name
    opt.reg = penalty

    if model_name == 'islands2_basis_2.jpg':  #HARDCODED
        opt.scale_factor = 0.6

    opt = functions.post_config(opt)

    ### Loading in Generators
    Gs, Zs, reals, NoiseAmp = functions.load_trained_pyramid(opt)
    for G in Gs:
        G = functions.reset_grads(G, False)
        G.eval()

    ### Loading in Ground Truth Test Images
    reals = []  #deleting old real images
    real = functions.np2torch(test_image, opt)
    functions.adjust_scales2image(real, opt)

    real_ = functions.np2torch(test_image, opt)
    real = imresize(real_, opt.scale1, opt)
    reals = functions.creat_reals_pyramid(real, reals, opt)

    ### General Padding
    pad_noise = int(((opt.ker_size - 1) * opt.num_layer) / 2)
    m_noise = nn.ZeroPad2d(int(pad_noise))

    pad_image = int(((opt.ker_size - 1) * opt.num_layer) / 2)
    m_image = nn.ZeroPad2d(int(pad_image))

    I_prev = None
    REC_ERROR = 0

    if scales2invert is None:
        scales2invert = opt.stop_scale + 1

    for scale in range(scales2invert):
        #for scale in range(3):

        #Get X, G
        X = reals[scale]
        G = Gs[scale]
        noise_amp = NoiseAmp[scale]

        #Defining Dimensions
        opt.nc_z = X.shape[1]
        opt.nzx = X.shape[2]
        opt.nzy = X.shape[3]

        #getting parameters for prior distribution penalty
        pdf = torch.distributions.Normal(0, 1)
        alpha = opt.reg
        #alpha = 1e-2

        #Defining Z
        if scale == 0:
            z_init = functions.generate_noise(
                [1, opt.nzx, opt.nzy], device=opt.device)  #only 1D noise
        else:
            z_init = functions.generate_noise(
                [3, opt.nzx, opt.nzy],
                device=opt.device)  #otherwise move up to 3d noise

        z_init = z_init.to(opt.device).requires_grad_(True)  # tensor to optimize (Variable is deprecated)

        #Building I_prev
        if I_prev is None:  # first scale scenario
            in_s = torch.full(reals[0].shape, 0, device=opt.device)  #all zeros
            I_prev = in_s
            I_prev = m_image(I_prev)  #padding

        else:  #otherwise take the output from the previous scale and upsample
            I_prev = imresize(I_prev, 1 / opt.scale_factor, opt)  #upsamples
            I_prev = m_image(I_prev)
            I_prev = I_prev[:, :, 0:X.shape[2] + 10, 0:X.shape[3] + 10]  #making sure that precision errors don't mess anything up
            I_prev = functions.upsampling(I_prev, X.shape[2] + 10, X.shape[3] + 10)  #seems to be redundant

        LR = [2e-3, 2e-2, 2e-1, 2e-1, 2e-1, 2e-1, 2e-1, 2e-1, 2e-1, 2e-1, 2e-1]
        Zoptimizer = torch.optim.RMSprop([z_init],
                                         lr=LR[scale])  #Defining Optimizer
        x_loss = []  #for plotting
        epochs = []  #for plotting

        niter = [
            200, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400,
            400, 400
        ]
        for epoch in range(niter[scale]):  #Gradient Descent on Z

            if scale == 0:
                noise_input = m_noise(z_init.expand(1, 3, opt.nzx, opt.nzy))  #expand and pad
            else:
                noise_input = m_noise(z_init)  #padding

            z_in = noise_amp * noise_input + I_prev
            G_z = G(z_in, I_prev)

            x_recLoss = F.mse_loss(G_z, X)  #MSE loss

            logProb = pdf.log_prob(z_init).mean()  #Gaussian loss

            loss = x_recLoss - (alpha * logProb.mean())

            Zoptimizer.zero_grad()
            loss.backward()
            Zoptimizer.step()

            #losses['rec'].append(x_recLoss.data[0])
            #print('Image loss: [%d] loss: %0.5f' % (epoch, x_recLoss.item()))
            #print('Noise loss: [%d] loss: %0.5f' % (epoch, z_recLoss.item()))
            x_loss.append(loss.item())
            epochs.append(epoch)

            REC_ERROR = x_recLoss

        if show:
            plt.plot(epochs, x_loss, label='x_loss')
            plt.legend()
            plt.show()

        I_prev = G_z.detach()  #take final output, maybe need to edit this line, something's very very fishy

        _ = show_image(X, show, 'target')
        reconstructed_image = show_image(I_prev, show, 'output')
        _ = show_image(noise_input.detach().cpu(), show, 'noise')

        Noise_Solutions.append(noise_input.detach())
    return Noise_Solutions, reconstructed_image, REC_ERROR
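At its core the loop above is gradient descent on the latent: the generator stays frozen, z_init is the only parameter, and the loss is reconstruction error minus a scaled Gaussian log-prior. Stripped of the multi-scale machinery, the pattern reduces to the sketch below (G and the latent shape are placeholders):

import torch
import torch.nn.functional as F

def invert(G, target, steps=400, lr=2e-2, alpha=1e-3):
    # G: frozen generator mapping z -> image; target: the image to reconstruct.
    # z is the only tensor with requires_grad, so only the latent is updated.
    z = torch.randn_like(target, requires_grad=True)
    optimizer = torch.optim.RMSprop([z], lr=lr)
    prior = torch.distributions.Normal(0, 1)
    for _ in range(steps):
        loss = F.mse_loss(G(z), target) - alpha * prior.log_prob(z).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    return z.detach()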
Example 25
def main():
    # Prepare arguments
    opt = get_arguments().parse_args()

    if opt.dataset == "mnist":
        opt.input_height = 28
        opt.input_width = 28
        opt.input_channel = 1
        netC = NetC_MNIST().to(opt.device)
    else:
        raise Exception("Invalid Dataset")

    mode = opt.attack_mode
    opt.ckpt_folder = os.path.join(opt.checkpoints, opt.dataset)
    opt.ckpt_path = os.path.join(
        opt.ckpt_folder, "{}_{}_morph.pth.tar".format(opt.dataset, mode))
    opt.log_dir = os.path.join(opt.ckpt_folder, "log_dir")

    state_dict = torch.load(opt.ckpt_path)
    print("load C")
    netC.load_state_dict(state_dict["netC"])
    netC.to(opt.device)
    netC.eval()
    netC.requires_grad_(False)
    print("load grid")
    identity_grid = state_dict["identity_grid"].to(opt.device)
    noise_grid = state_dict["noise_grid"].to(opt.device)
    print(state_dict["best_clean_acc"], state_dict["best_bd_acc"])

    # Prepare dataloader
    test_dl = get_dataloader(opt, train=False)

    for name, module in netC._modules.items():
        print(name)

    # Forward hook for getting layer's output
    container = []

    def forward_hook(module, input, output):
        container.append(output)

    hook = netC.layer3.register_forward_hook(forward_hook)

    # Forwarding all the validation set
    print("Forwarding all the validation dataset:")
    for batch_idx, (inputs, _) in enumerate(test_dl):
        inputs = inputs.to(opt.device)
        netC(inputs)
        progress_bar(batch_idx, len(test_dl))

    # Processing to get the "more important mask"
    container = torch.cat(container, dim=0)
    activation = torch.mean(container, dim=[0, 2, 3])
    seq_sort = torch.argsort(activation)
    pruning_mask = torch.ones(seq_sort.shape[0], dtype=bool)
    hook.remove()

    # Pruning times - no-tuning after pruning a channel!!!
    acc_clean = []
    acc_bd = []
    with open("mnist_{}_results.txt".format(opt.attack_mode), "w") as outs:
        for index in range(pruning_mask.shape[0]):
            net_pruned = copy.deepcopy(netC)
            num_pruned = index
            if index:
                channel = seq_sort[index - 1]
                pruning_mask[channel] = False
            print("Pruned {} filters".format(num_pruned))

            net_pruned.layer3.conv1 = nn.Conv2d(pruning_mask.shape[0],
                                                pruning_mask.shape[0] -
                                                num_pruned, (3, 3),
                                                stride=2,
                                                padding=1,
                                                bias=False)
            net_pruned.linear6 = nn.Linear(
                (pruning_mask.shape[0] - num_pruned) * 16, 512)

            # Re-assigning weight to the pruned net
            for name, module in net_pruned._modules.items():
                if "layer3" in name:
                    module.conv1.weight.data = netC.layer3.conv1.weight.data[
                        pruning_mask]
                    module.ind = pruning_mask
                elif "linear6" == name:
                    module.weight.data = netC.linear6.weight.data.reshape(
                        -1, 64,
                        16)[:, pruning_mask].reshape(512,
                                                     -1)  # [:, pruning_mask]
                    module.bias.data = netC.linear6.bias.data
                else:
                    continue
            net_pruned.to(opt.device)
            clean, bd = eval(net_pruned, identity_grid, noise_grid, test_dl,
                             opt)
            outs.write("%d %0.4f %0.4f\n" % (index, clean, bd))
Example 26
def main():
    # Prepare arguments
    opt = get_arguments().parse_args()
    if opt.dataset == "mnist":
        opt.num_classes = 10
    else:
        raise Exception("Invalid Dataset")
    if opt.dataset == "mnist":
        opt.input_height = 28
        opt.input_width = 28
        opt.input_channel = 1
    else:
        raise Exception("Invalid Dataset")

    # Load models
    if opt.dataset == "mnist":
        netC = NetC_MNIST().to(opt.device)
    else:
        raise Exception("Invalid dataset")

    path_model = os.path.join(
        opt.checkpoints, opt.dataset, opt.attack_mode, "{}_{}_ckpt.pth.tar".format(opt.attack_mode, opt.dataset)
    )
    state_dict = torch.load(path_model)
    netC.load_state_dict(state_dict["netC"])
    netC.to(opt.device)
    netC.eval()
    netC.requires_grad_(False)
    netG = Generator(opt)
    netG.load_state_dict(state_dict["netG"])
    netG.to(opt.device)
    netG.eval()
    netG.requires_grad_(False)

    netM = Generator(opt, out_channels=1)
    netM.load_state_dict(state_dict["netM"])
    netM.to(opt.device)
    netM.eval()
    netM.requires_grad_(False)

    # Prepare dataloader
    test_dl = get_dataloader(opt, train=False)

    # Forward hook for getting layer's output
    container = []

    def forward_hook(module, input, output):
        container.append(output)

    hook = netC.relu6.register_forward_hook(forward_hook)

    # Forwarding all the validation set
    print("Forwarding all the validation dataset:")
    for batch_idx, (inputs, _) in enumerate(test_dl):
        inputs = inputs.to(opt.device)
        netC(inputs)
        progress_bar(batch_idx, len(test_dl))

    # Processing to get the "more important mask"
    container = torch.cat(container, dim=0)
    activation = torch.mean(container, dim=[0, 2, 3])
    seq_sort = torch.argsort(activation)
    pruning_mask = torch.ones(seq_sort.shape[0], dtype=bool)
    hook.remove()

    # Pruning times - no-tuning after pruning a channel!!!
    acc_clean = []
    acc_bd = []
    with open(opt.outfile, "w") as outs:
        for index in range(pruning_mask.shape[0]):
            net_pruned = copy.deepcopy(netC)
            num_pruned = index
            if index:
                channel = seq_sort[index - 1]  # match the accumulated mask, as in Examples 19 and 25
                pruning_mask[channel] = False
            print("Pruned {} filters".format(num_pruned))

            net_pruned.conv5 = nn.Conv2d(64, 64 - num_pruned, (5, 5), 1, 0)
            net_pruned.linear6 = nn.Linear(16 * (64 - num_pruned), 512)

            # Re-assigning weight to the pruned net
            for name, module in net_pruned._modules.items():
                if "conv5" in name:
                    module.weight.data = netC.conv5.weight.data[pruning_mask]
                    module.bias.data = netC.conv5.bias.data[pruning_mask]
                elif "linear6" in name:
                    module.weight.data = netC.linear6.weight.data.reshape(-1, 64, 16)[:, pruning_mask].reshape(512, -1)
                    module.bias.data = netC.linear6.bias.data
                else:
                    continue
            net_pruned.to(opt.device)  # the rebuilt conv5/linear6 layers start on CPU
            clean, bd = eval(net_pruned, netG, netM, test_dl, opt)
            outs.write("%d %0.4f %0.4f\n" % (index, clean, bd))