Example 1
def test(args):

    args.seed = init_random_seed(args.manual_seed)

    ##################### load datasets #####################

    kdataloader_trn, _, knownclass = get_data_loader(name=args.datasetname,
                                                     train=True,
                                                     split=args.split,
                                                     batch_size=args.batchsize,
                                                     image_size=args.imgsize)

    kdataloader_tst, ukdataloader_tst, knownclass = get_data_loader(
        name=args.datasetname,
        train=False,
        split=args.split,
        batch_size=args.batchsize,
        image_size=args.imgsize)

    nclass = len(knownclass)
    ##################### Network Init #####################
    Encoderrestore = osp.join('results', args.defense, 'snapshots',
                              args.datasetname + '-' + args.split,
                              args.denoisemean, args.adv + str(args.adv_iter),
                              'Encoder-' + args.defensesnapshot + '.pt')
    Encoder = init_model(net=DenoiseResnet.ResnetEncoder(
        denoisemean=args.denoisemean,
        latent_size=args.latent_size,
        denoise=args.denoise),
                         init_type=args.init_type,
                         restore=Encoderrestore,
                         parallel_reload=args.parallel_train)

    NorClsfierrestore = osp.join('results', args.defense, 'snapshots',
                                 args.datasetname + '-' + args.split,
                                 args.denoisemean,
                                 args.adv + str(args.adv_iter),
                                 'NorClsfier-' + args.defensesnapshot + '.pt')
    NorClsfier = init_model(net=DenoiseResnet.NorClassifier(
        latent_size=args.latent_size, num_classes=nclass),
                            init_type=args.init_type,
                            restore=NorClsfierrestore,
                            parallel_reload=args.parallel_train)

    openmax(args, kdataloader_trn, kdataloader_tst, ukdataloader_tst,
            knownclass, Encoder, NorClsfier)
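
All of these examples call init_random_seed(args.manual_seed) from the repository's misc.utils, whose implementation is not shown here. A minimal sketch of what such a helper typically does, consistent with the call sites (it takes an optional manual seed and returns the seed actually used, e.g. args.seed = init_random_seed(...)), assuming PyTorch:

import random

import numpy as np
import torch


def init_random_seed(manual_seed=None):
    """Sketch only: seed Python, NumPy and PyTorch RNGs and return the seed.

    The real misc.utils.init_random_seed is not shown in these examples;
    this reconstruction just matches how the call sites use it.
    """
    seed = manual_seed if manual_seed is not None else random.randint(1, 10000)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        # seed every visible GPU as well
        torch.cuda.manual_seed_all(seed)
    return seed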
Example 2
def main(args):

    if args.training_type == 'Train':  # '==', not 'is': 'is' compares identity, not string value
        savefilename = osp.join(args.dataset1 + args.dataset2 + args.dataset3 +
                                '1')
    elif args.training_type == 'Test':
        savefilename = osp.join(
            args.tstfile,
            args.tstdataset + 'to' + args.dataset_target + args.snapshotnum)

    args.seed = init_random_seed(args.manual_seed)

    if args.training_type in ['Train', 'Test']:
        summary_writer = SummaryWriter(
            osp.join(args.results_path, 'log', savefilename))
        saver = Saver(args, savefilename)
        saver.print_config()

    ##################### load datasets #####################

    if args.training_type == 'Train':

        data_loader1_real = get_dataset_loader(name=args.dataset1,
                                               getreal=True,
                                               batch_size=args.batchsize)
        data_loader1_fake = get_dataset_loader(name=args.dataset1,
                                               getreal=False,
                                               batch_size=args.batchsize)

        data_loader2_real = get_dataset_loader(name=args.dataset2,
                                               getreal=True,
                                               batch_size=args.batchsize)
        data_loader2_fake = get_dataset_loader(name=args.dataset2,
                                               getreal=False,
                                               batch_size=args.batchsize)

        data_loader3_real = get_dataset_loader(name=args.dataset3,
                                               getreal=True,
                                               batch_size=args.batchsize)
        data_loader3_fake = get_dataset_loader(name=args.dataset3,
                                               getreal=False,
                                               batch_size=args.batchsize)

        data_loader_target = get_tgtdataset_loader(name=args.dataset_target,
                                                   batch_size=args.batchsize)

    elif args.training_type == 'Test':

        data_loader_target = get_tgtdataset_loader(name=args.dataset_target,
                                                   batch_size=args.batchsize)

    ##################### load models #####################

    FeatExtmodel = models.create(args.arch_FeatExt)
    DepthEstmodel = models.create(args.arch_DepthEst)
    FeatEmbdmodel = models.create(args.arch_FeatEmbd,
                                  momentum=args.bn_momentum)

    if args.training_type == 'Train':

        FeatExt_restore = None
        DepthEst_restore = None
        FeatEmbd_restore = None

    elif args.training_type == 'Test':
        FeatExt_restore = osp.join('results', args.tstfile, 'snapshots',
                                   args.tstdataset,
                                   'FeatExtor-' + args.snapshotnum + '.pt')
        FeatEmbd_restore = osp.join('results', args.tstfile, 'snapshots',
                                    args.tstdataset,
                                    'FeatEmbder-' + args.snapshotnum + '.pt')
        DepthEst_restore = None

    else:
        raise NotImplementedError('method type [%s] is not implemented' %
                                  args.training_type)

    FeatExtor = init_model(net=FeatExtmodel,
                           init_type=args.init_type,
                           restore=FeatExt_restore,
                           parallel_reload=True)
    DepthEstor = init_model(net=DepthEstmodel,
                            init_type=args.init_type,
                            restore=DepthEst_restore,
                            parallel_reload=True)
    FeatEmbder = init_model(net=FeatEmbdmodel,
                            init_type=args.init_type,
                            restore=FeatEmbd_restore,
                            parallel_reload=False)

    print(">>> FeatExtor <<<")
    print(FeatExtor)
    print(">>> DepthEstor <<<")
    print(DepthEstor)
    print(">>> FeatEmbder <<<")
    print(FeatEmbder)
    ##################### training models #####################

    if args.training_type == 'Train':

        Train(args, FeatExtor, DepthEstor, FeatEmbder, data_loader1_real,
              data_loader1_fake, data_loader2_real, data_loader2_fake,
              data_loader3_real, data_loader3_fake, data_loader_target,
              summary_writer, saver, savefilename)

    elif args.training_type == 'Test':

        Test(args, FeatExtor, FeatEmbder, data_loader_target, savefilename)

    else:
        raise NotImplementedError('method type [%s] is not implemented' %
                                  args.training_type)
Example 3
def main(args):

    if args.training_type == 'Train':
        savefilename = osp.join(args.dataset1 + args.dataset2 + args.dataset3 +
                                '1')
    elif args.training_type == 'Pre_train':
        savefilename = osp.join(args.dataset_target + '')
    elif args.training_type == 'Test':
        savefilename = osp.join(args.tstfile,
                                args.tstdataset + args.snapshotnum)

    args.seed = init_random_seed(args.manual_seed)

    if args.training_type in ['Train', 'Pre_train', 'Test']:
        summary_writer = SummaryWriter(
            osp.join(args.results_path, 'log', savefilename))
        saver = Saver(args, savefilename)
        saver.print_config()

    ##################### load datasets #####################

    if args.training_type == 'Train':

        data_loader1_real = get_dataset_loader(name=args.dataset1,
                                               getreal=True,
                                               batch_size=args.batchsize)
        data_loader1_fake = get_dataset_loader(name=args.dataset1,
                                               getreal=False,
                                               batch_size=args.batchsize)

        data_loader2_real = get_dataset_loader(name=args.dataset2,
                                               getreal=True,
                                               batch_size=args.batchsize)
        data_loader2_fake = get_dataset_loader(name=args.dataset2,
                                               getreal=False,
                                               batch_size=args.batchsize)

        data_loader3_real = get_dataset_loader(name=args.dataset3,
                                               getreal=True,
                                               batch_size=args.batchsize)
        data_loader3_fake = get_dataset_loader(name=args.dataset3,
                                               getreal=False,
                                               batch_size=args.batchsize)

        data_loader_target = get_tgtdataset_loader(name=args.dataset_target,
                                                   batch_size=args.batchsize)

    elif args.training_type == 'Test':

        data_loader_target = get_tgtdataset_loader(name=args.dataset_target,
                                                   batch_size=args.batchsize)

    elif args.training_type == 'Pre_train':
        data_loader_real = get_dataset_loader(name=args.dataset_target,
                                              getreal=True,
                                              batch_size=args.batchsize)
        data_loader_fake = get_dataset_loader(name=args.dataset_target,
                                              getreal=False,
                                              batch_size=args.batchsize)

    ##################### load models #####################

    FeatExtmodel = models.create(args.arch_FeatExt)
    FeatExtmodel_pre1 = models.create(args.arch_FeatExt)
    FeatExtmodel_pre2 = models.create(args.arch_FeatExt)
    FeatExtmodel_pre3 = models.create(args.arch_FeatExt)

    FeatEmbdmodel = models.create(args.arch_FeatEmbd,
                                  embed_size=args.embed_size)
    DepthEstmodel = models.create(args.arch_DepthEst)

    Dismodel1 = models.create(args.arch_Dis1)
    Dismodel2 = models.create(args.arch_Dis2)
    Dismodel3 = models.create(args.arch_Dis3)

    if args.training_type == 'Train':

        FeatExtS1_restore = osp.join('results', 'Pre_train', 'snapshots',
                                     args.dataset1, 'DGFA-Ext-final.pt')
        FeatExtS2_restore = osp.join('results', 'Pre_train', 'snapshots',
                                     args.dataset2, 'DGFA-Ext-final.pt')
        FeatExtS3_restore = osp.join('results', 'Pre_train', 'snapshots',
                                     args.dataset3, 'DGFA-Ext-final.pt')

        FeatExtorS1 = init_model(net=FeatExtmodel_pre1,
                                 init_type=args.init_type,
                                 restore=FeatExtS1_restore)
        FeatExtorS2 = init_model(net=FeatExtmodel_pre2,
                                 init_type=args.init_type,
                                 restore=FeatExtS2_restore)
        FeatExtorS3 = init_model(net=FeatExtmodel_pre3,
                                 init_type=args.init_type,
                                 restore=FeatExtS3_restore)

        Dis_restore1 = None
        Dis_restore2 = None
        Dis_restore3 = None

        FeatExt_restore = None
        DepthEst_restore = None
        FeatEmbd_restore = None

        FeatEmbder = init_model(net=FeatEmbdmodel,
                                init_type=args.init_type,
                                restore=FeatEmbd_restore)

    elif args.training_type == 'Pre_train':
        FeatExt_restore = None
        DepthEst_restore = None

        Dis_restore1 = None
        Dis_restore2 = None
        Dis_restore3 = None

    elif args.training_type == 'Test':
        FeatExt_restore = osp.join('results', args.tstfile, 'snapshots',
                                   args.tstdataset,
                                   'DGFA-Ext-' + args.snapshotnum + '.pt')
        DepthEst_restore = osp.join('results', args.tstfile, 'snapshots',
                                    args.tstdataset,
                                    'DGFA-Depth-' + args.snapshotnum + '.pt')
        FeatEmbd_restore = osp.join('results', args.tstfile, 'snapshots',
                                    args.tstdataset,
                                    'DGFA-Embd-' + args.snapshotnum + '.pt')
        FeatEmbder = init_model(net=FeatEmbdmodel,
                                init_type=args.init_type,
                                restore=FeatEmbd_restore)

        Dis_restore1 = None
        Dis_restore2 = None
        Dis_restore3 = None

    else:
        raise NotImplementedError('method type [%s] is not implemented' %
                                  args.training_type)

    FeatExtor = init_model(net=FeatExtmodel,
                           init_type=args.init_type,
                           restore=FeatExt_restore)
    DepthEstor = init_model(net=DepthEstmodel,
                            init_type=args.init_type,
                            restore=DepthEst_restore)

    Discriminator1 = init_model(net=Dismodel1,
                                init_type=args.init_type,
                                restore=Dis_restore1)
    Discriminator2 = init_model(net=Dismodel2,
                                init_type=args.init_type,
                                restore=Dis_restore2)
    Discriminator3 = init_model(net=Dismodel3,
                                init_type=args.init_type,
                                restore=Dis_restore3)

    print(">>> FeatExtor <<<")
    print(FeatExtor)

    print(">>> FeatEmbder <<<")
    print(FeatEmbder)

    print(">>> DepthEstor <<<")
    print(DepthEstor)

    print(">>> Discriminator <<<")
    print(Discriminator1)

    ##################### training models #####################

    if args.training_type == 'Train':

        Train(args, FeatExtor, DepthEstor, FeatEmbder, Discriminator1,
              Discriminator2, Discriminator3, FeatExtorS1, FeatExtorS2,
              FeatExtorS3, data_loader1_real, data_loader1_fake,
              data_loader2_real, data_loader2_fake, data_loader3_real,
              data_loader3_fake, data_loader_target, summary_writer, saver,
              savefilename)

    elif args.training_type == 'Test':

        Test(args, FeatExtor, DepthEstor, FeatEmbder, data_loader_target,
             savefilename)

    elif args.training_type == 'Pre_train':

        Pre_train(args, FeatExtor, DepthEstor, data_loader_real,
                  data_loader_fake, summary_writer, saver, savefilename)
    else:
        raise NotImplementedError('method type [%s] is not implemented' %
                                  args.training_type)
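
Examples 1-3 all restore snapshots through init_model(net=..., init_type=..., restore=..., parallel_reload=...), whose implementation is likewise not shown. A plausible sketch, assuming PyTorch and the usual 'module.' key prefix left by nn.DataParallel in saved state_dicts (weight initialization per init_type is elided):

import os

import torch


def init_model(net, init_type='normal', restore=None, parallel_reload=False):
    """Sketch of the restore helper; the real misc.utils.init_model is not shown."""
    if restore is not None and os.path.exists(restore):
        state_dict = torch.load(restore, map_location='cpu')
        if parallel_reload:
            # checkpoints saved from an nn.DataParallel wrapper carry a
            # 'module.' prefix on every key; strip it before loading
            state_dict = {k.replace('module.', '', 1): v
                          for k, v in state_dict.items()}
        net.load_state_dict(state_dict)
        print("Restore model from: {}".format(restore))
    else:
        pass  # here the real helper would initialize weights per init_type
    if torch.cuda.is_available():
        net.cuda()
    return net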
Example 4
"""Main script for ADDA."""
import os
from misc import config as cfg
from core import train_src, train_src_rec, train_tgt
from misc.evaluate import eval_func
from models import Discriminator_feat, Discriminator_img, SythnetClassifier, SythnetEncoder, SythnetGenerator
from misc.utils import get_data_loader, init_model, init_random_seed, mkdirs
from misc.saver import Saver
from logger import Logger

from pdb import set_trace as st

if __name__ == '__main__':
    # init random seed
    init_random_seed(cfg.manual_seed)
    saver = Saver()  # lowercase instance name; avoids shadowing the Saver class
    saver.print_config()

    logs_path = os.path.join(cfg.model_root, cfg.name, 'logs')
    mkdirs(logs_path)
    logger = Logger(logs_path)

    # load dataset SM
    src_data_loader = get_data_loader(cfg.src_dataset)
    src_data_loader_eval = get_data_loader(cfg.src_dataset, train=False)
    tgt_data_loader = get_data_loader(cfg.tgt_dataset)
    tgt_data_loader_eval = get_data_loader(cfg.tgt_dataset, train=False)
    # load dataset UM MU
    # src_data_loader = get_data_loader(cfg.src_dataset, sample = True)
    # src_data_loader_eval = get_data_loader(cfg.src_dataset, train=False)
    # tgt_data_loader = get_data_loader(cfg.tgt_dataset, sample = True)
Example 5
def train_Ours(args, train_loader, val_loader, knownclass, Encoder, Decoder,
               NorClsfier, SSDClsfier, summary_writer, saver):
    seed = init_random_seed(args.manual_seed)

    criterionCls = nn.CrossEntropyLoss()
    criterionRec = nn.MSELoss()

    if args.parallel_train:
        Encoder = DataParallel(Encoder)
        Decoder = DataParallel(Decoder)
        NorClsfier = DataParallel(NorClsfier)
        SSDClsfier = DataParallel(SSDClsfier)

    optimizer = optim.Adam(
        list(Encoder.parameters()) + list(NorClsfier.parameters()) +
        list(SSDClsfier.parameters()) + list(Decoder.parameters()),
        lr=args.lr)

    if args.adv == 'PGDattack':  # '==', not 'is', for string comparison
        print("**********Defense PGD Attack**********")
    elif args.adv == 'FGSMattack':
        print("**********Defense FGSM Attack**********")

    if args.adv == 'PGDattack':
        from advertorch.attacks import PGDAttack
        nor_adversary = PGDAttack(predict1=Encoder,
                                  predict2=NorClsfier,
                                  nb_iter=args.adv_iter)
        rot_adversary = PGDAttack(predict1=Encoder,
                                  predict2=SSDClsfier,
                                  nb_iter=args.adv_iter)

    elif args.adv == 'FGSMattack':
        from advertorch.attacks import GradientSignAttack
        nor_adversary = GradientSignAttack(predict1=Encoder,
                                           predict2=NorClsfier)
        rot_adversary = GradientSignAttack(predict1=Encoder,
                                           predict2=SSDClsfier)

    global_step = 0
    # ----------
    #  Training
    # ----------
    for epoch in range(args.n_epoch):

        Encoder.train()
        Decoder.train()
        NorClsfier.train()
        SSDClsfier.train()

        for steps, (orig, label, rot_orig,
                    rot_label) in enumerate(train_loader):

            label = lab_conv(knownclass, label)
            orig, label = orig.cuda(), label.long().cuda()

            rot_orig, rot_label = rot_orig.cuda(), rot_label.long().cuda()

            with ctx_noparamgrad_and_eval(Encoder):
                with ctx_noparamgrad_and_eval(NorClsfier):
                    with ctx_noparamgrad_and_eval(SSDClsfier):
                        adv = nor_adversary.perturb(orig, label)
                        rot_adv = rot_adversary.perturb(rot_orig, rot_label)

            latent_feat = Encoder(adv)
            norpred = NorClsfier(latent_feat)
            norlossCls = criterionCls(norpred, label)

            recon = Decoder(latent_feat)
            lossRec = criterionRec(recon, orig)

            ssdpred = SSDClsfier(Encoder(rot_adv))
            rotlossCls = criterionCls(ssdpred, rot_label)

            loss = (args.norClsWgt * norlossCls + args.rotClsWgt * rotlossCls +
                    args.RecWgt * lossRec)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            #============ tensorboard the log info ============#
            lossinfo = {
                'loss': loss.item(),
                'norlossCls': norlossCls.item(),
                'lossRec': lossRec.item(),
                'rotlossCls': rotlossCls.item(),
            }
            # log each per-step loss to tensorboard
            for tag, value in lossinfo.items():
                summary_writer.add_scalar(tag, value, global_step)

            global_step += 1

            #============ print the log info ============#
            if (steps + 1) % args.log_step == 0:
                errors = OrderedDict([
                    ('loss', loss.item()),
                    ('norlossCls', norlossCls.item()),
                    ('lossRec', lossRec.item()),
                    ('rotlossCls', rotlossCls.item()),
                ])

                saver.print_current_errors((epoch + 1), (steps + 1), errors)

        # evaluate performance on validation set periodically
        if ((epoch + 1) % args.val_epoch == 0):

            # switch model to evaluation mode
            Encoder.eval()
            NorClsfier.eval()

            running_corrects = 0.0
            epoch_size = 0.0
            val_loss_list = []

            # calculate accuracy on validation set
            for steps, (images, label) in enumerate(val_loader):

                label = lab_conv(knownclass, label)
                images, label = images.cuda(), label.long().cuda()

                adv = nor_adversary.perturb(images, label)

                with torch.no_grad():
                    logits = NorClsfier(Encoder(adv))
                    _, preds = torch.max(logits, 1)
                    running_corrects += torch.sum(preds == label.data)
                    epoch_size += images.size(0)

                    val_loss = criterionCls(logits, label)

                    val_loss_list.append(val_loss.item())

            val_loss_mean = sum(val_loss_list) / len(val_loss_list)

            val_acc = running_corrects.double() / epoch_size
            print('Val Acc: {:.4f}, Val Loss: {:.4f}'.format(
                val_acc, val_loss_mean))

            valinfo = {
                'Val Acc': val_acc.item(),
                'Val Loss': val_loss_mean,
            }
            for tag, value in valinfo.items():
                summary_writer.add_scalar(tag, value, (epoch + 1))

            orig_show = vutils.make_grid(orig, normalize=True, scale_each=True)
            recon_show = vutils.make_grid(recon,
                                          normalize=True,
                                          scale_each=True)

            summary_writer.add_image('Ori_Image', orig_show, (epoch + 1))
            summary_writer.add_image('Rec_Image', recon_show, (epoch + 1))

        if ((epoch + 1) % args.model_save_epoch == 0):
            model_save_path = os.path.join(args.results_path,
                                           args.training_type, 'snapshots',
                                           args.datasetname + '-' + args.split,
                                           args.denoisemean,
                                           args.adv + str(args.adv_iter))
            mkdir(model_save_path)
            torch.save(
                Encoder.state_dict(),
                os.path.join(model_save_path,
                             "Encoder-{}.pt".format(epoch + 1)))
            torch.save(
                NorClsfier.state_dict(),
                os.path.join(model_save_path,
                             "NorClsfier-{}.pt".format(epoch + 1)))
            torch.save(
                Decoder.state_dict(),
                os.path.join(model_save_path,
                             "Decoder-{}.pt".format(epoch + 1)))

    torch.save(Encoder.state_dict(),
               os.path.join(model_save_path, "Encoder-final.pt"))
    torch.save(NorClsfier.state_dict(),
               os.path.join(model_save_path, "NorClsfier-final.pt"))
    torch.save(Decoder.state_dict(),
               os.path.join(model_save_path, "Decoder-final.pt"))
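
Two details of this example are worth noting. First, because the models may be wrapped in DataParallel before torch.save(Encoder.state_dict(), ...), the saved keys can carry a 'module.' prefix, which is presumably why Example 1 restores these snapshots with parallel_reload=args.parallel_train. Second, the loop remaps raw dataset labels with lab_conv(knownclass, label), which is not shown; in open-set training this typically means mapping each known-class label to its index in knownclass. A minimal sketch under that assumption:

import torch


def lab_conv(knownclass, label):
    """Sketch only: map raw labels to indices within `knownclass`.

    Assumption (the real helper is not shown): labels of known classes
    become their position in the sorted knownclass list; anything else
    gets the sentinel value len(knownclass), i.e. "unknown".
    """
    mapping = {c: i for i, c in enumerate(sorted(knownclass))}
    converted = [mapping.get(int(l), len(knownclass)) for l in label]
    return torch.tensor(converted, dtype=torch.long)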
Example 6
"""Main script for ARDA."""

from core import test, train
from misc import params
from misc.utils import get_data_loader, init_model, init_random_seed
from models import Classifier, Discriminator, Generator

if __name__ == '__main__':
    # init random seed
    init_random_seed(params.manual_seed)

    # load dataset
    src_data_loader = get_data_loader(params.src_dataset)
    src_data_loader_test = get_data_loader(params.src_dataset, train=False)
    tgt_data_loader = get_data_loader(params.tgt_dataset)
    tgt_data_loader_test = get_data_loader(params.tgt_dataset, train=False)

    # init models
    classifier = init_model(net=Classifier(), restore=params.c_model_restore)
    generator = init_model(net=Generator(), restore=params.g_model_restore)
    critic = init_model(net=Discriminator(input_dims=params.d_input_dims,
                                          hidden_dims=params.d_hidden_dims,
                                          output_dims=params.d_output_dims),
                        restore=params.d_model_restore)

    # train models
    print("=== Training models ===")
    print(">>> Classifier <<<")
    print(classifier)
    print(">>> Generator <<<")
    print(generator)