Example 1
def test(args):

    args.seed = init_random_seed(args.manual_seed)

    #####################load datasets#####################

    kdataloader_trn, _, knownclass = get_data_loader(name=args.datasetname,
                                                     train=True,
                                                     split=args.split,
                                                     batch_size=args.batchsize,
                                                     image_size=args.imgsize)

    kdataloader_tst, ukdataloader_tst, knownclass = get_data_loader(
        name=args.datasetname,
        train=False,
        split=args.split,
        batch_size=args.batchsize,
        image_size=args.imgsize)

    nclass = len(knownclass)
    #####################Network Init#####################
    Encoderrestore = osp.join('results', args.defense, 'snapshots',
                              args.datasetname + '-' + args.split,
                              args.denoisemean, args.adv + str(args.adv_iter),
                              'Encoder-' + args.defensesnapshot + '.pt')
    Encoder = init_model(net=DenoiseResnet.ResnetEncoder(
        denoisemean=args.denoisemean,
        latent_size=args.latent_size,
        denoise=args.denoise),
                         init_type=args.init_type,
                         restore=Encoderrestore,
                         parallel_reload=args.parallel_train)

    NorClsfierrestore = osp.join('results', args.defense, 'snapshots',
                                 args.datasetname + '-' + args.split,
                                 args.denoisemean,
                                 args.adv + str(args.adv_iter),
                                 'NorClsfier-' + args.defensesnapshot + '.pt')
    NorClsfier = init_model(net=DenoiseResnet.NorClassifier(
        latent_size=args.latent_size, num_classes=nclass),
                            init_type=args.init_type,
                            restore=NorClsfierrestore,
                            parallel_reload=args.parallel_train)

    openmax(args, kdataloader_trn, kdataloader_tst, ukdataloader_tst,
            knownclass, Encoder, NorClsfier)
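
# --- Sketch: init_model (helper not shown in these snippets) ---
# Inferred from the call sites above and the misc.utils import in Example 6;
# a minimal sketch under that assumption, not the repo's actual code. The
# normal-init stand-in below is hypothetical -- the real helper presumably
# dispatches on init_type.
import os
import torch
import torch.nn as nn

def _init_weights_normal(m):
    # simplistic stand-in for an init_type-driven initializer
    if isinstance(m, (nn.Conv2d, nn.Linear)):
        nn.init.normal_(m.weight, 0.0, 0.02)

def init_model(net, init_type='normal', restore=None, parallel_reload=False):
    if restore is not None and os.path.exists(restore):
        state = torch.load(restore)
        if parallel_reload:
            # snapshots saved from nn.DataParallel carry a "module." prefix
            state = {k.replace('module.', '', 1): v for k, v in state.items()}
        net.load_state_dict(state)
        print('Restored model from: {}'.format(restore))
    else:
        net.apply(_init_weights_normal)
    return net.cuda() if torch.cuda.is_available() else net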
Example 2
def main(args):

    if args.training_type == 'Train':
        savefilename = osp.join(args.dataset1 + args.dataset2 + args.dataset3 +
                                '1')
    elif args.training_type == 'Test':
        savefilename = osp.join(
            args.tstfile,
            args.tstdataset + 'to' + args.dataset_target + args.snapshotnum)

    args.seed = init_random_seed(args.manual_seed)

    if args.training_type in ['Train', 'Test']:
        summary_writer = SummaryWriter(
            osp.join(args.results_path, 'log', savefilename))
        saver = Saver(args, savefilename)
        saver.print_config()

    #####################load datasets#####################

    if args.training_type == 'Train':

        data_loader1_real = get_dataset_loader(name=args.dataset1,
                                               getreal=True,
                                               batch_size=args.batchsize)
        data_loader1_fake = get_dataset_loader(name=args.dataset1,
                                               getreal=False,
                                               batch_size=args.batchsize)

        data_loader2_real = get_dataset_loader(name=args.dataset2,
                                               getreal=True,
                                               batch_size=args.batchsize)
        data_loader2_fake = get_dataset_loader(name=args.dataset2,
                                               getreal=False,
                                               batch_size=args.batchsize)

        data_loader3_real = get_dataset_loader(name=args.dataset3,
                                               getreal=True,
                                               batch_size=args.batchsize)
        data_loader3_fake = get_dataset_loader(name=args.dataset3,
                                               getreal=False,
                                               batch_size=args.batchsize)

        data_loader_target = get_tgtdataset_loader(name=args.dataset_target,
                                                   batch_size=args.batchsize)

    elif args.training_type == 'Test':

        data_loader_target = get_tgtdataset_loader(name=args.dataset_target,
                                                   batch_size=args.batchsize)

    ##################### load models#####################

    FeatExtmodel = models.create(args.arch_FeatExt)
    DepthEstmodel = models.create(args.arch_DepthEst)
    FeatEmbdmodel = models.create(args.arch_FeatEmbd,
                                  momentum=args.bn_momentum)

    if args.training_type == 'Train':

        FeatExt_restore = None
        DepthEst_restore = None
        FeatEmbd_restore = None

    elif args.training_type == 'Test':
        FeatExt_restore = osp.join('results', args.tstfile, 'snapshots',
                                   args.tstdataset,
                                   'FeatExtor-' + args.snapshotnum + '.pt')
        FeatEmbd_restore = osp.join('results', args.tstfile, 'snapshots',
                                    args.tstdataset,
                                    'FeatEmbder-' + args.snapshotnum + '.pt')
        DepthEst_restore = None

    else:
        raise NotImplementedError('method type [%s] is not implemented' %
                                  args.training_type)

    FeatExtor = init_model(net=FeatExtmodel,
                           init_type=args.init_type,
                           restore=FeatExt_restore,
                           parallel_reload=True)
    DepthEstor = init_model(net=DepthEstmodel,
                            init_type=args.init_type,
                            restore=DepthEst_restore,
                            parallel_reload=True)
    FeatEmbder = init_model(net=FeatEmbdmodel,
                            init_type=args.init_type,
                            restore=FeatEmbd_restore,
                            parallel_reload=False)

    print(">>> FeatExtor <<<")
    print(FeatExtor)
    print(">>> DepthEstor <<<")
    print(DepthEstor)
    print(">>> FeatEmbder <<<")
    print(FeatEmbder)
    ##################### training models#####################

    if args.training_type == 'Train':

        Train(args, FeatExtor, DepthEstor, FeatEmbder, data_loader1_real,
              data_loader1_fake, data_loader2_real, data_loader2_fake,
              data_loader3_real, data_loader3_fake, data_loader_target,
              summary_writer, saver, savefilename)

    elif args.training_type == 'Test':

        Test(args, FeatExtor, FeatEmbder, data_loader_target, savefilename)

    else:
        raise NotImplementedError('method type [%s] is not implemented' %
                                  args.training_type)
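
# --- Sketch: get_dataset_loader (not shown in these snippets) ---
# The calls above imply: `name` picks a dataset, `getreal` selects the
# real or fake subset, and a shuffled DataLoader comes back. A
# self-contained sketch with placeholder tensors standing in for the
# actual data; the real loader reads the named dataset from disk.
import torch
from torch.utils.data import DataLoader, TensorDataset

def get_dataset_loader(name, getreal, batch_size):
    label = 1 if getreal else 0  # assumed convention: real=1, fake=0
    images = torch.randn(64, 3, 256, 256)                # dummy frames
    labels = torch.full((64,), label, dtype=torch.long)  # one class per split
    return DataLoader(TensorDataset(images, labels),
                      batch_size=batch_size, shuffle=True, drop_last=True)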
Example 3
def main(args):

    if args.training_type == 'Train':
        savefilename = osp.join(args.dataset1 + args.dataset2 + args.dataset3 +
                                '1')
    elif args.training_type == 'Pre_train':
        savefilename = osp.join(args.dataset_target + '')
    elif args.training_type == 'Test':
        savefilename = osp.join(args.tstfile,
                                args.tstdataset + args.snapshotnum)

    args.seed = init_random_seed(args.manual_seed)

    if args.training_type in ['Train', 'Pre_train', 'Test']:
        summary_writer = SummaryWriter(
            osp.join(args.results_path, 'log', savefilename))
        saver = Saver(args, savefilename)
        saver.print_config()

    #####################load datasets#####################

    if args.training_type == 'Train':

        data_loader1_real = get_dataset_loader(name=args.dataset1,
                                               getreal=True,
                                               batch_size=args.batchsize)
        data_loader1_fake = get_dataset_loader(name=args.dataset1,
                                               getreal=False,
                                               batch_size=args.batchsize)

        data_loader2_real = get_dataset_loader(name=args.dataset2,
                                               getreal=True,
                                               batch_size=args.batchsize)
        data_loader2_fake = get_dataset_loader(name=args.dataset2,
                                               getreal=False,
                                               batch_size=args.batchsize)

        data_loader3_real = get_dataset_loader(name=args.dataset3,
                                               getreal=True,
                                               batch_size=args.batchsize)
        data_loader3_fake = get_dataset_loader(name=args.dataset3,
                                               getreal=False,
                                               batch_size=args.batchsize)

        data_loader_target = get_tgtdataset_loader(name=args.dataset_target,
                                                   batch_size=args.batchsize)

    elif args.training_type == 'Test':

        data_loader_target = get_tgtdataset_loader(name=args.dataset_target,
                                                   batch_size=args.batchsize)

    elif args.training_type == 'Pre_train':
        data_loader_real = get_dataset_loader(name=args.dataset_target,
                                              getreal=True,
                                              batch_size=args.batchsize)
        data_loader_fake = get_dataset_loader(name=args.dataset_target,
                                              getreal=False,
                                              batch_size=args.batchsize)

    ##################### load models#####################

    FeatExtmodel = models.create(args.arch_FeatExt)
    FeatExtmodel_pre1 = models.create(args.arch_FeatExt)
    FeatExtmodel_pre2 = models.create(args.arch_FeatExt)
    FeatExtmodel_pre3 = models.create(args.arch_FeatExt)

    FeatEmbdmodel = models.create(args.arch_FeatEmbd,
                                  embed_size=args.embed_size)
    DepthEstmodel = models.create(args.arch_DepthEst)

    Dismodel1 = models.create(args.arch_Dis1)
    Dismodel2 = models.create(args.arch_Dis2)
    Dismodel3 = models.create(args.arch_Dis3)

    if args.training_type == 'Train':

        FeatExtS1_restore = osp.join('results', 'Pre_train', 'snapshots',
                                     args.dataset1, 'DGFA-Ext-final.pt')
        FeatExtS2_restore = osp.join('results', 'Pre_train', 'snapshots',
                                     args.dataset2, 'DGFA-Ext-final.pt')
        FeatExtS3_restore = osp.join('results', 'Pre_train', 'snapshots',
                                     args.dataset3, 'DGFA-Ext-final.pt')

        FeatExtorS1 = init_model(net=FeatExtmodel_pre1,
                                 init_type=args.init_type,
                                 restore=FeatExtS1_restore)
        FeatExtorS2 = init_model(net=FeatExtmodel_pre2,
                                 init_type=args.init_type,
                                 restore=FeatExtS2_restore)
        FeatExtorS3 = init_model(net=FeatExtmodel_pre3,
                                 init_type=args.init_type,
                                 restore=FeatExtS3_restore)

        Dis_restore1 = None
        Dis_restore2 = None
        Dis_restore3 = None

        FeatExt_restore = None
        DepthEst_restore = None
        FeatEmbd_restore = None

        FeatEmbder = init_model(net=FeatEmbdmodel,
                                init_type=args.init_type,
                                restore=FeatEmbd_restore)

    elif args.training_type == 'Pre_train':
        FeatExt_restore = None
        DepthEst_restore = None

        Dis_restore1 = None
        Dis_restore2 = None
        Dis_restore3 = None

    elif args.training_type == 'Test':
        FeatExt_restore = osp.join('results', args.tstfile, 'snapshots',
                                   args.tstdataset,
                                   'DGFA-Ext-' + args.snapshotnum + '.pt')
        DepthEst_restore = osp.join('results', args.tstfile, 'snapshots',
                                    args.tstdataset,
                                    'DGFA-Depth-' + args.snapshotnum + '.pt')
        FeatEmbd_restore = osp.join('results', args.tstfile, 'snapshots',
                                    args.tstdataset,
                                    'DGFA-Embd-' + args.snapshotnum + '.pt')
        FeatEmbder = init_model(net=FeatEmbdmodel,
                                init_type=args.init_type,
                                restore=FeatEmbd_restore)

        Dis_restore1 = None
        Dis_restore2 = None
        Dis_restore3 = None

    else:
        raise NotImplementedError('method type [%s] is not implemented' %
                                  args.training_type)

    FeatExtor = init_model(net=FeatExtmodel,
                           init_type=args.init_type,
                           restore=FeatExt_restore)
    DepthEstor = init_model(net=DepthEstmodel,
                            init_type=args.init_type,
                            restore=DepthEst_restore)

    Discriminator1 = init_model(net=Dismodel1,
                                init_type=args.init_type,
                                restore=Dis_restore1)
    Discriminator2 = init_model(net=Dismodel2,
                                init_type=args.init_type,
                                restore=Dis_restore2)
    Discriminator3 = init_model(net=Dismodel3,
                                init_type=args.init_type,
                                restore=Dis_restore3)

    print(">>> FeatExtor <<<")
    print(FeatExtor)

    print(">>> FeatEmbder <<<")
    print(FeatEmbder)

    print(">>> DepthEstor <<<")
    print(DepthEstor)

    print(">>> Discriminator <<<")
    print(Discriminator1)

    ##################### training models#####################

    if args.training_type == 'Train':

        Train(args, FeatExtor, DepthEstor, FeatEmbder, Discriminator1,
              Discriminator2, Discriminator3, FeatExtorS1, FeatExtorS2,
              FeatExtorS3, data_loader1_real, data_loader1_fake,
              data_loader2_real, data_loader2_fake, data_loader3_real,
              data_loader3_fake, data_loader_target, summary_writer, saver,
              savefilename)

    elif args.training_type == 'Test':

        Test(args, FeatExtor, DepthEstor, FeatEmbder, data_loader_target,
             savefilename)

    elif args.training_type == 'Pre_train':

        Pre_train(args, FeatExtor, DepthEstor, data_loader_real,
                  data_loader_fake, summary_writer, saver, savefilename)
    else:
        raise NotImplementedError('method type [%s] is not implemented' %
                                  args.training_type)
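
# --- Sketch: the models.create factory used in Examples 2 and 3 ---
# create() is called with an architecture name (args.arch_FeatExt, ...)
# plus keyword arguments such as embed_size or momentum. A registry
# mapping names to constructors is the usual shape of such a factory;
# the single toy entry below is illustrative, since the real
# architectures are not part of these snippets.
import torch.nn as nn

__factory = {
    'toy_linear': lambda **kwargs: nn.Linear(kwargs.get('in_dim', 8), 2),
}

def create(name, **kwargs):
    if name not in __factory:
        raise KeyError('Unknown model: {}'.format(name))
    return __factory[name](**kwargs)

# usage mirroring the snippets: model = create('toy_linear', in_dim=16)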
Example 4
    logger = Logger(logs_path)

    # load dataset SM
    src_data_loader = get_data_loader(cfg.src_dataset)
    src_data_loader_eval = get_data_loader(cfg.src_dataset, train=False)
    tgt_data_loader = get_data_loader(cfg.tgt_dataset)
    tgt_data_loader_eval = get_data_loader(cfg.tgt_dataset, train=False)
    # load dataset UM MU
    # src_data_loader = get_data_loader(cfg.src_dataset, sample = True)
    # src_data_loader_eval = get_data_loader(cfg.src_dataset, train=False)
    # tgt_data_loader = get_data_loader(cfg.tgt_dataset, sample = True)
    # tgt_data_loader_eval = get_data_loader(cfg.tgt_dataset, train=False)
    # load models

    src_encoder = init_model(net=SythnetEncoder(inputc=cfg.inputc,
                                                nf=cfg.d_input_dims),
                             restore=cfg.src_encoder_restore)
    src_classifier = init_model(net=SythnetClassifier(nf=cfg.d_input_dims,
                                                      ncls=cfg.ncls),
                                restore=cfg.src_classifier_restore)
    tgt_classifier = init_model(net=SythnetClassifier(nf=cfg.d_input_dims,
                                                      ncls=cfg.ncls),
                                restore=cfg.src_classifier_restore)
    tgt_encoder = init_model(net=SythnetEncoder(inputc=cfg.inputc,
                                                nf=cfg.d_input_dims),
                             restore=cfg.tgt_encoder_restore)
    critic = init_model(Discriminator_feat(input_dims=cfg.d_input_dims,
                                           hidden_dims=cfg.d_hidden_dims,
                                           output_dims=cfg.d_output_dims),
                        restore=cfg.d_model_restore)
    generator = init_model(net=SythnetGenerator(input_dims=cfg.d_input_dims,
Example 5
        name=args.datasetname,
        train=True,
        split=args.split,
        batch_size=args.batchsize,
        image_size=args.img_size)

    nclass = len(knownclass)

    Encoderrestore = None
    Decoderrestore = None
    NorClsfierrestore = None
    SSDClsfierrestore = None

    Encoder = init_model(net=DenoiseResnet.ResnetEncoder(
        denoisemean=args.denoisemean,
        latent_size=args.latent_size,
        denoise=args.denoise),
                         init_type=args.init_type,
                         restore=Encoderrestore)
    Decoder = init_model(
        net=DenoiseResnet.ResnetDecoder(latent_size=args.latent_size),
        init_type=args.init_type,
        restore=Decoderrestore)

    NorClsfier = init_model(net=DenoiseResnet.NorClassifier(
        latent_size=args.latent_size, num_classes=nclass),
                            init_type=args.init_type,
                            restore=NorClsfierrestore)

    SSDClsfier = init_model(
        net=DenoiseResnet.SSDClassifier(latent_size=args.latent_size),
        init_type=args.init_type,
Example 6
from misc import params
from misc.utils import get_data_loader, init_model, init_random_seed
from models import Classifier, Discriminator, Generator

if __name__ == '__main__':
    # init random seed
    init_random_seed(params.manual_seed)

    # load dataset
    src_data_loader = get_data_loader(params.src_dataset)
    src_data_loader_test = get_data_loader(params.src_dataset, train=False)
    tgt_data_loader = get_data_loader(params.tgt_dataset)
    tgt_data_loader_test = get_data_loader(params.tgt_dataset, train=False)

    # init models
    classifier = init_model(net=Classifier(), restore=params.c_model_restore)
    generator = init_model(net=Generator(), restore=params.g_model_restore)
    critic = init_model(net=Discriminator(input_dims=params.d_input_dims,
                                          hidden_dims=params.d_hidden_dims,
                                          output_dims=params.d_output_dims),
                        restore=params.d_model_restore)

    # train models
    print("=== Training models ===")
    print(">>> Classifier <<<")
    print(classifier)
    print(">>> Generator <<<")
    print(generator)
    print(">>> Critic <<<")
    print(critic)
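
# --- Sketch: init_random_seed (imported from misc.utils above) ---
# Earlier examples assign its return value back to args.seed, so it must
# return the seed it used. A plausible sketch, assuming the common
# pattern of drawing a seed when none is given; the real version may
# also seed NumPy.
import random
import torch

def init_random_seed(manual_seed=None):
    seed = random.randint(1, 10000) if manual_seed is None else manual_seed
    print('use random seed: {}'.format(seed))
    random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    return seed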
Example 7
    # init random seed
    init_random_seed(cfg.manual_seed)

    # speed up cudnn
    # enable_cudnn_benchmark()

    # load dataset
    source_dataset = get_data_loader(cfg.source_dataset, get_dataset=True)
    source_data_loader = get_data_loader(cfg.source_dataset)
    source_data_loader_test = get_data_loader(cfg.source_dataset, train=False)
    target_dataset = get_data_loader(cfg.target_dataset, get_dataset=True)
    # target_data_loader = get_data_loader(cfg.target_dataset)
    target_data_loader_test = get_data_loader(cfg.target_dataset, train=False)

    # init models
    F = init_model(net=EncoderA(), restore=cfg.model_restore["F"])
    F_1 = init_model(net=ClassifierA(cfg.dropout_keep["F_1"]),
                     restore=cfg.model_restore["F_1"])
    F_2 = init_model(net=ClassifierA(cfg.dropout_keep["F_2"]),
                     restore=cfg.model_restore["F_2"])
    F_t = init_model(net=ClassifierA(cfg.dropout_keep["F_t"]),
                     restore=cfg.model_restore["F_t"])

    # show model structure
    print(">>> F model <<<")
    print(F)
    print(">>> F_1 model <<<")
    print(F_1)
    print(">>> F_2 model <<<")
    print(F_2)
    print(">>> F_t model <<<")
Example 8
            print('losses: ', loss.item())
            print('Epoch: ', epoch)
            if epoch % 10 == 0:
                save_checkpoint(model.state_dict())


def save_checkpoint(state, filename='checkpoint.pth.tar'):
    torch.save(state, filename)
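
# Reload sketch for the checkpoint written by save_checkpoint. In the
# __main__ block below, the state_dict is saved from an nn.DataParallel
# wrapper, so its keys carry a "module." prefix; loading into the same
# wrapper keeps the keys aligned. HNet.HNet is the repo's own class.
#
#     hnet = HNet.HNet()
#     model = nn.DataParallel(hnet).cuda()
#     model.load_state_dict(torch.load('checkpoint.pth.tar'))
#
# Alternative without the wrapper: strip the prefix first.
#
#     state = {k.replace('module.', '', 1): v
#              for k, v in torch.load('checkpoint.pth.tar').items()}
#     hnet.load_state_dict(state)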


if __name__ == '__main__':
    # init random seed
    init_random_seed(params.manual_seed)
    args = parser.parse_args()

    hnet = init_model(net=HNet.HNet(), restore=None)
    print("=== Training models ===")
    print(">>> hnet <<<")
    print(hnet)

    model = nn.DataParallel(hnet).cuda()

    criterion = ParamLossFunc()

    optimizer = optim.Adam(model.parameters(), args.lr, betas=(0.9, 0.99))
    cudnn.benchmark = True

    src_data_loader = get_tuSimple('train')
    print(src_data_loader)

    for epoch in range(args.start_epoch, args.epochs):
Example 9
            imgSz=imgsz,
            ccropSz=ccropsz)
        source_dataset = source_data_loader.dataset
        target_dataset = target_data_loader.dataset
        encsize = 7 * 7 * 512

    if 0:
        plt.figure()
        atr.showDataSet(target_data_loader_test)
        plt.waitforbuttonpress()

    # init models
    riw = False  # don't random init, we want the vgg pre-trained weights
    #!! F = init_model(net=EncoderA(), restore=model_restore["F"])
    F = init_model(net=EncoderVGG(),
                   restore=model_restore["F"],
                   randomInitialWeights=riw)

    #!! and flatten in train.py
    if 0:
        tmp = atr.createModel(True, 'vgg16', cfg.num_classes)
        tmp.cuda()
        F = tmp.features
        # does nothing:  F.append(nn.AdaptiveAvgPool2d((7, 7)))

    useBN = True
    if 0:
        # Use VGG
        riw = False
        F_1 = init_model(net=ClassifierVGG(cfg.num_classes,
                                           cfg.dropout_keep["F_1"],