Example #1
0
def initMasterNet(args):
    """Build and load the pretrained "master" network described by a config file.

    Reads the master's hyper-parameters from the .ini file at ``args.m_conf_path``,
    casts each value to the type of the matching attribute of ``args``, builds the
    network, loads the weights stored at ``args.m_net_path`` and puts the model in
    eval mode.

    Args:
        args: the argument namespace of the current run. Must expose
            ``m_conf_path``, ``m_net_path`` and ``cuda``; every key present in the
            master config must also exist as an attribute of ``args``.

    Returns:
        The loaded master network, in eval mode.
    """
    config = configparser.ConfigParser()
    config.read(args.m_conf_path)
    args_master = Bunch(config["default"])

    argDic = args.__dict__
    mastDic = args_master.__dict__

    # configparser yields every value as a string: cast each master value to the
    # type of the corresponding attribute of `args`.
    for arg in mastDic:
        if argDic[arg] is None:
            mastDic[arg] = None
        elif isinstance(argDic[arg], bool):
            # bool("False") would be True, so booleans need an explicit parser.
            mastDic[arg] = str2bool(mastDic[arg])
        else:
            mastDic[arg] = type(argDic[arg])(mastDic[arg])

    # Any argument missing from the master config is inherited from the run args.
    for arg in argDic:
        if arg not in mastDic:
            mastDic[arg] = argDic[arg]

    master_net = modelBuilder.netBuilder(args_master)
    # Without CUDA, force the tensors onto the CPU regardless of where they were saved.
    params = torch.load(args.m_net_path, map_location="cpu" if not args.cuda else None)
    master_net.load_state_dict(params, strict=True)
    master_net.eval()
    return master_net
Example #2
0
def main(argv=None):
    """Script entry point.

    Parses arguments from the command line and a config file, then either:

    - with ``--comp_feat``: computes the visual features of every image of the
      test set and writes them to disk, or
    - otherwise: runs the full training loop, validating the model after each
      epoch (or only re-evaluating saved weights when ``--no_train`` is set).
    """

    # Getting arguments from the config file and the command line
    # Building the arg reader
    argreader = ArgReader(argv)

    argreader.parser.add_argument('--comp_feat', action='store_true',help='To compute and write in a file the features of all images in the test set. All the arguments used to \
                                    build the model and the test data loader should be set.')
    argreader.parser.add_argument('--no_train', type=str,nargs=2,help='To use to re-evaluate a model at each epoch after training. At each epoch, the model is not trained but \
                                                                            the weights of the corresponding epoch are loaded and then the model is evaluated.\
                                                                            The values of this argument are the exp_id and the model_id of the model to get the weights from.')

    # Argument groups contributed by the various project modules
    argreader = addInitArgs(argreader)
    argreader = addLossArgs(argreader)
    argreader = addOptimArgs(argreader)
    argreader = addValArgs(argreader)

    argreader = modelBuilder.addArgs(argreader)
    argreader = load_data.addArgs(argreader)

    # Reading the command line args
    argreader.getRemainingArgs()

    args = argreader.args

    if args.redirect_out:
        sys.stdout = open("python.out", 'w')

    # The folders where the experiment files will be written
    if not (os.path.exists("../vis/{}".format(args.exp_id))):
        os.makedirs("../vis/{}".format(args.exp_id))
    if not (os.path.exists("../results/{}".format(args.exp_id))):
        os.makedirs("../results/{}".format(args.exp_id))
    if not (os.path.exists("../models/{}".format(args.exp_id))):
        os.makedirs("../models/{}".format(args.exp_id))

    # Write the arguments in a config file so the experiment can be re-run
    argreader.writeConfigFile("../models/{}/{}.ini".format(args.exp_id,args.model_id))

    # TensorBoard writer for train/validation metrics
    writer = SummaryWriter("../results/{}".format(args.exp_id))

    print("Model :",args.model_id,"Experience :",args.exp_id)

    if args.comp_feat:
        # Feature-extraction mode: no training, only run the (visual) feature
        # extractor on the test set and save its outputs.

        testLoader = load_data.TestLoader(args.val_l,args.dataset_test,args.test_part_beg,args.test_part_end,args.img_size,\
                                          args.resize_image,args.exp_id,args.random_frame_val)

        if args.feat != "None":
            featModel = modelBuilder.buildFeatModel(args.feat,args.pretrain_dataset,args.lay_feat_cut)
            if args.cuda:
                featModel = featModel.cuda()
            if args.init_path_visual != "None":
                # Weights saved for the visual sub-model alone
                featModel.load_state_dict(torch.load(args.init_path_visual))
            elif args.init_path != "None":
                # Weights saved for the whole model: load them into a full net
                # and keep only its feature extractor.
                model = modelBuilder.netBuilder(args)
                params = torch.load(args.init_path)
                # NOTE(review): key renaming appears to undo a DataParallel-style
                # "module." prefix on the cnn sub-module — confirm against the
                # checkpoint format.
                state_dict = {k.replace("module.cnn.","cnn.module."): v for k,v in params.items()}
                model.load_state_dict(state_dict)
                featModel = model.featModel

            featModel.eval()
        else:
            featModel = None

        with torch.no_grad():
            evalAllImages(args.exp_id,args.model_id,featModel,testLoader,args.cuda,args.log_interval)

    else:
        # Training mode

        # Seed every RNG for reproducibility
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        if args.cuda:
            torch.cuda.manual_seed(args.seed)

        paramToOpti = []

        trainLoader,trainDataset = load_data.buildSeqTrainLoader(args)

        valLoader = load_data.TestLoader(args.val_l,args.dataset_val,args.val_part_beg,args.val_part_end,\
                                            args.img_size,args.resize_image,\
                                            args.exp_id,args.random_frame_val)

        # Building the net
        net = modelBuilder.netBuilder(args)

        if args.cuda:
            net = net.cuda()

        trainFunc = epochSeqTr
        valFunc = epochSeqVal

        # Shared keyword arguments for the train and validation epoch functions
        kwargsTr = {'log_interval':args.log_interval,'loader':trainLoader,'args':args,'writer':writer}
        kwargsVal = kwargsTr.copy()

        kwargsVal['loader'] = valLoader
        kwargsVal.update({"metricEarlyStop":args.metric_early_stop,"maximiseMetric":args.maximise_metric})

        if args.adv_weight > 0:
            # Adversarial training: a discriminator with its own loader and optimizer
            kwargsTr["discrModel"] = modelBuilder.Discriminator(net.nbFeat,args.discr_dropout)
            kwargsTr["discrModel"] = kwargsTr["discrModel"].cuda() if args.cuda else kwargsTr["discrModel"].cpu()
            kwargsTr["discrLoader"] = load_data.buildFrameTrainLoader(args)
            kwargsTr["discrOptim"] = torch.optim.SGD(kwargsTr["discrModel"].parameters(), lr=args.lr,momentum=args.momentum)
        else:
            kwargsTr["discrModel"],kwargsTr["discrLoader"],kwargsTr["discrOptim"] = None,None,None

        for p in net.parameters():
            paramToOpti.append(p)

        # Wrap the parameter list in a generator (the form the optimizer
        # construction below presumably expects)
        paramToOpti = (p for p in paramToOpti)

        # Getting the constructor and the kwargs for the chosen optimizer
        optimConst,kwargsOpti = get_OptimConstructor_And_Kwargs(args.optim,args.momentum)

        startEpoch = initialize_Net_And_EpochNumber(net,args.exp_id,args.model_id,args.cuda,args.start_mode,args.init_path,args.init_path_visual_temp)

        # If no learning rate schedule is indicated (i.e. there's only one learning rate),
        # the args.lr argument will be a float and not a float list.
        # Converting it to a list with one element makes the rest of processing easier
        if type(args.lr) is float:
            args.lr = [args.lr]

        # Index of the learning rate currently in use in args.lr
        lrCounter = 0

        metricLastVal = None

        # Per-epoch validation outputs/targets, accumulated for the histogram plots
        outDictEpochs = {}
        targDictEpochs = {}

        for epoch in range(startEpoch, args.epochs + 1):

            # Switch to the next learning rate (and rebuild the optimizer) when scheduled
            kwargsOpti,kwargsTr,lrCounter = update.updateLR(epoch,args.epochs,args.lr,startEpoch,kwargsOpti,kwargsTr,lrCounter,net,optimConst)

            kwargsTr["epoch"],kwargsVal["epoch"] = epoch,epoch
            kwargsTr["model"],kwargsVal["model"] = net,net

            kwargsTr = resetAdvIter(kwargsTr)

            if not args.no_train:
                trainFunc(**kwargsTr)
            else:
                # Re-evaluation mode: load the weights saved at this epoch by a
                # previous run instead of training
                net.load_state_dict(torch.load("../models/{}/model{}_epoch{}".format(args.no_train[0],args.no_train[1],epoch)))

            kwargsVal["metricLastVal"] = metricLastVal

            # Checking if validation has already been done
            # (one result file per video is expected)
            if len(glob.glob("../results/{}/{}_epoch{}_*".format(args.exp_id,args.model_id,epoch))) < len(kwargsVal["loader"].videoPaths):
                with torch.no_grad():
                    metricLastVal,outDict,targDict = valFunc(**kwargsVal)
                outDictEpochs[epoch] = outDict
                targDictEpochs[epoch] = targDict
                update.updateHist(writer,args.model_id,outDictEpochs,targDictEpochs)
            else:
                print("Validation epoch {} already done !".format(epoch))
Example #3
0
def run(args,trial=None):
    """Run one full experiment: training, validation and (optionally) testing.

    When ``trial`` is an Optuna trial, the main hyper-parameters are sampled
    from it, the best checkpoint is renamed with the trial number and the final
    validation metric is returned (for the Optuna study). When ``trial`` is
    None, the function can additionally run the test set and/or compute
    Grad-CAM maps, depending on ``args``.

    Args:
        args: the argument namespace of the experiment.
        trial: an Optuna trial object, or None for a plain run.

    Returns:
        The last validation metric when ``trial`` is not None; otherwise None.
    """
    writer = SummaryWriter("../results/{}".format(args.exp_id))

    # Seed every RNG for reproducibility
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    if not trial is None:
        # Hyper-parameter search: sample the values from the Optuna trial
        args.lr = trial.suggest_float("lr", 1e-4, 1e-2, log=True)
        args.optim = trial.suggest_categorical("optim", OPTIM_LIST)
        args.batch_size = trial.suggest_int("batch_size", 10, args.max_batch_size, log=True)
        args.dropout = trial.suggest_float("dropout", 0, 0.6,step=0.2)
        args.weight_decay = trial.suggest_float("weight_decay", 1e-6, 1e-3, log=True)

        if args.optim == "SGD":
            # Momentum and the scheduler only make sense for SGD here
            args.momentum = trial.suggest_float("momentum", 0., 0.9,step=0.1)
            args.use_scheduler = trial.suggest_categorical("use_scheduler",[True,False])

        if args.opt_data_aug:
            # Also search over the data-augmentation strengths
            args.brightness = trial.suggest_float("brightness", 0, 0.5, step=0.05)
            args.saturation = trial.suggest_float("saturation", 0, 0.9, step=0.1)
            args.crop_ratio = trial.suggest_float("crop_ratio", 0.8, 1, step=0.05)

        if args.master_net:
            # Distillation-from-master hyper-parameters
            args.kl_temp = trial.suggest_float("kl_temp", 1, 21, step=5)
            args.kl_interp = trial.suggest_float("kl_interp", 0.1, 1, step=0.1)

    trainLoader, trainDataset = load_data.buildTrainLoader(args,withSeg=args.with_seg,reprVec=args.repr_vec)
    valLoader,_ = load_data.buildTestLoader(args, "val",withSeg=args.with_seg,reprVec=args.repr_vec)

    # Building the net
    net = modelBuilder.netBuilder(args)

    trainFunc = epochSeqTr
    valFunc = epochImgEval

    # Shared keyword arguments for the train and validation epoch functions
    kwargsTr = {'log_interval': args.log_interval, 'loader': trainLoader, 'args': args, 'writer': writer}
    kwargsVal = kwargsTr.copy()

    kwargsVal['loader'] = valLoader
    kwargsVal["metricEarlyStop"] = args.metric_early_stop

    startEpoch = initialize_Net_And_EpochNumber(net, args.exp_id, args.model_id, args.cuda, args.start_mode,
                                                args.init_path, args.strict_init)

    kwargsTr["optim"],scheduler = getOptim_and_Scheduler(args.optim, args.lr,args.momentum,args.weight_decay,args.use_scheduler,args.epochs,-1,net)

    epoch = startEpoch
    bestEpoch, worseEpochNb = getBestEpochInd_and_WorseEpochNb(args.start_mode, args.exp_id, args.model_id, epoch)

    # Direction of the early-stopping comparison depends on the metric
    if args.maximise_val_metric:
        bestMetricVal = -np.inf
        isBetter = lambda x, y: x > y
    else:
        bestMetricVal = np.inf
        isBetter = lambda x, y: x < y

    if args.master_net:
        # Pretrained master network shared by the train and validation epochs
        kwargsTr["master_net"] = initMasterNet(args)
        kwargsVal["master_net"] = kwargsTr["master_net"]

    if not args.only_test and not args.grad_cam:
        # Train until the last epoch or until the metric stops improving
        while epoch < args.epochs + 1 and worseEpochNb < args.max_worse_epoch_nb:

            if args.bil_clus_soft_sched:
                update.updateBilClusSoftmSched(net,epoch,args.epochs)

            kwargsTr["epoch"], kwargsVal["epoch"] = epoch, epoch
            kwargsTr["model"], kwargsVal["model"] = net, net

            if not args.no_train:

                trainFunc(**kwargsTr)
                if not scheduler is None:
                    writer.add_scalars("LR", {args.model_id: scheduler.get_last_lr()}, epoch)
                    scheduler.step()
            else:
                # Re-evaluation mode: load the weights saved at this epoch by a
                # previous run instead of training
                if not args.no_val:
                    if args.model_id_no_train == "":
                        args.model_id_no_train = args.model_id
                    if args.exp_id_no_train == "":
                        args.exp_id_no_train = args.exp_id

                    net = preprocessAndLoadParams("../models/{}/model{}_epoch{}".format(args.exp_id_no_train, args.model_id_no_train, epoch),args.cuda,net,args.strict_init)

            if not args.no_val:
                with torch.no_grad():
                    metricVal = valFunc(**kwargsVal)

                # Save the checkpoint if it improves the metric; otherwise count
                # one more "worse" epoch towards early stopping
                bestEpoch, bestMetricVal, worseEpochNb = update.updateBestModel(metricVal, bestMetricVal, args.exp_id,
                                                                                args.model_id, bestEpoch, epoch, net,
                                                                                isBetter, worseEpochNb)
                if trial is not None:
                    trial.report(metricVal, epoch)

            epoch += 1

    if trial is None:
        if args.run_test or args.only_test:

            # test_done.txt keeps one "model_id,epoch" line per test already run,
            # so that the (expensive) test pass is not repeated needlessly
            if os.path.exists("../results/{}/test_done.txt".format(args.exp_id)):
                test_done = np.genfromtxt("../results/{}/test_done.txt".format(args.exp_id),delimiter=",",dtype=str)

                # genfromtxt returns a 1-D array when the file has a single line
                if len(test_done.shape) == 1:
                    test_done = test_done[np.newaxis]
            else:
                test_done = None

            # NOTE(review): when test_done is None this comparison relies on
            # numpy's None-vs-array elementwise behaviour; the `test_done is None`
            # check below makes the result irrelevant, but it is fragile — verify.
            alreadyDone = (test_done==np.array([args.model_id,str(bestEpoch)])).any()

            if (test_done is None) or (alreadyDone and args.do_test_again) or (not alreadyDone):

                testFunc = valFunc

                kwargsTest = kwargsVal
                kwargsTest["mode"] = "test"

                testLoader,_ = load_data.buildTestLoader(args, "test",withSeg=args.with_seg,reprVec=args.repr_vec,shuffle=args.shuffle_test_set)

                kwargsTest['loader'] = testLoader

                # Evaluate the best checkpoint found during training
                net = preprocessAndLoadParams("../models/{}/model{}_best_epoch{}".format(args.exp_id, args.model_id, bestEpoch),args.cuda,net,args.strict_init)

                kwargsTest["model"] = net
                kwargsTest["epoch"] = bestEpoch

                if args.bil_clus_soft_sched:
                    update.updateBilClusSoftmSched(net,args.epochs,args.epochs)

                with torch.no_grad():
                    testFunc(**kwargsTest)

                # Record that this model/epoch pair has been tested
                with open("../results/{}/test_done.txt".format(args.exp_id),"a") as text_file:
                    print("{},{}".format(args.model_id,bestEpoch),file=text_file)

        if args.grad_cam:
            # Grad-CAM visualisation on the test set, one image at a time
            args.val_batch_size = 1
            testLoader,_ = load_data.buildTestLoader(args, "test",withSeg=args.with_seg)
            net = preprocessAndLoadParams("../models/{}/model{}_best_epoch{}".format(args.exp_id, args.model_id, bestEpoch),args.cuda,net,args.strict_init)
            # Rebuild a plain resnet-like module (features + final linear layer)
            # so GradCam can hook into it
            resnet = net.firstModel.featMod
            resnet.fc = net.secondModel.linLay

            grad_cam = gradcam.GradCam(model=resnet, feature_module=resnet.layer4, target_layer_names=["1"], use_cuda=args.cuda)

            allMask = None
            latency_list = []
            batchSize_list = []
            for batch_idx, batch in enumerate(testLoader):
                data,target = batch[:2]
                if (batch_idx % args.log_interval == 0):
                    print("\t", batch_idx * len(data), "/", len(testLoader.dataset))

                if args.cuda:
                    data = data.cuda()

                # Measure the per-batch Grad-CAM latency
                lat_start_time = time.time()
                mask = grad_cam(data).detach().cpu()
                latency_list.append(time.time()-lat_start_time)
                batchSize_list.append(data.size(0))

                if allMask is None:
                    allMask = mask
                else:
                    allMask = torch.cat((allMask,mask),dim=0)

            np.save("../results/{}/gradcam_{}_epoch{}_test.npy".format(args.exp_id,args.model_id,bestEpoch),allMask.detach().cpu().numpy())

            # Save the latency measurements as a (latency, batch_size) csv
            latency_list = np.array(latency_list)[:,np.newaxis]
            batchSize_list = np.array(batchSize_list)[:,np.newaxis]
            latency_list = np.concatenate((latency_list,batchSize_list),axis=1)
            np.savetxt("../results/{}/latencygradcam_{}_epoch{}.csv".format(args.exp_id,args.model_id,bestEpoch),latency_list,header="latency,batch_size",delimiter=",")

    else:
        # Optuna run: tag the best checkpoint with the trial number and return
        # the last validation metric to the study.
        # NOTE(review): metricVal is unbound if the training loop never ran a
        # validation (e.g. startEpoch > args.epochs or args.no_val) — confirm
        # callers guarantee at least one validated epoch.
        oldPath = "../models/{}/model{}_best_epoch{}".format(args.exp_id,args.model_id, bestEpoch)
        os.rename(oldPath, oldPath.replace("best_epoch","trial{}_best_epoch".format(trial.number)))

        return metricVal