def main(argv=None):

    #Getting arguments from config file and command line
    #Building the arg reader
    argreader = ArgReader(argv)

    ########### PLOT TSNE ################
    argreader.parser.add_argument('--tsne',action='store_true',help='To plot the t-SNE representation of the extracted features. Also plots the representations of a video side by side to make an image. \
                                    The --exp_id, --model_id, --seed and --dataset_test arguments should be set.')

    ########### PLOT SCORE EVOLUTION ALONG VIDEO ##################
    argreader.parser.add_argument('--plot_score',action="store_true",help='To plot the scene change probabilities produced by a model for all the videos it processed during validation, at every epoch.\
                                                                            The --model_id argument must be set, along with the --exp_id, --dataset_test and --epoch_to_plot arguments.')

    argreader.parser.add_argument('--epoch_to_plot',type=int,metavar="N",help='The epoch at which to plot the predictions when using the --plot_score argument')

    argreader.parser.add_argument('--untrained_exp_and_model_id',default=[None,None],type=str,nargs=2,help='To plot the distance between features computed by the network before training when using the --plot_score argument.\
                                                                                        The values are the exp_id and the model_id of the model not trained on scene change detection (i.e. the model with the ImageNet weights).')
    argreader.parser.add_argument('--plot_dist',action="store_true",help='To plot the distance when using the --plot_score argument')

    ########## COMPUTE METRICS AND PUT THEM IN AN LATEX TABLE #############
    argreader.parser.add_argument('--eval_model_leave_one_out',type=float,nargs=3,help='To evaluate a model by tuning its decision threshold on the videos it is not\
                                    evaluated on. The --model_id argument must be set, along with the --model_name, --exp_id and --dataset_test arguments. \
                                    The values of this argument are the epoch at which to evaluate, followed by the minimum and maximum decision thresholds \
                                    to evaluate. Use --len_pond to weight the overflow and coverage by the scene lengths.')

    argreader.parser.add_argument('--model_name',type=str,metavar="NAME",help='The name of the model as will appear in the latex table produced by the --eval_model_leave_one_out argument.')
    argreader.parser.add_argument('--len_pond',action="store_true",help='Use this argument to weight the coverage and overflow by the GT scene length when using --eval_model_leave_one_out.')

    argreader.parser.add_argument('--fine_tuned_thres',action="store_true",help='To automatically fine-tune the decision threshold of the model. Only useful with the --bbc_annot_dist argument; \
                                                                                check the help of that argument.')

    ######### Make a gif out of predictions ################

    argreader.parser.add_argument('--gif',action="store_true",help='Make a gif out of the predictions of a model on one dataset.')

    argreader.parser.add_argument('--model_id1',type=str,metavar="ID",help='The id of the first model to plot with --gif')
    argreader.parser.add_argument('--model_id2',type=str,metavar="ID",help='The id of the second model to plot with --gif')
    argreader.parser.add_argument('--model_name1',type=str,metavar="NAME",help='The name of the first model to plot with --gif')
    argreader.parser.add_argument('--model_name2',type=str,metavar="NAME",help='The name of the second model to plot with --gif')

    argreader = load_data.addArgs(argreader)

    #Reading the command line args
    argreader.getRemainingArgs()

    #Getting the args from command line and config file
    args = argreader.args
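
    #The flags below are independent: several of them can be set in a single
    #run, in which case each corresponding routine is executed in turn.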

    if args.tsne:
        tsne(args.dataset_test,args.exp_id,args.model_id,args.seed)
    if args.plot_score:
        plotScore(args.exp_id,args.model_id,args.untrained_exp_and_model_id[0],args.untrained_exp_and_model_id[1],args.dataset_test,args.plot_dist,args.epoch_to_plot)
    if args.eval_model_leave_one_out:
        epoch = int(args.eval_model_leave_one_out[0])
        thresMin = args.eval_model_leave_one_out[1]
        thresMax = args.eval_model_leave_one_out[2]
        evalModel_leaveOneOut(args.exp_id,args.model_id,args.model_name,args.dataset_test,epoch,thresMin,thresMax,args.len_pond)
    if args.gif:
        makeGif(args.exp_id,args.model_id1,args.model_id2,args.model_name1,args.model_name2,args.dataset_test)
Example #2
def main(argv=None):

    #Getting arguments from config file and command line
    #Building the arg reader
    argreader = ArgReader(argv)

    argreader.parser.add_argument('--comp_feat', action='store_true',help='To compute the features of all images in the test set and write them to a file. All the arguments used to \
                                    build the model and the test data loader should be set.')
    argreader.parser.add_argument('--no_train', type=str,nargs=2,help='Use this to re-evaluate a model at each epoch after training. At each epoch the model is not trained; instead, \
                                                                            the weights of the corresponding epoch are loaded and the model is evaluated.\
                                                                            The values of this argument are the exp_id and the model_id of the model to get the weights from.')

    argreader = addInitArgs(argreader)
    argreader = addLossArgs(argreader)
    argreader = addOptimArgs(argreader)
    argreader = addValArgs(argreader)

    argreader = modelBuilder.addArgs(argreader)
    argreader = load_data.addArgs(argreader)

    #Reading the command line args
    argreader.getRemainingArgs()

    args = argreader.args

    if args.redirect_out:
        sys.stdout = open("python.out", 'w')

    #The folders where the experiment files will be written
    if not (os.path.exists("../vis/{}".format(args.exp_id))):
        os.makedirs("../vis/{}".format(args.exp_id))
    if not (os.path.exists("../results/{}".format(args.exp_id))):
        os.makedirs("../results/{}".format(args.exp_id))
    if not (os.path.exists("../models/{}".format(args.exp_id))):
        os.makedirs("../models/{}".format(args.exp_id))

    #Write the arguments in a config file so the experiment can be re-run
    argreader.writeConfigFile("../models/{}/{}.ini".format(args.exp_id,args.model_id))

    writer = SummaryWriter("../results/{}".format(args.exp_id))

    print("Model :",args.model_id,"Experience :",args.exp_id)

    if args.comp_feat:

        testLoader = load_data.TestLoader(args.val_l,args.dataset_test,args.test_part_beg,args.test_part_end,args.img_size,\
                                          args.resize_image,args.exp_id,args.random_frame_val)

        if args.feat != "None":
            featModel = modelBuilder.buildFeatModel(args.feat,args.pretrain_dataset,args.lay_feat_cut)
            if args.cuda:
                featModel = featModel.cuda()
            if args.init_path_visual != "None":
                featModel.load_state_dict(torch.load(args.init_path_visual))
            elif args.init_path != "None":
                model = modelBuilder.netBuilder(args)
                params = torch.load(args.init_path)
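                #The checkpoint was presumably saved with a different DataParallel
                #nesting, so the keys are renamed to match the current module hierarchy.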
                state_dict = {k.replace("module.cnn.","cnn.module."): v for k,v in params.items()}
                model.load_state_dict(state_dict)
                featModel = model.featModel

            featModel.eval()
        else:
            featModel = None

        with torch.no_grad():
            evalAllImages(args.exp_id,args.model_id,featModel,testLoader,args.cuda,args.log_interval)

    else:
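        #Training branch: seed the RNGs, build the loaders and the network,
        #then run the training/validation loop over the epochs.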

        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        if args.cuda:
            torch.cuda.manual_seed(args.seed)

        paramToOpti = []

        trainLoader,trainDataset = load_data.buildSeqTrainLoader(args)

        valLoader = load_data.TestLoader(args.val_l,args.dataset_val,args.val_part_beg,args.val_part_end,\
                                            args.img_size,args.resize_image,\
                                            args.exp_id,args.random_frame_val)

        #Building the net
        net = modelBuilder.netBuilder(args)

        if args.cuda:
            net = net.cuda()

        trainFunc = epochSeqTr
        valFunc = epochSeqVal

        kwargsTr = {'log_interval':args.log_interval,'loader':trainLoader,'args':args,'writer':writer}
        kwargsVal = kwargsTr.copy()
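        #Validation reuses the training kwargs; only the loader and the
        #early-stopping settings below are validation-specific.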

        kwargsVal['loader'] = valLoader
        kwargsVal.update({"metricEarlyStop":args.metric_early_stop,"maximiseMetric":args.maximise_metric})

        if args.adv_weight > 0:
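            #Adversarial setup: a discriminator with its own frame loader and
            #SGD optimizer is added to the training kwargs.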
            kwargsTr["discrModel"] = modelBuilder.Discriminator(net.nbFeat,args.discr_dropout)
            kwargsTr["discrModel"] = kwargsTr["discrModel"].cuda() if args.cuda else kwargsTr["discrModel"].cpu()
            kwargsTr["discrLoader"] = load_data.buildFrameTrainLoader(args)
            kwargsTr["discrOptim"] = torch.optim.SGD(kwargsTr["discrModel"].parameters(), lr=args.lr,momentum=args.momentum)
        else:
            kwargsTr["discrModel"],kwargsTr["discrLoader"],kwargsTr["discrOptim"] = None,None,None

        for p in net.parameters():
            paramToOpti.append(p)

        paramToOpti = (p for p in paramToOpti) #the optimizer constructor only needs an iterable

        #Getting the constructor and the kwargs for the chosen optimizer
        optimConst,kwargsOpti = get_OptimConstructor_And_Kwargs(args.optim,args.momentum)

        startEpoch = initialize_Net_And_EpochNumber(net,args.exp_id,args.model_id,args.cuda,args.start_mode,args.init_path,args.init_path_visual_temp)

        #If no learning rate schedule is indicated (i.e. there is only one learning rate),
        #the args.lr argument will be a float and not a list of floats.
        #Converting it to a single-element list makes the rest of the processing easier
        if type(args.lr) is float:
            args.lr = [args.lr]

        lrCounter = 0

        metricLastVal = None

        outDictEpochs = {}
        targDictEpochs = {}

        for epoch in range(startEpoch, args.epochs + 1):
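            #At each epoch: update the learning rate, train the model (or load
            #pre-trained weights when --no_train is set), then validate it if
            #this has not been done already.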

            kwargsOpti,kwargsTr,lrCounter = update.updateLR(epoch,args.epochs,args.lr,startEpoch,kwargsOpti,kwargsTr,lrCounter,net,optimConst)

            kwargsTr["epoch"],kwargsVal["epoch"] = epoch,epoch
            kwargsTr["model"],kwargsVal["model"] = net,net

            kwargsTr = resetAdvIter(kwargsTr)

            if not args.no_train:
                trainFunc(**kwargsTr)
            else:
                net.load_state_dict(torch.load("../models/{}/model{}_epoch{}".format(args.no_train[0],args.no_train[1],epoch)))

            kwargsVal["metricLastVal"] = metricLastVal

            #Checking if validation has already been done
            if len(glob.glob("../results/{}/{}_epoch{}_*".format(args.exp_id,args.model_id,epoch))) < len(kwargsVal["loader"].videoPaths):
                with torch.no_grad():
                    metricLastVal,outDict,targDict = valFunc(**kwargsVal)
                outDictEpochs[epoch] = outDict
                targDictEpochs[epoch] = targDict
                update.updateHist(writer,args.model_id,outDictEpochs,targDictEpochs)
            else:
                print("Validation epoch {} already done !".format(epoch))
Example #3
def main(argv=None):
    # Getting arguments from config file and command line
    # Building the arg reader
    argreader = ArgReader(argv)

    argreader.parser.add_argument('--no_train', type=str2bool, help='Use this to re-evaluate a model at each epoch after training. At each epoch the model is not trained; instead, \
                                                                            the weights of the corresponding epoch are loaded and the model is evaluated.\
                                                                            The --exp_id_no_train and --model_id_no_train arguments must be set.')
    argreader.parser.add_argument('--exp_id_no_train', type=str,
                                  help="To use when --no_train is set to True. This is the exp_id of the model to get the weights from.")
    argreader.parser.add_argument('--model_id_no_train', type=str,
                                  help="To use when --no_train is set to True. This is the model_id of the model to get the weights from.")

    argreader.parser.add_argument('--no_val', type=str2bool, help='To skip the validation phase')
    argreader.parser.add_argument('--only_test', type=str2bool, help='To only run the test evaluation')

    argreader.parser.add_argument('--do_test_again', type=str2bool, help='To run the test evaluation even if it has already been done')
    argreader.parser.add_argument('--compute_latency', type=str2bool, help='To write in a file the latency at each forward pass.')
    argreader.parser.add_argument('--grad_cam', type=str2bool, help='To compute grad cam instead of training or testing.')
    argreader.parser.add_argument('--optuna', type=str2bool, help='To run a hyper-parameter study')
    argreader.parser.add_argument('--optuna_trial_nb', type=int, help='The number of hyper-parameter trials to run.')
    argreader.parser.add_argument('--opt_data_aug', type=str2bool, help='To optimise data augmentation hyper-parameter.')
    argreader.parser.add_argument('--max_batch_size', type=int, help='The maximum batch size to test.')

    argreader = addInitArgs(argreader)
    argreader = addOptimArgs(argreader)
    argreader = addValArgs(argreader)
    argreader = addLossTermArgs(argreader)

    argreader = modelBuilder.addArgs(argreader)
    argreader = load_data.addArgs(argreader)

    # Reading the command line args
    argreader.getRemainingArgs()

    args = argreader.args

    if args.redirect_out:
        sys.stdout = open("python.out", 'w')

    # The folders where the experiment files will be written
    if not (os.path.exists("../vis/{}".format(args.exp_id))):
        os.makedirs("../vis/{}".format(args.exp_id))
    if not (os.path.exists("../results/{}".format(args.exp_id))):
        os.makedirs("../results/{}".format(args.exp_id))
    if not (os.path.exists("../models/{}".format(args.exp_id))):
        os.makedirs("../models/{}".format(args.exp_id))

    args = updateSeedAndNote(args)
    # Update the config args
    argreader.args = args
    # Write the arguments in a config file so the experiment can be re-run

    argreader.writeConfigFile("../models/{}/{}.ini".format(args.exp_id, args.model_id))
    print("Model :", args.model_id, "Experience :", args.exp_id)

    if args.distributed:
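        # Spawn one process per rank and wait for all of them to finish.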
        size = args.distrib_size
        processes = []
        for rank in range(size):
            p = Process(target=init_process, args=(args, rank, size, run))
            p.start()
            processes.append(p)

        for p in processes:
            p.join()
    else:

        if args.optuna:
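            # Each Optuna trial calls run() with the trial object, presumably so
            # that run() can sample its hyper-parameters from it.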
            def objective(trial):
                return run(args,trial=trial)

            study = optuna.create_study(direction="maximize" if args.maximise_val_metric else "minimize",\
                                        storage="sqlite:///../results/{}/{}_hypSearch.db".format(args.exp_id,args.model_id), \
                                        study_name=args.model_id,load_if_exists=True)

            con = sqlite3.connect("../results/{}/{}_hypSearch.db".format(args.exp_id,args.model_id))
            curr = con.cursor()

            # Count the trials already stored in the study, separating out the
            # failed ones (their value is NULL in the trials table).
            rows = curr.execute('SELECT trial_id,value FROM trials WHERE study_id == 1').fetchall()
            failedTrials = sum(1 for _, value in rows if value is None)
            trialsAlreadyDone = len(rows)

            if trialsAlreadyDone-failedTrials < args.optuna_trial_nb:
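                # Run the missing trials: failed trials are counted again so that
                # the study ends up with args.optuna_trial_nb successful ones.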

                studyDone = False
                while not studyDone:
                    try:
                        print("N trials",args.optuna_trial_nb-trialsAlreadyDone+failedTrials)
                        study.optimize(objective,n_trials=args.optuna_trial_nb-trialsAlreadyDone+failedTrials)
                        studyDone = True
                    except RuntimeError as e:
                        if str(e).find("CUDA out of memory.") != -1:
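                            # Free the cached GPU memory and retry the study with
                            # a smaller maximum batch size.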
                            gc.collect()
                            torch.cuda.empty_cache()
                            args.max_batch_size -= 5
                        else:
                            raise

            curr.execute('SELECT trial_id,value FROM trials WHERE study_id == 1')
            query_res = curr.fetchall()

            # Discard failed trials (their value is None)
            query_res = [res for res in query_res if res[1] is not None]

            trialIds = [id_value[0] for id_value in query_res]
            values = [id_value[1] for id_value in query_res]

            trialIds = trialIds[:args.optuna_trial_nb]
            values = values[:args.optuna_trial_nb]

            # The best trial maximises or minimises the value, depending on the study direction
            bestInd = np.array(values).argmax() if args.maximise_val_metric else np.array(values).argmin()
            bestTrialId = trialIds[bestInd]

            curr.execute('SELECT param_name,param_value from trial_params WHERE trial_id == {}'.format(bestTrialId))
            query_res = curr.fetchall()

            bestParamDict = {key:value for key,value in query_res}

            # Load the best hyper-parameters into args and re-run in test-only mode
            args.lr,args.batch_size = bestParamDict["lr"],int(bestParamDict["batch_size"])
            args.optim = OPTIM_LIST[int(bestParamDict["optim"])]
            args.only_test = True

            # Trial ids in the database are presumably 1-indexed while the checkpoint
            # file names use the 0-indexed trial number, hence the -1.
            bestPath = glob.glob("../models/{}/model{}_trial{}_best_epoch*".format(args.exp_id,args.model_id,bestTrialId-1))[0]

            copyfile(bestPath, bestPath.replace("_trial{}".format(bestTrialId-1),""))

            run(args)

        else:
            run(args)