import os

# Project-local dependencies (ClassifierDITRL, PolicyLearnerDITRL, train_pipeline,
# train, evaluate_action_trace, generate_itr_files_gcn, default_model_args) are
# assumed to be importable from this repository.


def main(save_id,
         gen_itr,
         gen_vee,
         train_p,
         eval_p,
         backbone_id,
         full_p=False):
    print(
        "save_id: {0}, train_p : {1}, eval_p: {2}, backbone_id: {3}, full_p: {4}"
        .format(save_id, train_p, eval_p, backbone_id, full_p))

    if full_p:
        from exec_classifier_bottleneck import main as bottleneck_main
        bottleneck_main(save_id, train_p, eval_p, backbone_id)

    from model_def import define_model
    model_dict = define_model(backbone_id)

    num_segments = model_dict["num_segments"]
    bottleneck_size = model_dict["bottleneck_size"]
    dense_sample = model_dict["dense_sample"]
    dense_rate = model_dict["dense_rate"]

    # directory for this run's logs and saved models
    dir_name = os.path.join("saved_models", save_id)
    if not os.path.exists(dir_name):
        os.makedirs(dir_name)
    # note: "../model" resolves to saved_models/model, so the model file sits
    # one level above the per-run directory
    filename = os.path.join(dir_name, "../model")

    lfd_params = default_model_args(save_id=save_id,
                                    log_dir=dir_name,
                                    num_segments=num_segments,
                                    bottleneck_size=bottleneck_size,
                                    dense_sample=dense_sample,
                                    dense_rate=dense_rate)

    if gen_itr:
        print("Training Pipeline")
        model = ClassifierDITRL(lfd_params,
                                filename,
                                backbone_id,
                                use_feature_extractor=True,
                                use_spatial=False,
                                use_pipeline=True,
                                use_temporal=False,
                                spatial_train=False,
                                ditrl_pipeline_train=True,
                                use_gcn=True)
        model = train_pipeline(lfd_params, model)
        model.save_model()

        print("Generating ITR Files")
        generate_itr_files_gcn(lfd_params,
                               model,
                               "train",
                               backbone=backbone_id)
        generate_itr_files_gcn(lfd_params,
                               model,
                               "evaluation",
                               backbone=backbone_id)
    '''
    if gen_vee:
        model = ClassifierDITRL(lfd_params, filename, backbone_id, use_feature_extractor=True, use_spatial=False,
                                use_pipeline=True, use_temporal=False, spatial_train=False, ditrl_pipeline_train=False,
                                return_vee=True)

        print("Generating Sparse IAD Files")
        generate_binarized_iad_files(lfd_params, model, "train", backbone=backbone_id)
        generate_binarized_iad_files(lfd_params, model, "evaluation", backbone=backbone_id)
    '''
    if train_p:
        print("Training Policy")

        model = PolicyLearnerDITRL(lfd_params,
                                   filename,
                                   backbone_id,
                                   use_feature_extractor=False,
                                   use_spatial=False,
                                   use_pipeline=False,
                                   use_temporal=True,
                                   spatial_train=False,
                                   ditrl_pipeline_train=False,
                                   temporal_train=True,
                                   policy_train=True,
                                   use_gcn=True)
        model = train(lfd_params,
                      model,
                      input_dtype="gcn",
                      verbose=False,
                      ablation=False)  # consumes the ITR/graph files generated above

        print("--------------")
        print("Saved Model")
        model.save_model()

    if eval_p:

        model = PolicyLearnerDITRL(lfd_params,
                                   filename,
                                   backbone_id,
                                   use_feature_extractor=False,
                                   use_spatial=False,
                                   use_pipeline=False,
                                   use_temporal=True,
                                   spatial_train=False,
                                   ditrl_pipeline_train=False,
                                   temporal_train=False,
                                   use_gcn=True)
        '''
        df = evaluate_single_action(lfd_params, model, input_dtype="itr")
        out_filename = os.path.join(lfd_params.args.output_dir, "output_" + save_id + "_single_action.csv")
        df.to_csv(out_filename)
        print("Output placed in: " + out_filename)
        '''

        df = evaluate_action_trace(lfd_params, model, input_dtype="gcn")
        out_filename = os.path.join(lfd_params.args.output_dir,
                                    "output_" + save_id + "_action_trace.csv")
        df.to_csv(out_filename)
        print("Output placed in: " + out_filename)

        df = evaluate_action_trace(lfd_params,
                                   model,
                                   input_dtype="gcn",
                                   ablation=True,
                                   verbose=False,
                                   mode="train")
        out_filename = os.path.join(
            lfd_params.args.output_dir,
            "output_" + save_id + "_action_trace_ablation_train.csv")
        df.to_csv(out_filename)
        print("Output placed in: " + out_filename)

        df = evaluate_action_trace(lfd_params,
                                   model,
                                   input_dtype="gcn",
                                   ablation=True,
                                   verbose=False,
                                   mode="evaluation")
        out_filename = os.path.join(
            lfd_params.args.output_dir,
            "output_" + save_id + "_action_trace_ablation_eval.csv")
        df.to_csv(out_filename)
        print("Output placed in: " + out_filename)
Example #2
import os

import pandas as pd

# Project-local dependencies (ClassifierDITRL, train_pipeline, train, evaluate,
# generate_itr_files, generate_binarized_iad_files, default_model_args) are
# assumed to be importable from this repository.


def main(save_id,
         gen_itr,
         gen_vee,
         train_p,
         eval_p,
         backbone_id,
         full_p=False):

    if full_p:
        from exec_classifier_bottleneck import main as backbone_main
        backbone_main(save_id, train_p, eval_p, backbone_id)

    from model_def import define_model
    model_dict = define_model(backbone_id)

    num_segments = model_dict["num_segments"]
    bottleneck_size = model_dict["bottleneck_size"]
    dense_sample = model_dict["dense_sample"]
    dense_rate = model_dict["dense_rate"]

    # directory for this run's logs and saved models
    dir_name = os.path.join("saved_models", save_id)
    if not os.path.exists(dir_name):
        os.makedirs(dir_name)
    # note: "../model" resolves to saved_models/model, so the model file sits
    # one level above the per-run directory
    filename = os.path.join(dir_name, "../model")

    lfd_params = default_model_args(save_id=save_id,
                                    log_dir=dir_name,
                                    num_segments=num_segments,
                                    bottleneck_size=bottleneck_size,
                                    dense_sample=dense_sample,
                                    dense_rate=dense_rate)

    if gen_itr:

        print("Training Pipeline")
        model = ClassifierDITRL(lfd_params,
                                filename,
                                backbone_id,
                                use_feature_extractor=True,
                                use_spatial=False,
                                use_pipeline=True,
                                use_temporal=False,
                                spatial_train=False,
                                ditrl_pipeline_train=True)
        model = train_pipeline(lfd_params, model)
        model.save_model()

        print("Generating ITR Files")
        generate_itr_files(lfd_params, model, "train")
        generate_itr_files(lfd_params, model, "evaluation")

    if gen_vee:
        model = ClassifierDITRL(lfd_params,
                                filename,
                                backbone_id,
                                use_feature_extractor=True,
                                use_spatial=False,
                                use_pipeline=True,
                                use_temporal=False,
                                spatial_train=False,
                                ditrl_pipeline_train=False,
                                return_vee=True)

        print("Generating Sparse IAD Files")
        generate_binarized_iad_files(lfd_params,
                                     model,
                                     "train",
                                     backbone=backbone_id)
        generate_binarized_iad_files(lfd_params,
                                     model,
                                     "evaluation",
                                     backbone=backbone_id)

    if train_p:
        # only the temporal (ITR) classifier is trained in this stage
        model = ClassifierDITRL(lfd_params,
                                filename,
                                backbone_id,
                                use_feature_extractor=False,
                                use_spatial=False,
                                use_pipeline=False,
                                use_temporal=True,
                                spatial_train=False,
                                ditrl_pipeline_train=False,
                                temporal_train=True)
        model = train(lfd_params, model, input_dtype="itr",
                      verbose=True)  # make sure to use ITRs
        model.save_model()

    if eval_p:
        print("Evaluating Model")
        model = ClassifierDITRL(lfd_params,
                                filename,
                                backbone_id,
                                use_feature_extractor=False,
                                use_spatial=False,
                                use_pipeline=False,
                                use_temporal=True,
                                spatial_train=False,
                                ditrl_pipeline_train=False,
                                temporal_train=False)

        train_df = evaluate(lfd_params, model, mode="train", input_dtype="itr")
        train_df["mode"] = ["train"] * len(train_df)
        eval_df = evaluate(lfd_params,
                           model,
                           mode="evaluation",
                           verbose=True,
                           input_dtype="itr")
        eval_df["mode"] = ["evaluation"] * len(eval_df)
        df = pd.concat([train_df, eval_df])
        df["repeat"] = [save_id] * len(df)

        out_filename = os.path.join(lfd_params.args.output_dir,
                                    "output_" + save_id + ".csv")
        df.to_csv(out_filename)
        print("Output placed in: " + out_filename)