Example no. 1
def main(save_id, train_p, eval_p, backbone_id, return_eval=False):
    print(
        "save_id: {0}, train_p: {1}, eval_p: {2}, backbone_id: {3}".format(
            save_id, train_p, eval_p, backbone_id))

    from model_def import define_model
    model_dict = define_model(backbone_id)

    num_segments = model_dict["num_segments"]
    bottleneck_size = model_dict["bottleneck_size"]
    dense_sample = model_dict["dense_sample"]
    dense_rate = model_dict["dense_rate"]

    dir_name = os.path.join("saved_models", save_id)  # per-run save directory
    if not os.path.exists(dir_name):
        os.makedirs(dir_name)
    filename = os.path.join(dir_name, "model")  # keep the weights inside the save directory

    lfd_params = default_model_args(
        save_id=save_id,
        log_dir=dir_name,
        num_segments=num_segments,
        bottleneck_size=bottleneck_size,
        dense_sample=dense_sample,
        dense_rate=dense_rate)  # parse_model_args()

    if train_p:
        model = ClassifierDITRL(lfd_params,
                                filename,
                                backbone_id,
                                use_feature_extractor=True,
                                use_spatial=True,
                                spatial_train=True)

        model = train(lfd_params, model, verbose=True)
        model.save_model()

    if eval_p:
        model = ClassifierDITRL(lfd_params,
                                filename,
                                backbone_id,
                                use_feature_extractor=True,
                                use_spatial=True,
                                spatial_train=False)

        train_df = evaluate(lfd_params, model, mode="train")
        train_df["mode"] = ["train"] * len(train_df)
        eval_df = evaluate(lfd_params, model, mode="evaluation", verbose=True)
        eval_df["mode"] = ["evaluation"] * len(eval_df)
        df = pd.concat([train_df, eval_df])

        if return_eval:
            return df

        df["repeat"] = ["1"] * len(df)

        out_filename = os.path.join(lfd_params.args.output_dir,
                                    "output_" + save_id + "_spatial.csv")
        df.to_csv(out_filename)
        print("Output placed in: " + out_filename)
Example no. 2

def main(save_id,
         gen_itr,
         gen_vee,
         train_p,
         eval_p,
         backbone_id,
         full_p=False):
    print(
        "save_id: {0}, train_p: {1}, eval_p: {2}, backbone_id: {3}, full_p: {4}"
        .format(save_id, train_p, eval_p, backbone_id, full_p))

    if full_p:
        from exec_classifier_bottleneck import main as bottleneck_main
        bottleneck_main(save_id, train_p, eval_p, backbone_id)

    from model_def import define_model
    model_dict = define_model(backbone_id)

    num_segments = model_dict["num_segments"]
    bottleneck_size = model_dict["bottleneck_size"]
    dense_sample = model_dict["dense_sample"]
    dense_rate = model_dict["dense_rate"]

    dir_name = os.path.join("saved_models", save_id)  # per-run save directory
    if not os.path.exists(dir_name):
        os.makedirs(dir_name)
    filename = os.path.join(dir_name, "model")  # keep the weights inside the save directory

    lfd_params = default_model_args(save_id=save_id,
                                    log_dir=dir_name,
                                    num_segments=num_segments,
                                    bottleneck_size=bottleneck_size,
                                    dense_sample=dense_sample,
                                    dense_rate=dense_rate)

    if gen_itr:
        print("Training Pipeline")
        model = ClassifierDITRL(lfd_params,
                                filename,
                                backbone_id,
                                use_feature_extractor=True,
                                use_spatial=False,
                                use_pipeline=True,
                                use_temporal=False,
                                spatial_train=False,
                                ditrl_pipeline_train=True,
                                use_gcn=True)
        model = train_pipeline(lfd_params, model)
        model.save_model()

        print("Generating ITR Files")
        generate_itr_files_gcn(lfd_params,
                               model,
                               "train",
                               backbone=backbone_id)
        generate_itr_files_gcn(lfd_params,
                               model,
                               "evaluation",
                               backbone=backbone_id)
    '''
    if gen_vee:
        model = ClassifierDITRL(lfd_params, filename, backbone_id, use_feature_extractor=True, use_spatial=False,
                                use_pipeline=True, use_temporal=False, spatial_train=False, ditrl_pipeline_train=False,
                                return_vee=True)

        print("Generating Sparse IAD Files")
        generate_binarized_iad_files(lfd_params, model, "train", backbone=backbone_id)
        generate_binarized_iad_files(lfd_params, model, "evaluation", backbone=backbone_id)
    '''
    if train_p:
        print("Training Policy")

        model = PolicyLearnerDITRL(lfd_params,
                                   filename,
                                   backbone_id,
                                   use_feature_extractor=False,
                                   use_spatial=False,
                                   use_pipeline=False,
                                   use_temporal=True,
                                   spatial_train=False,
                                   ditrl_pipeline_train=False,
                                   temporal_train=True,
                                   policy_train=True,
                                   use_gcn=True)
        model = train(lfd_params,
                      model,
                      input_dtype="gcn",
                      verbose=False,
                      ablation=False)  # train on the ITR files generated above (GCN input)

        print("--------------")
        print("Saved Model")
        model.save_model()

    if eval_p:

        model = PolicyLearnerDITRL(lfd_params,
                                   filename,
                                   backbone_id,
                                   use_feature_extractor=False,
                                   use_spatial=False,
                                   use_pipeline=False,
                                   use_temporal=True,
                                   spatial_train=False,
                                   ditrl_pipeline_train=False,
                                   temporal_train=False,
                                   use_gcn=True)
        '''
        df = evaluate_single_action(lfd_params, model, input_dtype="itr")
        out_filename = os.path.join(lfd_params.args.output_dir, "output_" + save_id + "_single_action.csv")
        df.to_csv(out_filename)
        print("Output placed in: " + out_filename)
        '''

        df = evaluate_action_trace(lfd_params, model, input_dtype="gcn")
        out_filename = os.path.join(lfd_params.args.output_dir,
                                    "output_" + save_id + "_action_trace.csv")
        df.to_csv(out_filename)
        print("Output placed in: " + out_filename)

        df = evaluate_action_trace(lfd_params,
                                   model,
                                   input_dtype="gcn",
                                   ablation=True,
                                   verbose=False,
                                   mode="train")
        out_filename = os.path.join(
            lfd_params.args.output_dir,
            "output_" + save_id + "_action_trace_ablation_train.csv")
        df.to_csv(out_filename)
        print("Output placed in: " + out_filename)

        df = evaluate_action_trace(lfd_params,
                                   model,
                                   input_dtype="gcn",
                                   ablation=True,
                                   verbose=False,
                                   mode="evaluation")
        out_filename = os.path.join(
            lfd_params.args.output_dir,
            "output_" + save_id + "_action_trace_ablation_eval.csv")
        df.to_csv(out_filename)
        print("Output placed in: " + out_filename)
    '''
    for mode in ["train", "evaluation"]:

        #vd = VideoDataset(root_path, mode, full_sample, image_tmpl=image_tmpl, num_segments=num_segments)
        vd = DatasetVideo(root_path, mode, image_tmpl=image_tmpl, num_segments=num_segments)
        if (mode == "train"):
            user = input("There are {0} files to choose from:".format(len(vd)))
        img_dict[mode] = vd.show(int(user))


    get_concat_v(img_dict["train"], img_dict["evaluation"]).save("analysis/fig/out.png")
    '''
    from parameter_parser import default_model_args
    lfd_params = default_model_args(save_id="",
                                    log_dir="",
                                    num_segments=num_segments,
                                    bottleneck_size="",
                                    dense_sample=False,
                                    dense_rate=8)

    # generate images from entire dataset
    vd = DatasetVideo(lfd_params,
                      root_path,
                      "train",
                      image_tmpl=image_tmpl,
                      num_segments=num_segments)
    for i in range(len(vd)):
        print("i:", i, len(vd))
        img = vd.show(i)
        img.save("analysis/dataset_fig/" + str(i).zfill(2) + "_clean.png")
    """
    parser = argparse.ArgumentParser(description='Generate IADs from input files')
Example no. 4
            large_frame.paste(iad_frame, (0, buffer_height))

            new_frames.append(large_frame)

        out_img = new_frames[0]
        for z in range(1, len(new_frames)):
            out_img = get_concat_h(out_img, new_frames[z])

        # save the image
        print(filename)
        out_id = filename.split('/')[-1].split('.')[0]
        out_img.save("analysis/fig/viz_"+out_id+".png")


if __name__ == '__main__':

    save_id = "policy_learning_ditrl_tsm_bn16_2"
    dir_name = os.path.join("saved_models", save_id)  # lfd_params
    assert os.path.exists(dir_name), "ERROR: run_iad_visualization.py: model filename not found"
    filename = os.path.join(dir_name, "model")

    lfd_params = default_model_args(save_id=save_id, log_dir=dir_name, num_segments=20)

    model = ClassifierDITRLTSM(lfd_params, filename, use_feature_extractor=True, use_spatial=False,
                               use_pipeline=True, use_temporal=False,
                               spatial_train=False, ditrl_pipeline_train=False, temporal_train=False)

    visualize(lfd_params, model, mode="train")
    visualize(lfd_params, model, mode="evaluation")

Example no. 5
def main(save_id,
         gen_itr,
         gen_vee,
         train_p,
         eval_p,
         backbone_id,
         full_p=False):

    if full_p:
        from exec_classifier_bottleneck import main as backbone_main
        backbone_main(save_id, train_p, eval_p, backbone_id)

    from model_def import define_model
    model_dict = define_model(backbone_id)

    num_segments = model_dict["num_segments"]
    bottleneck_size = model_dict["bottleneck_size"]
    dense_sample = model_dict["dense_sample"]
    dense_rate = model_dict["dense_rate"]

    dir_name = os.path.join("saved_models", save_id)  # per-run save directory
    if not os.path.exists(dir_name):
        os.makedirs(dir_name)
    filename = os.path.join(dir_name, "model")  # keep the weights inside the save directory

    lfd_params = default_model_args(save_id=save_id,
                                    log_dir=dir_name,
                                    num_segments=num_segments,
                                    bottleneck_size=bottleneck_size,
                                    dense_sample=dense_sample,
                                    dense_rate=dense_rate)

    if gen_itr:

        print("Training Pipeline")
        model = ClassifierDITRL(lfd_params,
                                filename,
                                backbone_id,
                                use_feature_extractor=True,
                                use_spatial=False,
                                use_pipeline=True,
                                use_temporal=False,
                                spatial_train=False,
                                ditrl_pipeline_train=True)
        model = train_pipeline(lfd_params, model)
        model.save_model()

        print("Generating ITR Files")
        generate_itr_files(lfd_params, model, "train")
        generate_itr_files(lfd_params, model, "evaluation")

    if gen_vee:
        model = ClassifierDITRL(lfd_params,
                                filename,
                                backbone_id,
                                use_feature_extractor=True,
                                use_spatial=False,
                                use_pipeline=True,
                                use_temporal=False,
                                spatial_train=False,
                                ditrl_pipeline_train=False,
                                return_vee=True)

        print("Generating Sparse IAD Files")
        generate_binarized_iad_files(lfd_params,
                                     model,
                                     "train",
                                     backbone=backbone_id)
        generate_binarized_iad_files(lfd_params,
                                     model,
                                     "evaluation",
                                     backbone=backbone_id)

    if train_p:
        model = ClassifierDITRL(lfd_params,
                                filename,
                                backbone_id,
                                use_feature_extractor=False,
                                use_spatial=False,
                                use_pipeline=False,
                                use_temporal=True,
                                spatial_train=False,
                                ditrl_pipeline_train=False,
                                temporal_train=True)
        model = train(lfd_params, model, input_dtype="itr",
                      verbose=True)  # make sure to use ITRs
        model.save_model()

    if eval_p:
        print("Evaluating Model")
        model = ClassifierDITRL(lfd_params,
                                filename,
                                backbone_id,
                                use_feature_extractor=False,
                                use_spatial=False,
                                use_pipeline=False,
                                use_temporal=True,
                                spatial_train=False,
                                ditrl_pipeline_train=False,
                                temporal_train=False)

        train_df = evaluate(lfd_params, model, mode="train", input_dtype="itr")
        train_df["mode"] = ["train"] * len(train_df)
        eval_df = evaluate(lfd_params,
                           model,
                           mode="evaluation",
                           verbose=True,
                           input_dtype="itr")
        eval_df["mode"] = ["evaluation"] * len(eval_df)
        df = pd.concat([train_df, eval_df])
        df["repeat"] = [save_id] * len(df)

        out_filename = os.path.join(lfd_params.args.output_dir,
                                    "output_" + save_id + ".csv")
        df.to_csv(out_filename)
        print("Output placed in: " + out_filename)
Example no. 6
def main(save_id, gen_p, train_p, eval_p, backbone_id, use_bottleneck=True):
    from model_def import define_model
    model_dict = define_model(backbone_id)

    num_segments = model_dict["num_segments"]
    bottleneck_size = model_dict["bottleneck_size"]
    dense_sample = model_dict["dense_sample"]
    dense_rate = model_dict["dense_rate"]

    dir_name = os.path.join("saved_models", save_id)  # per-run save directory
    if not os.path.exists(dir_name):
        os.makedirs(dir_name)
    filename = os.path.join(dir_name, "model")  # keep the weights inside the save directory

    lfd_params = default_model_args(save_id=save_id,
                                    log_dir=dir_name,
                                    num_segments=num_segments,
                                    bottleneck_size=bottleneck_size,
                                    dense_sample=dense_sample,
                                    dense_rate=dense_rate)

    if gen_p:
        # generate IADs for both splits
        print("Generating IAD Files")
        model = Classifier(lfd_params,
                           filename,
                           backbone_id,
                           use_feature_extractor=True,
                           use_spatial_lstm=False,
                           spatial_train=False,
                           use_bottleneck=use_bottleneck)

        generate_iad_files(lfd_params, model, "train", backbone=backbone_id)
        generate_iad_files(lfd_params,
                           model,
                           "evaluation",
                           backbone=backbone_id)

    if train_p:
        print("Training Policy")
        model = PolicyLearner(lfd_params,
                              filename,
                              backbone_id,
                              use_feature_extractor=False,
                              use_spatial_lstm=True,
                              spatial_train=True,
                              policy_train=True,
                              use_bottleneck=use_bottleneck)

        # Train policy learner
        model = train(lfd_params,
                      model,
                      verbose=False,
                      input_dtype="iad",
                      ablation=False)
        model.save_model()

    if eval_p:
        model = PolicyLearner(lfd_params,
                              filename,
                              backbone_id,
                              use_feature_extractor=False,
                              use_spatial_lstm=True,
                              policy_train=False,
                              use_bottleneck=use_bottleneck)

        df = evaluate_action_trace(lfd_params,
                                   model,
                                   verbose=True,
                                   input_dtype="iad")
        out_filename = os.path.join(lfd_params.args.output_dir,
                                    "output_" + save_id + "_action_trace.csv")
        df.to_csv(out_filename)
        print("Output placed in: " + out_filename)

        df = evaluate_action_trace(lfd_params,
                                   model,
                                   input_dtype="iad",
                                   ablation=True,
                                   verbose=True,
                                   mode="train")
        out_filename = os.path.join(
            lfd_params.args.output_dir,
            "output_" + save_id + "_action_trace_ablation_train.csv")
        df.to_csv(out_filename)
        print("Output placed in: " + out_filename)

        df = evaluate_action_trace(lfd_params,
                                   model,
                                   input_dtype="iad",
                                   ablation=True,
                                   verbose=True,
                                   mode="evaluation")
        out_filename = os.path.join(
            lfd_params.args.output_dir,
            "output_" + save_id + "_action_trace_ablation_eval.csv")
        df.to_csv(out_filename)
        print("Output placed in: " + out_filename)