示例#1
0
        y_lim=[1362, 1369],
        data_points_triplets=[
            (base_sig.t_a_nn, final_res.a_nn_corrected, f"{Const.A}_corrected"),
            (base_sig.t_b_nn, final_res.b_nn_corrected, f"{Const.B}_corrected")
        ],
        x_label=Const.YEAR_UNIT, y_label=Const.TSI_UNIT, max_points=1e7)


if __name__ == "__main__":
    # Script entry point: set up plotting, logging and data, then build a
    # ModelFitter for a VIRGO run. The fit itself presumably follows in code
    # beyond this chunk — confirm against the full file.
    ignore_warnings()

    # Configure global figure defaults used by all subsequent plots.
    visualizer = Visualizer()
    visualizer.set_figsize()

    ARGS = parse_arguments()
    # One results directory per run, named after the model and output method.
    results_dir_path = create_results_dir(Const.RESULTS_DIR, f"{ARGS.model_type}_{ARGS.output_method}")
    create_logger(results_dir_path)

    mode = Mode.VIRGO

    # Unpack the run data; the final element (ground truth) is unused here.
    data, t_field_name, a_field_name, b_field_name, _ = load_data_run(ARGS, mode)

    model, model_type, correction_method, exposure_method, output_model, output_method, outlier_fraction \
        = setup_run(ARGS, mode, results_dir_path)

    # Fitter pairs the two signals with the configured exposure method and
    # outlier-handling fraction from setup_run.
    fitter = ModelFitter(data=data,
                         t_field_name=t_field_name,
                         a_field_name=a_field_name,
                         b_field_name=b_field_name,
                         exposure_method=exposure_method,
                         outlier_fraction=outlier_fraction)
示例#2
0
def analysis_job(dataset: Dataset, model_type: str, output_model_type: str,
                 model_params: str, correction_method: str):
    """Run one modeling job end to end.

    Loads the pickled :class:`ModelFitter` referenced by ``dataset``, fits the
    selected model/output-model pair, saves and plots the results under the
    Flask app's static results directory, and keeps the shared ``status``
    object updated throughout.

    :param dataset: record providing ``pickle_location`` of a saved ModelFitter.
    :param model_type: identifier of the base model to fit.
    :param output_model_type: identifier of the output model to fit.
    :param model_params: string encoding a dict of parameter overrides, e.g.
        ``'{"NUM_INDUCING_POINTS": 500}'``.
    :param correction_method: name convertible to a ``CorrectionMethod`` member.

    The ``status`` lock is always released in ``finally``, even on failure.
    """
    import ast
    import traceback

    try:
        # Enforce optional params
        # TODO: improve this code
        # TODO: this code sets constants that are used within model and passes constants that are passed via init
        # TODO: this way all relevant constants are taken into account
        num_inducing_points = Gpc.NUM_INDUCING_POINTS
        points_in_window = Gpc.POINTS_IN_WINDOW
        # SECURITY: model_params originates outside this function (this job runs
        # inside a Flask app context, so it is plausibly request-derived). Parse
        # it as a Python literal instead of eval() so no arbitrary code can run.
        model_params_dict = ast.literal_eval(model_params)

        for key, value in model_params_dict.items():
            if key == "NUM_INDUCING_POINTS":
                num_inducing_points = value
            elif key == "POINTS_IN_WINDOW":
                points_in_window = value
            else:
                # Any other override is written straight onto the Gpc constants,
                # which the models read globally.
                setattr(Gpc, key, value)

        # Load pickle
        status.update_progress("Loading dataset", 10)
        pickle_location = dataset.pickle_location
        # NOTE(review): pickle.load executes arbitrary code if the file is
        # attacker-controlled — confirm pickle_location is a trusted path.
        with open(pickle_location, "rb") as f:
            fitter: ModelFitter = pickle.load(f)

        # Get models
        model, output_model = get_models(model_type, output_model_type,
                                         num_inducing_points, points_in_window)

        # Run Fitter
        result = fitter(model, output_model,
                        CorrectionMethod(correction_method), Mode.VIRGO)

        # Create result folder (needs the app context for current_app.root_path)
        with app.app_context():
            results_dir_path = create_results_dir(
                os.path.join(current_app.root_path, "static", "results"),
                f"{model_type}_{output_model_type}")

        # Store signals; drop the (potentially large) inducing points before
        # the result is used further.
        save_modeling_result(results_dir_path, result, model_type)
        result.out.params_out.svgp_inducing_points = None

        # Plot results
        status.update_progress("Plotting results", 90)
        visualizer = Visualizer()
        visualizer.set_figsize()
        plot_results(visualizer, result, results_dir_path,
                     f"{model_type}_{output_model_type}")

        # Store folder location to status
        status.set_folder(basename(results_dir_path))

        # Close all figures from matplotlib
        plt.close("all")

        # Finish job
        status.set_percentage(100)

    except Exception as e:
        # Keep the original message, but also emit the full traceback so
        # failures are diagnosable (print(e) alone hides the location).
        print(e)
        traceback.print_exc()

    finally:
        # Release the job slot no matter what happened above.
        status.release()
if __name__ == "__main__":
    # Script entry point: compare every supported degradation-model type on
    # the same dataset. The loop body is cut off by the chunk boundary below;
    # presumably each fit result is appended to `results` — confirm.
    ignore_warnings()

    visualizer = Visualizer()
    visualizer.set_figsize()

    # "gen" mode runs against generator (synthetic) data; anything else is VIRGO.
    run_type = "comparison"
    ARGS = parse_arguments()
    if ARGS.mode == "gen":
        mode = Mode.GENERATOR
        run_type = "gen_" + run_type
    else:
        mode = Mode.VIRGO

    results_dir_path = create_results_dir(Const.RESULTS_DIR, run_type)
    create_logger(results_dir_path)

    data, t_field_name, a_field_name, b_field_name, ground_truth = load_data_run(
        ARGS, mode)

    results = []
    # Re-run setup for each model type so its per-model configuration
    # (correction, exposure, output method, outlier fraction) is rebuilt.
    for model_type in [
            "exp", "exp_lin", "spline", "isotonic", "smooth_monotonic",
            "ensemble"
    ]:
        ARGS.model_type = model_type
        model, _, correction_method, exposure_method, output_model, output_method, outlier_fraction \
            = setup_run(ARGS, mode, results_dir_path)

        fitter = ModelFitter(data=data,
示例#4
0
    # Plot the spectrum of signal A, restricted to frequencies up to
    # frq_range (fs / 2, i.e. Nyquist — computed before this chunk; confirm).
    visualizer.plot_signals([(frq[frq <= frq_range], a_s[frq <= frq_range], "{}_spectrum".format(Const.A), False)],
                            results_dir_path, "{}_spectrum".format(Const.A), legend="upper right",
                            y_label=Const.SPECTRUM_UNIT)

    # FFT spectrum of signal B; keep only frequencies up to Nyquist (fs / 2).
    b_s, fs, frq = fft_spectrum(b_inter, sampling_period)
    frq_range = fs / 2

    visualizer.plot_signals([(frq[frq <= frq_range], b_s[frq <= frq_range], "{}_spectrum".format(Const.B), False)],
                            results_dir_path, "{}_spectrum".format(Const.B), legend="upper right",
                            y_label=Const.SPECTRUM_UNIT)


if __name__ == "__main__":
    # Data-analysis entry point: load the dataset selected by --data_file and
    # plot its base signals. The elif chain may continue past this view.
    ARGS = parse_arguments()
    results_dir_path = create_results_dir(Const.RESULTS_DIR, "data_analysis")
    create_logger(results_dir_path)

    visualizer = Visualizer()
    visualizer.set_figsize()

    # Load data
    data = None
    if ARGS.data_file == "premos":
        # PREMOS branch is intentionally a no-op in this chunk.
        pass
    elif ARGS.data_file == "virgo_tsi":
        data = load_data(Const.DATA_DIR, Const.VIRGO_TSI_FILE, "virgo_tsi")
        plot_base_virgo_tsi_signals(data)
    elif ARGS.data_file == "spm":
        data = load_data(Const.DATA_DIR, Const.SPM_FILE, "spm")
        plot_base_spm_signals(data)