import matlab.engine

# Start a MATLAB session and put Mooij et al.'s cause-effect-pair (cep)
# toolbox on the MATLAB path.
eng = matlab.engine.start_matlab()
eng.addpath(CAUSALITY_ROOT + 'comparison_methods/Mooij16/cep')
eng.startup(nargout=0)
eng.local_config(nargout=0)

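# Settings for the additive-noise-model (ANM) fit in the cep toolbox:
# no FITC approximation, L-BFGS-B as the minimizer, and the 'pHSIC'
# (HSIC p-value) evaluation criterion as the score.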
methodpars = eng.struct()
methodpars['FITC'] = 0
methodpars['minimize'] = 'minimize_lbfgsb'
methodpars['evaluation'] = 'pHSIC'

accuracy = 0
correct_decisions = 0
undecided = 0

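# Run the ANM decision on each benchmark pair and tally correct and
# undecided decisions.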
for i in range(FIRST_ID - 1, LAST_ID):
    (x, y), true_direction = get_pair(i, BENCHMARK)
    if true_direction == -1:
        continue

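    # matlab.double builds a 1xN row vector from a Python list; transpose
    # so that cep_anm receives column vectors.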
    x_ = matlab.double(x.tolist())
    x_T = eng.transpose(x_)
    y_ = matlab.double(y.tolist())
    y_T = eng.transpose(y_)

    result = eng.cep_anm(x_T, y_T, methodpars)
    predicted_direction = result['decision']

    if predicted_direction == true_direction:
        correct_decisions += 1
    if predicted_direction == 0:
        undecided += 1
Example #2

import pandas as pd
import cgnn  # the CGNN package used for pairwise causal discovery

# prediction_file = './benchmark_predictions/{}_{}.txt'.format(BENCHMARK, NAME)
# if os.path.isfile(prediction_file):
#     c = 0
#     while os.path.isfile(prediction_file):
#         c += 1
#         prediction_file = './benchmark_predictions/{}_{}_{}.txt'.format(
#                 BENCHMARK, NAME, c)

accuracy = 0
sum_of_weights = 0
weighted_correct = 0

for i in range(FIRST_ID-1, LAST_ID):
    (x, y), true_direction, weight = get_pair(i, BENCHMARK)
    if true_direction == 0:
        continue

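    # Pack the pair into the single-row DataFrame (SampleID, A, B) that is
    # fed to the CGNN model.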
    df = pd.DataFrame(index=[0], columns=['SampleID', 'A', 'B'], dtype='object')
    df.SampleID.iloc[0] = 'pair1'
    df.A.iloc[0] = x
    df.B.iloc[0] = y
    model = cgnn.GNN(backend="TensorFlow")
    predictions = model.predict_dataset(df, printout='_printout.csv')
    predictions = pd.DataFrame(predictions, columns=["Predictions"])
    if predictions.iloc[0, 0] > 0:
        predicted_direction = 1
    else:
        predicted_direction = -1
    if predicted_direction == true_direction:
        weighted_correct += weight  # assumed completion, mirroring the weighted tally in the other examples
    sum_of_weights += weight
    accuracy = weighted_correct / sum_of_weights

Example #3

prediction_file = './benchmark_predictions/{}_{}.txt'.format(BENCHMARK, NAME)
if os.path.isfile(prediction_file):
    c = 0
    while os.path.isfile(prediction_file):
        c += 1
        prediction_file = './benchmark_predictions/{}_{}_{}.txt'.format(
                BENCHMARK, NAME, c)

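# Fix the NumPy seed so the run, in particular the subsampling of each pair,
# is reproducible.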
np.random.seed(1)

accuracy = 0
sum_of_weights = 0
weighted_correct = 0

for i in range(FIRST_ID-1, LAST_ID):
    (x, y), true_direction, weight = get_pair(
                i, BENCHMARK, subsample_size=SUBSAMPLE)
    if true_direction == 0:
        continue

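    # Rescale both variables to the feature range given by `scale` before inference.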
    scaler = MinMaxScaler(scale)
    x, y = scaler.fit_transform(np.array((x, y)).T).T

    minimizer = nifty5.RelaxedNewton(controller=nifty5.GradientNormController(
            tol_rel_gradnorm=TOL_REL_GRADNORM,
            iteration_limit=ITERATION_LIMIT,
            convergence_level=5,
            ))

    bcm = bayesian_causal_model_nifty.cause_model_shallow.CausalModelShallow(
        N_bins=N_BINS,
        noise_var=NOISE_VAR,
        # remaining arguments assumed to mirror the non-NIFTy example below
        rho=RHO,
        power_spectrum_beta=POWER_SPECTRUM_BETA,
        power_spectrum_f=POWER_SPECTRUM_F,
    )
Example #4
            "bcs_10samples",
            "tcep",
            "tcep_20samples",
    ]:

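        # One large figure per benchmark: a 10x10 grid for the 100 bcs pairs,
        # an 11x10 grid for the 108 tcep pairs.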
        fig = plt.figure(figsize=(16, 16))
        if benchmark.startswith("bcs"):
            subplots = fig.subplots(10, 10)
            num_plots = 100
        elif benchmark.startswith("tcep"):
            subplots = fig.subplots(11, 10)
            num_plots = 108

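        # Scatter-plot every pair, coloured by its ground-truth direction;
        # pairs without a definite direction are skipped.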
        idx = -1
        for i in range(num_plots):
            (x, y), true_direction, w = get_pair(i, benchmark)
            if true_direction == -1:
                c = color2
            elif true_direction == +1:
                c = color1
            else:
                continue
            idx += 1
            ax = subplots[idx // 10, idx % 10]
            ax.scatter(x, y, s=2, c=c)
            ax.tick_params(
                axis="both",
                which="both",
                bottom=False,
                top=False,
                left=False,
            )
Example #5
import os

import colorama
import numpy as np
from sklearn.preprocessing import MinMaxScaler

import bayesian_causal_model.cause_model_shallow
# BCMParser, get_args and get_pair are assumed to be provided by the
# repository's own helper modules (not shown in this snippet).


def main():
    parser = BCMParser()
    args = get_args(parser)
    verbosity = args.verbosity
    benchmark = args.benchmark

    print(
        f"performing {args.benchmark} benchmark for ids {args.first_id} to {args.last_id},\n"
        f"with N_bins: {args.n_bins},\n"
        f"noise variance: {args.noise_variance}\n"
        f"power spectrum beta: {args.power_spectrum_beta_str}\n"
        f"power spectrum f: {args.power_spectrum_f_str}\n"
        f"rho: {args.rho}\n"
        f"scale_max: {args.scale_max}\n"
        f"storing results with suffix {args.name}"
    )

    power_spectrum_beta = lambda q: eval(args.power_spectrum_beta_str)
    power_spectrum_f = lambda q: eval(args.power_spectrum_f_str)
    scale = (0, args.scale_max)

    prediction_file = "./benchmark_predictions/{}_{}.txt".format(benchmark, args.name)
    if os.path.isfile(prediction_file):
        c = 0
        while os.path.isfile(prediction_file):
            c += 1
            prediction_file = "./benchmark_predictions/{}_{}_{}.txt".format(
                benchmark, args.name, c
            )

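    # Running totals used to compute the weighted accuracy over all
    # repetitions and pairs.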
    sum_of_weights = 0
    weighted_correct = 0

    for rep in range(args.repetitions):
        np.random.seed(rep)

        for i in range(args.first_id - 1, args.last_id):
            (x, y), true_direction, weight = get_pair(
                i, benchmark, subsample_size=args.subsample
            )
            if true_direction == 0:
                continue

            scaler = MinMaxScaler(scale)
            x, y = scaler.fit_transform(np.array((x, y)).T).T

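            # Shallow Bayesian causal model with the configured number of bins,
            # noise variance, and power spectra.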
            bcm = bayesian_causal_model.cause_model_shallow.CausalModelShallow(
                N_bins=args.n_bins,
                noise_var=args.noise_variance,
                rho=args.rho,
                power_spectrum_beta=power_spectrum_beta,
                power_spectrum_f=power_spectrum_f,
            )

            bcm.set_data(x, y)

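            # Evidence for x -> y (direction=1) and y -> x (direction=-1);
            # the direction with the smaller value is predicted.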
            H1 = bcm.get_evidence(direction=1, verbosity=verbosity - 1)
            H2 = bcm.get_evidence(direction=-1, verbosity=verbosity - 1)
            predicted_direction = 1 if H1 < H2 else -1

            if predicted_direction == true_direction:
                fore = colorama.Fore.GREEN
                weighted_correct += weight
            else:
                fore = colorama.Fore.RED
            sum_of_weights += weight
            accuracy = weighted_correct / sum_of_weights

            if verbosity > 0:
                print(
                    "dataset {}, {} true direction: {}, predicted direction {}\n"
                    "H1: {:.2e},\n H2: {:.2e},\n{}"
                    "accuracy so far: {:.2f}".format(
                        i,
                        fore,
                        true_direction,
                        predicted_direction,
                        H1,
                        H2,
                        colorama.Style.RESET_ALL,
                        accuracy,
                    )
                )

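            # Append one line per pair: 1-based pair id, predicted direction,
            # and the two evidence values.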
            with open(prediction_file, "a") as f:
                f.write("{} {} {} {}\n".format(i + 1, predicted_direction, H1, H2))

    print("accuracy: {:.2f}".format(accuracy))

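    # Store the run configuration and the final accuracy alongside the
    # prediction files.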
    benchmark_information = {
        "benchmark": benchmark,
        "n_bins": args.n_bins,
        "noise_var": args.noise_var,
        "rho": args.rho,
        "power_spectrum_beta": args.power_spectrum_beta_str,
        "power_spectrum_f": args.power_spectrum_f_str,
        "accuracy": accuracy,
        "prediction_file": prediction_file,
    }

    with open("benchmark_predictions/benchmarks_meta.txt", "a") as f:
        f.write(str(benchmark_information) + "\n")