def test_neural_entropy_uniform(num_experiments, scramble=True):
    # One data point per bin, filled in raster order
    # (num_data_points covers 20% of the BINS x BINS grid)
    transfer_entropy = []
    norm_entropy = []
    data_per_bin = 1
    num_data_points = int(.2 * BINS * BINS)
    print('num_data_points:{}'.format(num_data_points))
    brain_output = np.zeros((num_data_points, 2))
    row = 0
    for i in range(BINS):
        for j in range(BINS):
            for _ in range(data_per_bin):
                if row >= num_data_points:
                    break
                # the /100 scaling assumes BINS == 100, so values lie in [0, 1)
                brain_output[row, :] = [i / 100 + 0.0001, j / 100 + 0.0001]
                row += 1
    if scramble:
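        # shuffling whole rows preserves the joint 2D histogram (so Shannon
        # entropy is unchanged) but destroys the temporal ordering that
        # transfer entropy is sensitive to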
        rs = RandomState(1)
        for _ in range(num_experiments):
            rs.shuffle(brain_output)
            norm_entropy.append(get_shannon_entropy_2d(brain_output))
            transfer_entropy.append(get_transfer_entropy(brain_output))
    else:
        for _ in range(num_experiments):
            norm_entropy.append(get_shannon_entropy_2d(brain_output))
            transfer_entropy.append(get_transfer_entropy(brain_output))

    print("Transfer Entropy on uniform bin data: {}".format(transfer_entropy))
    print("Shannon Entropy on uniform bin data: {}".format(norm_entropy))
    # plt.plot(brain_output)
    # plt.show()
    return norm_entropy, transfer_entropy


def test_coupled_oscillators(num_experiments):
    from dyadic_interaction.dynamical_systems import spring_mass_system
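    # Two spring-mass oscillators with randomly sampled masses, spring
    # constants and rest lengths; spring_mass_system is assumed to couple
    # them and return their trajectories.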
    transfer_entropy = []
    norm_entropy = []
    rs = RandomState(0)
    for _ in range(num_experiments):
        spring_data = spring_mass_system(masses=rs.uniform(1.0, 10.0, 2),
                                         constants=rs.uniform(1.0, 50.0, 2),
                                         lengths=rs.uniform(0.1, 5.0, 2))
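        # spring_data columns are assumed to be [pos1, vel1, pos2, vel2]:
        # columns 0 and 2 give the positions of the two masses (the
        # velocities in columns 1 and 3 are explored in the comments below)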
        pos = np.column_stack((spring_data[:, 0], spring_data[:, 2]))
        # transfer_entropy, local_te = get_transfer_entropy(pos, local=True)
        norm_entropy.append(
            get_shannon_entropy_2d(pos, min_v=pos.min(), max_v=pos.max()))
        transfer_entropy.append(
            get_transfer_entropy(pos, min_v=pos.min(), max_v=pos.max()))

        print("Transfer Entropy of spring positions: {}".format(
            transfer_entropy))
        print("Shannon Entropy of spring positions: {}".format(norm_entropy))
        # plt.plot(pos)
        # plt.show()
        # vel = np.column_stack((spring_data[:, 1], spring_data[:, 3]))
        # transfer_entropy = get_transfer_entropy(vel, log=True)
        # norm_entropy = get_shannon_entropy_2d(vel)
        # print("Transfer Entropy of spring velocities: {}".format(transfer_entropy))
        # print("Shannon Entropy of spring velocities: {}".format(norm_entropy))
        # plt.plot(vel)
        # plt.show()
        # plt.plot(local_te[0])
        # plt.plot(local_te[1])
        # plt.show()
    return norm_entropy, transfer_entropy


def test_neural_entropy_random(num_experiments,
                               num_data_points,
                               distribution='uniform'):
    """
    Simulate pairs of uncorrelated random time series.
    :param num_experiments: how many simulations to run
    :param num_data_points: how many data points per time series
    :param distribution: 'normal' or 'uniform'; any value other than
        'normal' falls back to uniform
    """
    transfer_entropy = []
    norm_entropy = []
    rs = RandomState(0)
    if distribution == 'normal':
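        # bins span [-3, 3]: ~99.7% of N(0, 1) samples fall in this range,
        # so almost no data ends up outside the binning range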
        brain_output = rs.normal(0, 1, (num_experiments, num_data_points, 2))
        for i in range(num_experiments):
            norm_entropy.append(
                get_shannon_entropy_2d(brain_output[i, :, :],
                                       min_v=-3.,
                                       max_v=3.))
            transfer_entropy.append(
                get_transfer_entropy(brain_output[i, :, :],
                                     min_v=-3.,
                                     max_v=3.))
    else:
        brain_output = rs.rand(num_experiments, num_data_points, 2)
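        # rs.rand samples uniformly in [0, 1), which presumably matches the
        # estimators' default binning range (no min_v / max_v passed here)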
        for i in range(num_experiments):
            norm_entropy.append(get_shannon_entropy_2d(brain_output[i, :, :]))
            transfer_entropy.append(get_transfer_entropy(
                brain_output[i, :, :]))
    print("Simulated {} experiments of {} data points".format(
        num_experiments, num_data_points))
    print("Transfer Entropy on random {} data: {}".format(
        distribution, transfer_entropy))
    print("Shannon Entropy on random {} data: {}".format(
        distribution, norm_entropy))
    # plt.plot(brain_output)
    # plt.show()
    return norm_entropy, transfer_entropy


def test_neural_entropy_single(num_experiments, num_data_points):
    # Constant arrays of the same value (single bin)
    transfer_entropy = []
    norm_entropy = []
    brain_output = np.ones((num_data_points, 2))
    for i in range(num_experiments):
        rs = RandomState(1)
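        # rs is re-seeded with the same value every iteration, so add_noise
        # is expected to apply the same jitter pattern each time; because
        # brain_output is reused, the noise accumulates across experiments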
        brain_output = add_noise(brain_output, rs, noise_level=1e-8)
        norm_entropy.append(get_shannon_entropy_2d(brain_output))
        transfer_entropy.append(get_transfer_entropy(brain_output))
    print("Transfer Entropy on 1D constant data: {}".format(transfer_entropy))
    print("Shannon Entropy on 1D constant data: {}".format(norm_entropy))
    # plt.plot(brain_output)
    # plt.show()
    return norm_entropy, transfer_entropy


def test_neural_entropy_constant(num_experiments, num_data_points):
    # Two constant arrays with different values (source = 1, destination = 0.5)
    transfer_entropy = []
    norm_entropy = []
    source = np.ones(num_data_points)
    destination = np.ones(num_data_points) / 2.
    brain_output = np.column_stack((source, destination))
    for _ in range(num_experiments):
        rs = RandomState(1)
        # rs does not "keep going": it is re-created with the same seed each
        # iteration, so the same noise pattern is added every time, although
        # the noise accumulates because brain_output is reused across runs
        brain_output = add_noise(brain_output, rs, noise_level=1e-8)
        norm_entropy.append(get_shannon_entropy_2d(brain_output))
        transfer_entropy.append(get_transfer_entropy(brain_output))
    print("Transfer Entropy on 2D constant data: {}".format(transfer_entropy))
    print("Shannon Entropy on 2D constant data: {}".format(norm_entropy))
    # plt.plot(brain_output)
    # plt.show()
    return norm_entropy, transfer_entropy


def test_neural_entropy_correlated(num_experiments,
                                   num_data_points,
                                   cov=0.99,
                                   delay=1):
    # One series random, the other correlated with the first at some delay
    transfer_entropy = []
    norm_entropy = []
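    # Assuming generate_correlated_data produces dst[t] = cov * src[t - delay]
    # + (1 - cov) * noise, with unit-variance Gaussian source and noise, the
    # expected source-destination correlation is cov / sqrt(cov^2 + (1-cov)^2)
    # and the expected transfer entropy for Gaussians is -0.5 * ln(1 - corr^2)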
    corr_expected = cov / (1 * math.sqrt(cov**2 + (1 - cov)**2))
    entropy_expected = -0.5 * math.log(1 - corr_expected**2)
    rs = RandomState(0)
    for _ in range(num_experiments):
        brain_output = generate_correlated_data(num_data_points, cov, delay,
                                                rs)
        norm_entropy.append(
            get_shannon_entropy_2d(brain_output, min_v=-3., max_v=3.))
        transfer_entropy.append(
            get_transfer_entropy(brain_output,
                                 delay,
                                 log=True,
                                 min_v=-3.,
                                 max_v=3.))

    # transfer_entropy, local_te = get_transfer_entropy(brain_output, delay, local=True)
    # local_te = np.array(local_te)
    # plt.plot(brain_output)
    # plt.show()
    # plt.plot(local_te[0])
    # plt.plot(local_te[1])
    # plt.show()

    print(
        "Transfer Entropy on correlated data ({} data points, covariance {}, delay {}): {}\n"
        "Expected TE: {}".format(num_data_points, cov, delay, transfer_entropy,
                                 entropy_expected))
    print("Shannon Entropy on correlated data: {}".format(norm_entropy))
    return norm_entropy, transfer_entropy