    def test_nan(self):
        # checking for NaN values
        self.assertTrue(np.isnan(polyval(np.nan, np.nan)))
        self.assertTrue(np.isnan(polyval(2, np.nan)))
        nan_flags = np.isnan(polyval(self.a, np.nan))
        self.assertTrue(nan_flags[0] and nan_flags[1])
    def test_full_coverage(self):
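        # The expected values below are consistent with self.a == [1, 2]
        # (presumably set in setUp): polyval(x, c) evaluates sum_k c[k] * x**k.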
        self.assertEqual(polyval(5, 5), 5)
        self.assertEqual(polyval(2, self.a), 5)

        r1 = polyval([5, 4], self.a)
        self.assertEqual(r1[0], 11)
        self.assertEqual(r1[1], 9)

        r2 = polyval([[5, 5], [5, 5]], self.a)
        self.assertTrue(r2[0][0] == r2[0][1] == r2[1][0] == r2[1][1] == 11)

        # Path Coverage
        # loop none
        b = [1]
        r3 = polyval([5, 4], b)
        self.assertEqual(r3[0], 1)
        self.assertEqual(r3[1], 1)

        r4 = polyval([[5, 5], [5, 5]], b)
        self.assertTrue(r4[0][0] == r4[0][1] == r4[1][0] == r4[1][1] == 1)

        # loop once
        c = [1, 2]

        r5 = polyval([5, 4], c)
        self.assertEqual(r5[0], 11)
        self.assertEqual(r5[1], 9)

        r6 = polyval([[5, 5], [5, 5]], c)
        self.assertTrue(r6[0][0] == r6[0][1] == r6[1][0] == r6[1][1] == 11)

        # loop twice
        d = [1, 2, 3]

        r7 = polyval([5, 4], d)
        self.assertEqual(r7[0], 86)
        self.assertEqual(r7[1], 57)

        r8 = polyval([[5, 5], [5, 5]], d)
        self.assertTrue(r8[0][0] == r8[0][1] == r8[1][0] == r8[1][1] == 86)
def bernstein_error_partition_cuda(
    nn,
    f,
    degree_bound,
    input_box,
    output_index,
    activation,
    filename,
):
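    """Estimate the gap between the network and its Bernstein approximation.

    Both the network ``nn`` and the Bernstein polynomial built from ``f`` are
    evaluated on a dense sampling grid over ``input_box``; the maximum sampled
    deviation plus a Lipschitz term covering points between samples is
    returned as the error bound for the given ``output_index``.
    """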
    global step
    step += 1
    import error_bound
    eps = error_bound.error_bound
    input_dim = len(degree_bound)
    lips, network_output_range = lipschitz(nn, input_box, output_index,
                                           activation)

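    # Longest edge of the input box; combined with the Lipschitz constant it
    # bounds how much the network output can vary across the box.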
    distance_estimate = 0
    for idxState in range(input_dim):
        diff = np.diff(input_box[idxState])[0]
        if diff > distance_estimate:
            distance_estimate = diff

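    # Choose enough partitions per dimension that the variation within one
    # grid cell (Lipschitz constant times cell size) stays below eps.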
    LD_estimate = lips * distance_estimate * np.sqrt(input_dim)
    num_partition = int(np.ceil(LD_estimate // eps + 1))

    partition = [num_partition] * input_dim

    print('---------------' + filename + '-------------------')
    print('step: {}'.format(step))
    print('degree bound: {}'.format(degree_bound))
    print('number of partition: {}'.format(num_partition))
    print('Lipschitz constant: {}'.format(lips))

    all_comb_lists = sample_points_list(partition, input_dim)

    if isinstance(lips, np.ndarray):
        lips = lips[0]

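    # Keep the full sampling grid in memory when it is small enough;
    # otherwise spill it to a gzip-compressed HDF5 cache on disk.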
    sample_times = (num_partition + 1)**input_dim
    large_sample_times = False
    if sample_times < 1e7:
        all_sample_points = np.zeros(
            (sample_times, input_dim), dtype=np.float32)
        all_shift_points = np.zeros(
            (sample_times, input_dim), dtype=np.float32)
    else:
        large_sample_times = True
        os.system('rm ./cach.hdf5')
        hdf5_store = h5py.File('./cach.hdf5', 'a')
        all_sample_points = hdf5_store.create_dataset(
            "all_sample_points", (sample_times, input_dim), compression='gzip')
        all_shift_points = hdf5_store.create_dataset("all_shift_points",
                                                     (sample_times, input_dim),
                                                     compression='gzip')

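    # Side length of one grid cell along each input dimension.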
    partition_box = np.zeros(input_dim, dtype=np.float64)
    for j in range(input_dim):
        alpha_j = np.float64(input_box[j][0])
        beta_j = np.float64(input_box[j][1])
        partition_box[j] = (beta_j - alpha_j) / num_partition

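    # Fill in the grid coordinates dimension by dimension; point_shift_all
    # presumably nudges the sample points before polynomial evaluation.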
    for idxState in range(input_dim):
        alpha_j = np.float64(input_box[idxState][0])
        beta_j = np.float64(input_box[idxState][1])
        all_sample_points[:, idxState] = (
            (beta_j - alpha_j) *
            (points_list(all_comb_lists, idxState) / num_partition) + alpha_j)
        all_shift_points = point_shift_all(all_sample_points, input_box,
                                           large_sample_times,
                                           all_shift_points)
    if large_sample_times:
        hdf5_store.close()

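    # Build the Bernstein polynomial approximation of the network and wrap it
    # in a callable evaluated inside a TensorFlow session.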
    order_list, coeffs_list = nn_poly_approx_bernstein_cuda(
        f, degree_bound, input_box, output_index)
    poly = polyval(order_list, degree_bound, coeffs_list, 'test')

    if large_sample_times:
        with h5py.File('./cach.hdf5', 'r') as hdf5_store:
            all_sample_points = hdf5_store['all_sample_points'][:]
            all_shift_points = hdf5_store['all_shift_points'][:]

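    # Smaller batches for the 'nn_5*' and 'nn_2*' benchmarks, presumably to
    # keep memory usage in check.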
    if filename[:4] in ('nn_5', 'nn_2'):
        batch_size = 1e5
    else:
        batch_size = 1e7
    batch_num = math.ceil(all_sample_points.shape[0] / batch_size)
    batch_idx = np.arange(1, batch_num) * batch_size
    batch_idx = batch_idx.astype(int)
    all_sample_points_batches = np.split(all_sample_points, batch_idx, axis=0)
    all_shift_points_batches = np.split(all_shift_points, batch_idx, axis=0)

    poly_results = np.zeros((all_sample_points.shape[0], 1))
    nn_results = np.zeros((all_sample_points.shape[0], 1))

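    # Evaluate the polynomial on the shifted points and the network on the
    # original sample points, one batch at a time.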
    with U.make_session() as sess:
        sess.run(tf.global_variables_initializer())
        batch_pointer = 0
        print('number of sampling points: {}'.format(
            all_sample_points.shape[0]))
        for sample_points, shift_points in zip(all_sample_points_batches,
                                               all_shift_points_batches):
            batch_range = range(batch_pointer,
                                batch_pointer + sample_points.shape[0])
            print('batch_range: {}'.format(batch_range))
            poly_results[batch_range, :] = poly(sess, shift_points)
            nn_results[batch_range, :] = nn(sess, sample_points)
            batch_pointer += sample_points.shape[0]

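    # Largest observed deviation on the grid plus a Lipschitz term covering
    # the diagonal of one grid cell.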
    sample_error = np.max(np.absolute(poly_results[:, 0] - nn_results[:, 0]))
    error = sample_error + lips * LA.norm(partition_box)
    print('bp to nn error: {}'.format(error))

    return error
def bernstein_error_partition_cuda(f_details, f, d, box, output_index,
                                   activation, filename, eps=1e-2):
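    """Variant of the Bernstein error estimation without batching or disk caching.

    ``eps`` is looked up per benchmark from ``filename`` (unknown names keep
    the default).  The returned bound is the maximum sampled deviation between
    the network and its Bernstein approximation plus a Lipschitz term covering
    points between grid samples.
    """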
    # Per-benchmark error tolerance; filenames not listed keep the default eps.
    eps_by_filename = {
        'nn_12_relu': 1e-2,
        'nn_12_sigmoid': 1e-2,
        'nn_12_tanh': 1e-2,
        'nn_12_relu_tanh': 1e-3,
        'nn_13_relu': 1e-3,
        'nn_13_sigmoid': 5e-4,
        'nn_13_tanh': 1e-2,
        'nn_13_relu_tanh': 1e-2,
        'nn_13_relu_tanh_1': 1e-2,
        'nn_13_relu_tanh_100': 1e-2,
        'nn_13_relu_tanh_origin': 1e-2,
        'nn_14_relu': 1e-2,
        'nn_14_sigmoid': 5e-3,
        'nn_14_tanh': 1e-2,
        'nn_14_relu_sigmoid': 5e-3,
        'nn_tora_relu_retrained': 1e-2,
        'nn_tora_tanh': 2e-2,
        'nn_tora_relu_tanh': 1e-2,
        'nn_tora_sigmoid': 1e-2,
        'nn_16_relu': 5e-3,
        'nn_16_sigmoid': 1e-2,
        'nn_16_tanh': 1e-2,
        'nn_16_relu_tanh': 1e-2,
        'nn_18_relu': 4e-3,
        'nn_18_relu_tanh': 4e-3,
        'nn_18_sigmoid': 4e-3,
        'nn_18_tanh_new': 4e-3,
    }
    eps = eps_by_filename.get(filename, eps)

    m = len(d)
    lips, network_output_range = lipschitz(f_details, box, output_index, activation)

    distance_estimate = 0
    for j in range(m):
        diff = np.diff(box[j])[0]
        if diff > distance_estimate:
            distance_estimate = diff

    LD_estimate = lips * distance_estimate * np.sqrt(m)
    num_partition = int(np.ceil(LD_estimate // eps + 1))

    partition = [num_partition]*m
    all_comb_lists = degree_comb_lists(partition, m)

    if isinstance(lips, np.ndarray):
        lips = lips[0]

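    # Dense sampling grid over the input box and the shifted points at which
    # the polynomial is evaluated.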
    all_sample_points = np.zeros((len(all_comb_lists), m), dtype=np.float64)
    all_shift_points = np.zeros((len(all_comb_lists), m), dtype=np.float64)
    partition_box = np.zeros(m, dtype=np.float64)
    for j in range(m):
        alpha_j = np.float64(box[j][0])
        beta_j = np.float64(box[j][1])
        partition_box[j] = (beta_j - alpha_j) / num_partition

    all_comb_lists = np.array(all_comb_lists)
    for idxState in range(m):
        alpha_j = np.float64(box[idxState][0])
        beta_j = np.float64(box[idxState][1])
        all_sample_points[:, idxState] = (
            (beta_j - alpha_j) * (all_comb_lists[:, idxState]/num_partition)
            + alpha_j
        )
        all_shift_points = point_shift_all(all_sample_points, box)

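    # Build the Bernstein approximation and evaluate both it and the network
    # on the full grid in a single pass (no batching in this variant).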
    degree_list, coef_list = nn_poly_approx_bernstein_cuda(f, d, box, output_index)
    poly = polyval(degree_list, d, coef_list, 'test')
    with U.make_session() as sess:
        sess.run(tf.global_variables_initializer())
        poly_results = poly(sess, all_shift_points)
        nn_results = f_details(sess, all_sample_points)

    # nn_results = np.zeros(len(all_sample_points), dtype=np.float64)
    # for index in range(all_sample_points.shape[0]):
    #     point = all_sample_points[index,:]
    #     nn_results[index] = f(point)[output_index]


    sample_error = np.max(np.absolute(poly_results[:, 0] - nn_results[:, 0]))
    # max_index = np.argmax(np.absolute(poly_results - nn_results))
    # print(max_index)
    # print(all_sample_points[max_index, :])
    # print(nn_results[max_index])
    # print(all_shift_points[max_index, :])
    # print(poly_results[max_index])
    error = sample_error + lips * LA.norm(partition_box)

    return error