Code example #1
    def test_deepiv_models(self):
        """Smoke-test DeepIVEstimator under several loss configurations.

        Generates a simple IV data set (instrument z, confounder e, feature x,
        endogenous treatment p, outcome y), fits the estimator three times with
        different sampling / upper-bound-loss settings, and prints the held-out
        squared loss and some marginal effects for manual inspection.
        """
        num_obs = 2000
        first_stage_opts = 2
        second_stage_opts = 2
        # Unobserved confounder driving both the treatment and the outcome.
        conf = np.random.uniform(low=-0.5, high=0.5, size=(num_obs, 1))
        inst = np.random.uniform(size=(num_obs, 1))
        feat = np.random.uniform(size=(num_obs, 1)) + conf
        treat = feat + inst * conf + np.random.uniform(size=(num_obs, 1))
        outcome = treat * feat + conf

        losses, marg_effs = [], []

        # Fresh hold-out draw from the same data-generating process.
        inst_new = np.random.uniform(size=(num_obs, 1))
        conf_new = np.random.uniform(low=-0.5, high=0.5, size=(num_obs, 1))
        feat_new = np.random.uniform(size=(num_obs, 1)) + conf_new
        treat_new = feat_new + inst_new * conf_new + np.random.uniform(size=(num_obs, 1))
        outcome_new = treat_new * feat_new + conf_new

        configs = [(2, False, None), (2, True, None), (1, False, 1)]
        for num_samples, use_upper, grad_samples in configs:
            # First-stage network: maps concatenated (z, x) -> mixture features.
            treatment_model = keras.Sequential([
                keras.layers.Dense(10, activation='relu', input_shape=(2, )),
                keras.layers.Dense(10, activation='relu'),
                keras.layers.Dense(10, activation='relu')
            ])

            # Second-stage network: maps concatenated (t, x) -> scalar outcome.
            hmodel = keras.Sequential([
                keras.layers.Dense(10, activation='relu', input_shape=(2, )),
                keras.layers.Dense(10, activation='relu'),
                keras.layers.Dense(1)
            ])

            deepIv = DeepIVEstimator(
                10,
                lambda z, x: treatment_model(keras.layers.concatenate([z, x])),
                lambda t, x: hmodel(keras.layers.concatenate([t, x])),
                n_samples=num_samples,
                use_upper_bound_loss=use_upper,
                n_gradient_samples=grad_samples,
                s1=first_stage_opts,
                s2=second_stage_opts)
            deepIv.fit(outcome, treat, feat, inst)

            preds = deepIv.predict(treat_new, feat_new)
            losses.append(np.mean(np.square(outcome_new - preds)))
            marg_effs.append(
                deepIv.marginal_effect(np.array([[0.3], [0.5], [0.7]]),
                                       np.array([[0.4], [0.6], [0.2]])))
        print(f"losses: {losses}")
        print(f"marg_effs: {marg_effs}")
Code example #2
    def test_deepiv_arbitrary_covariance(self):
        """Smoke-test DeepIVEstimator with a d-dimensional treatment whose
        conditional distribution interpolates between two arbitrary Gaussians.

        Trains on the first half of the sample, evaluates squared loss on the
        second half, and prints one result dict per loss configuration.
        """
        d = 5
        n = 5000
        # A * A^T is always symmetric positive semidefinite, so this yields a
        # random valid covariance matrix.
        A1 = np.random.normal(size=(d, d))
        cov1 = np.matmul(A1, np.transpose(A1))
        # A convex combination of PSD matrices is again PSD, so interpolating
        # between cov1 and cov2 below stays a valid covariance.
        A2 = np.random.normal(size=(d, d))
        cov2 = np.matmul(A2, np.transpose(A2))
        m1 = np.random.normal(size=(d,))
        m2 = np.random.normal(size=(d,))
        x = np.random.uniform(size=(n, 1))
        z = np.random.uniform(size=(n, 1))
        # Mixing weight in [0, 1] controlled jointly by the feature and the
        # instrument.
        alpha = (x * x + z * z) / 2
        t = np.array([np.random.multivariate_normal(m1 + alpha[i] * (m2 - m1),
                                                    cov1 + alpha[i] * (cov2 - cov1)) for i in range(n)])
        # Outcome is ||t||^2 + x (einsum computes the row-wise dot product).
        y = np.expand_dims(np.einsum('nx,nx->n', t, t), -1) + x
        results = []
        s = 6
        for num_samples, use_upper, grad_samples in [(2, False, None), (2, True, None), (1, False, 1)]:
            # First stage: (z, x) -> mixture features for the treatment model.
            treatment_model = keras.Sequential([keras.layers.Dense(90, activation='relu', input_shape=(2,)),
                                                keras.layers.Dropout(0.2),
                                                keras.layers.Dense(60, activation='relu'),
                                                keras.layers.Dropout(0.2),
                                                keras.layers.Dense(30, activation='relu')])

            # Second stage: (t, x) with d + 1 total inputs -> scalar outcome.
            hmodel = keras.Sequential([keras.layers.Dense(90, activation='relu', input_shape=(d + 1,)),
                                       keras.layers.Dropout(0.2),
                                       keras.layers.Dense(60, activation='relu'),
                                       keras.layers.Dropout(0.2),
                                       keras.layers.Dense(30, activation='relu'),
                                       keras.layers.Dropout(0.2),
                                       keras.layers.Dense(1)])

            deepIv = DeepIVEstimator(s,
                                     lambda z, x: treatment_model(keras.layers.concatenate([z, x])),
                                     lambda t, x: hmodel(keras.layers.concatenate([t, x])),
                                     n_samples=num_samples, use_upper_bound_loss=use_upper,
                                     n_gradient_samples=grad_samples,
                                     first_stage_options={'epochs': 20}, second_stage_options={'epochs': 20})
            # Train on the first half; the second half is held out below.
            half = n // 2
            deepIv.fit(y[:half], t[:half], x[:half], z[:half])

            holdout_preds = deepIv.predict(t[half:], x[half:])
            results.append({'s': s, 'n1': num_samples, 'u': use_upper, 'n2': grad_samples,
                            'loss': np.mean(np.square(y[half:] - holdout_preds)),
                            'marg': deepIv.marginal_effect(np.array([[0.5] * d]), np.array([[1.0]]))})
        print(results)