Example #1
    def test_FBeq(self):
        # The H and zeta tensors returned by _compute_IO_HMM_est should
        # match the stored reference tensors.
        item_order = np.array([0, 1, 2])
        click_vector = np.array([1, 0, 0])

        vars_dic = {
            'phi_A': np.array([0.5, 0.4, 0.3]),
            'phi_S': np.array([0.9, 0.85, 0.9]),
            'gamma': np.array([0.7, 0.7, 0.7])
        }

        H, zeta = GCM._compute_IO_HMM_est(click_vector, vars_dic, item_order,
                                          self._model_def_czm, 0)

        with open("H_tensor.pl", "rb") as f:
            H_expected = pl.load(f)
        np.testing.assert_allclose(H, H_expected)

        with open("zeta_tensor.pl", "rb") as f:
            zeta_expected = pl.load(f)
        np.testing.assert_allclose(zeta, zeta_expected)
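
The reference files above are plain pickled NumPy arrays (the tests appear to use `pl` as an alias for the `pickle` module). A minimal round-trip sketch of how such a fixture can be written and read back, using a hypothetical file name and a stand-in array:

import pickle as pl
import numpy as np

# Stand-in array; a real fixture would hold the H or zeta tensor returned
# by GCM._compute_IO_HMM_est.
H = np.random.rand(4, 3, 3)

with open("example_tensor.pl", "wb") as f:  # hypothetical file name
    pl.dump(H, f)
with open("example_tensor.pl", "rb") as f:
    H_expected = pl.load(f)

np.testing.assert_allclose(H, H_expected)
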
Example #2
    def test_ubm_transmat_creation(self):
        # Construct the gamma matrix:
        item_order = [0, 1, 2]
        list_size = 3
        no_states = (list_size + 1) * 4 + 1

        np.random.seed(1992)
        A = np.triu(
            np.random.gamma(1, 1, (list_size + 1)**2).reshape(
                list_size + 1, list_size + 1)) / 10

        # Positive matrix for gamma:
        gamma_diag = np.vstack((np.hstack(
            (np.kron(A, np.tile(np.array([0, 1, 0, 0]), (4, 1))),
             np.zeros((no_states - 1, 1)))), np.zeros(no_states)))

        gamma_upper_tr = np.triu(
            np.tile(
                np.hstack((np.zeros(4), np.tile(A[3], list_size), np.zeros(1))),
                (no_states, 1)),
            k=4)

        gamma_pos = gamma_diag + gamma_upper_tr

        # Negative matrix for gamma:
        gamma_neg = np.vstack((np.hstack(
            (np.kron(1 - A, np.tile(np.array([1, 0, 1, 0]), (4, 1))),
             np.zeros((no_states - 1, 1)))), np.zeros(no_states)))

        vars_dic = {
            'phi_A': np.array([0.5, 0.4, 0.3]),
            'gamma': np.tile((gamma_pos + gamma_neg).flatten(), 3)
        }

        trans_matrices = GCM._get_trans_mat(self._model_def_ubm,
                                            vars_dic,
                                            item_order,
                                            i=0)

        with open("ubm_trans.pl", "rb") as f:
            trans_matrices_expected = pl.load(f)
        np.testing.assert_allclose(trans_matrices, trans_matrices_expected)
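
For reference, the dimensions used in this test work out as follows (a small standalone check mirroring the arithmetic above):

list_size = 3
no_states = (list_size + 1) * 4 + 1         # = 17
gamma_len = no_states ** 2                  # = 289, one flattened (17, 17) transition matrix
print(no_states, gamma_len, 3 * gamma_len)  # 17 289 867 == len(vars_dic['gamma'])
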
Example #3
    click_mat = click_data.loc[:, ['session', 'item_order', 'click']] \
        .pivot(index='session', columns='item_order', values='click') \
        .to_numpy()

    item_pos_mat = click_data.loc[:, ['session', 'item_order', 'item']] \
        .pivot(index='session', columns='item_order', values='item') \
        .to_numpy()

    # Ensure that the order is correct
    item_feature_mat_A = pd.get_dummies(
        click_data['item'].sort_values().unique())

    model_phi_A = Sequential()
    model_phi_A.add(
        Dense(1,
              input_dim=item_feature_mat_A.shape[1],
              activation='sigmoid',
              use_bias=False))
    model_phi_A.compile(loss=GCM.pos_log_loss, optimizer=RMSprop())

    var_dic = {'phi_A': item_feature_mat_A}
    var_models = {'phi_A': model_phi_A}

    res = GCM.runEM(click_mat,
                    var_dic,
                    var_models,
                    item_pos_mat,
                    model_def,
                    verbose=True,
                    n_jobs=1)

    print(res[2])
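
The snippet assumes a long-format click log with `session`, `item_order`, `item` and `click` columns. A minimal sketch with hypothetical data showing what the two pivots produce:

import pandas as pd

click_data = pd.DataFrame({
    'session':    [0, 0, 0, 1, 1, 1],
    'item_order': [0, 1, 2, 0, 1, 2],   # position of the item in the result list
    'item':       [5, 2, 7, 2, 5, 7],   # item identifiers
    'click':      [1, 0, 0, 0, 1, 0],
})

# One row per session, one column per position.
click_mat = click_data.pivot(index='session', columns='item_order',
                             values='click').to_numpy()
item_pos_mat = click_data.pivot(index='session', columns='item_order',
                                values='item').to_numpy()

print(click_mat)     # [[1 0 0], [0 1 0]]
print(item_pos_mat)  # [[5 2 7], [2 5 7]]
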
Example #4
    model_gamma = Sequential()
    # First compute the kernel
    model_gamma.add(
        Dense(var_dic['gamma'].shape[1],
              use_bias=False,
              activation=alt_softmax,
              kernel_initializer=gamma_initializer,
              kernel_constraint=LowerDiagWeight()))
    model_gamma.compile(loss=GCM.pos_log_loss, optimizer=RMSprop())

    model_tau = Sequential()
    model_tau.add(
        Dense(var_dic['tau'].shape[1],
              input_dim=var_dic['tau'].shape[1],
              activation=None,
              use_bias=False,
              kernel_initializer=Identity(),
              trainable=False))
    # No trainable weights, so the optimizer/loss choice doesn't really matter.
    model_tau.compile('rmsprop', 'binary_crossentropy')

    var_models = {'phi_A': model_phi_A, 'gamma': model_gamma, 'tau': model_tau}

    res = GCM.runEM(click_mat.to_numpy(),
                    var_dic,
                    var_models,
                    item_pos_mat.to_numpy(),
                    model_def,
                    verbose=True)

    print(res[2])
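
The `tau` model above is a frozen identity mapping: a square, bias-free, non-trainable `Dense` layer initialized with the identity kernel simply passes its input through, so no weights are ever updated for `tau`. A standalone sketch of that behaviour (assuming `tf.keras`; the dimension `n` is a stand-in for `var_dic['tau'].shape[1]`):

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.initializers import Identity

n = 4  # stand-in for var_dic['tau'].shape[1]
model_tau = Sequential([
    Dense(n, input_dim=n, activation=None, use_bias=False,
          kernel_initializer=Identity(), trainable=False)
])

x = np.random.rand(2, n).astype('float32')
# The output equals the input: the layer is a fixed pass-through.
np.testing.assert_allclose(model_tau.predict(x), x, rtol=1e-6)
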
Example #5
    model_phi_A = Sequential()
    model_phi_A.add(
        Dense(1,
              input_dim=item_feature_mat_A.shape[1],
              activation='sigmoid',
              use_bias=False))
    model_phi_A.compile(loss=GCM.pos_log_loss, optimizer=Adagrad())

    model_gamma = Sequential()
    # First compute the kernel
    model_gamma.add(
        Dense(1,
              input_dim=item_feature_mat_gamma.shape[1],
              activation='sigmoid',
              use_bias=False))
    # Repeat the single gamma prediction no_states**2 times, i.e. once per
    # entry of the flattened transition matrix.
    model_gamma.add(RepeatVector(no_states**2))
    model_gamma.compile(loss=GCM.pos_log_loss, optimizer=Adagrad())

    var_dic = {'phi_A': item_feature_mat_A, 'gamma': item_feature_mat_gamma}
    var_models = {'phi_A': model_phi_A, 'gamma': model_gamma}

    res = GCM.runEM(click_mat,
                    var_dic,
                    var_models,
                    item_pos_mat,
                    model_def,
                    verbose=True,
                    earlystop_patience=10,
                    n_jobs=1)

    with open("./data/small_example/state_prob.pl", "wb") as f:
        pl.dump(res[1], f)
    with open("./data/small_example/convergence.pl", "wb") as f:
        pl.dump(res[2], f)
    with open("./data/small_example/click_probs.pl", "wb") as f:
        pl.dump(res[3], f)

    print(res[2])
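
The dumped results can later be reloaded the same way (same paths as above; `pl` is assumed to be the `pickle` module):

import pickle as pl

with open("./data/small_example/convergence.pl", "rb") as f:
    convergence = pl.load(f)
with open("./data/small_example/click_probs.pl", "rb") as f:
    click_probs = pl.load(f)

print(convergence)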