Example #1
    def __init__(self,
                 seqs,
                 adj,
                 nodes_features,
                 epochs,
                 key,
                 use_gcn,
                 batch_size,
                 use_gru=True):
        self.epochs = epochs
        self.seqs = seqs.astype('float32')
        self.seqs_noised = seqs.copy().astype('float32')
        self.max_s = seqs[key].max()
        # overwrite the target node's column with Gaussian noise
        # (mean max_s / 2, std max_s / 10)
        self.seqs_noised[key] = np.random.normal(
            self.max_s / 2.0, self.max_s / 10.0,
            size=(seqs.shape[0])).astype('float32')
        self.key = key

        # `lr` and `adam_beta_1` are assumed to be module-level
        # hyperparameters; note that SGD's second positional argument is
        # momentum, so beta_1 is reused as the momentum term here
        self.gen_optimizer = SGD(lr, adam_beta_1)
        self.desc_optimizer = SGD(lr, adam_beta_1)

        # normalized graph Laplacian and node features, plus batched copies
        # with a leading axis for the models
        self.adj = normalized_laplacian(adj.astype('float32'))
        self.adj_expanded = tf.expand_dims(self.adj, axis=0)
        self.nodes_features = nodes_features.astype('float32')
        self.nodes_f_expanded = tf.expand_dims(self.nodes_features, axis=0)
        self.generator = model.make_generator('generator', batch_size,
                                              self.adj, self.nodes_features,
                                              use_gcn, use_gru)
        self.discriminator = model.make_discriminator('discriminator',
                                                      batch_size, self.adj,
                                                      self.nodes_features,
                                                      use_gcn, use_gru)
        self.d_loss_fn, self.g_loss_fn = losses.get_wasserstein_losses_fn()
        # per-epoch histories of losses and evaluation metrics
        self.wsst_hist = []
        self.cos_simi = []
        self.var_hist = []
        self.rmse_hist = []
        self.mae_hist = []
        self.r2_hist = []
        self.g_loss_hist = []
        self.d_loss_hist = []
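
All of the examples on this page call normalized_laplacian on an adjacency
matrix. For reference, here is a minimal NumPy sketch, assuming the symmetric
normalization L = I - D^{-1/2} A D^{-1/2} and a dense adjacency matrix (real
library implementations also handle sparse input):

import numpy as np

def normalized_laplacian_sketch(A):
    # L = I - D^{-1/2} A D^{-1/2} for a dense adjacency matrix A
    d = np.asarray(A).sum(axis=1)
    with np.errstate(divide='ignore'):
        d_inv_sqrt = np.power(d, -0.5)
    d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.0  # guard isolated (degree-0) nodes
    return np.eye(A.shape[0]) - d_inv_sqrt[:, None] * A * d_inv_sqrt[None, :]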
Example #2
    def __call__(self, graph):
        if "a" not in graph:
            raise ValueError("The graph must have an adjacency matrix")
        assert (
            self.k < graph.n_nodes
        ), f"k = {self.k} must be smaller than graph.n_nodes = {graph.n_nodes}"

        # eigenvectors for the k smallest eigenvalues of the normalized Laplacian
        l = normalized_laplacian(graph.a)
        _, eigvec = eigsh(l, k=self.k, which="SM")

        if "x" not in graph:
            graph.x = eigvec
        else:
            graph.x = np.concatenate((graph.x, eigvec), axis=-1)

        return graph
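
The transform above appends Laplacian eigenvectors as extra node features, a
common positional-encoding trick. A standalone sketch of the same computation;
laplacian_eigenvector_features is a hypothetical helper name, and eigsh with
which="SM" returns the eigenpairs with the smallest eigenvalues of the
positive semidefinite Laplacian:

import numpy as np
from scipy.sparse.linalg import eigsh

def laplacian_eigenvector_features(l, k):
    # eigenvectors for the k smallest eigenvalues of a normalized Laplacian l
    _, eigvec = eigsh(l, k=k, which="SM")
    return eigvec  # shape: (n_nodes, k)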
Example #3
File: estimator.py Project: cgarciae/nfl
def process_data(df, params, toy):

    if toy:
        plays = cz.take(10, df.groupby("PlayId"))
    else:
        plays = df.groupby("PlayId")

    plays = list(plays)

    Xs = []
    As = []
    Es = []

    for play_id, df_play in tqdm(plays, desc="Processing Plays"):

        # pairwise squared distances between players -> Cauchy-style edge
        # weights 1 / (1 + d^2), with the diagonal zeroed (no self-loops)
        pos = df_play[["X", "Y"]].to_numpy()
        A = np.sum((pos[:, np.newaxis, :] - pos[np.newaxis, :, :])**2, axis=-1)
        A = 1.0 / (1.0 + A) * (1 - np.eye(22))
        As.append(normalized_laplacian(A))

        # edge features: the raw similarity weights as a single channel
        E = np.expand_dims(A, axis=-1)
        Es.append(E)

        # features
        features = dict(
            X=df_play["X"].to_numpy(),
            Y=df_play["Y"].to_numpy(),
            S=df_play["S"].to_numpy(),
            A=df_play["A"].to_numpy(),
            Orientation=df_play["Orientation"].to_numpy(),
            Dir=df_play["Dir"].to_numpy(),
            Team=df_play["Team"].to_numpy(),
            NflId=df_play["NflId"].to_numpy(),
            is_rusher=df_play["is_rusher"].to_numpy(),
            Yards=df_play["Yards"].to_numpy(),
        )
        Xs.append(features)

    Xs = {
        feature: np.stack([x[feature] for x in Xs], axis=0)
        for feature in Xs[0]
    }
    Es = np.stack(Es, axis=0).astype(np.float32)
    As = np.stack(As, axis=0).astype(np.float32)

    return Xs, As, Es
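
The interesting step above is the graph construction: pairwise squared
distances between the 22 players are mapped to edge weights with a Cauchy-style
kernel 1 / (1 + d^2), and the diagonal is zeroed so there are no self-loops. A
toy check of just that step, using random positions in place of real tracking
data:

import numpy as np

pos = np.random.rand(22, 2)  # 22 players with (X, Y) coordinates
d2 = np.sum((pos[:, None, :] - pos[None, :, :]) ** 2, axis=-1)
A = 1.0 / (1.0 + d2) * (1 - np.eye(22))  # symmetric weights in (0, 1], zero diagonal
assert A.shape == (22, 22) and np.allclose(A, A.T) and np.all(np.diag(A) == 0)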
Example #4
def preprocess(a):
    a = normalized_laplacian(a)
    a = rescale_laplacian(a)
    return a
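
rescale_laplacian itself is not shown on this page. In Chebyshev/GCN-style
pipelines the conventional rescaling maps the Laplacian's spectrum from
[0, lmax] to [-1, 1]; a hedged sketch of that convention (the actual library
function may differ in sparse handling and in how lmax is estimated):

import numpy as np

def rescale_laplacian_sketch(L, lmax=2.0):
    # map eigenvalues from [0, lmax] to [-1, 1]: L' = (2 / lmax) * L - I
    return (2.0 / lmax) * L - np.eye(L.shape[0])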
Example #5
# Parameters
l2_reg = 5e-4         # Regularization rate for l2
learning_rate = 1e-3  # Learning rate for SGD
batch_size = 32       # Batch size
epochs = 20000        # Number of training epochs
es_patience = 200     # Patience for early stopping

# Load data
X_train, y_train, X_val, y_val, X_test, y_test, adj = mnist.load_data()
X_train, X_val, X_test = X_train[..., None], X_val[..., None], X_test[..., None]
N = X_train.shape[-2]      # Number of nodes in the graphs
F = X_train.shape[-1]      # Node features dimensionality
n_out = y_train.shape[-1]  # Dimension of the target

fltr = normalized_laplacian(adj)

# Model definition
X_in = Input(shape=(N, F))
# Pass A as a fixed tensor, otherwise Keras will complain about inputs of
# different rank.
A_in = Input(tensor=sp_matrix_to_sp_tensor(fltr))

graph_conv = GraphConv(32,
                       activation='elu',
                       kernel_regularizer=l2(l2_reg),
                       use_bias=True)([X_in, A_in])
graph_conv = GraphConv(32,
                       activation='elu',
                       kernel_regularizer=l2(l2_reg),
                       use_bias=True)([graph_conv, A_in])
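
The snippet stops after the second convolution. A hypothetical continuation in
the usual Keras pattern (the pooling and classification head below are
assumptions, not part of the original example) would flatten the node features
and add a dense output layer:

from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam

flatten = Flatten()(graph_conv)  # (batch, N * 32)
output = Dense(n_out, activation='softmax')(flatten)  # hypothetical head

model = Model(inputs=[X_in, A_in], outputs=output)
model.compile(optimizer=Adam(learning_rate),
              loss='categorical_crossentropy',
              metrics=['acc'])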
Example #6
def preprocess(A):
    L = normalized_laplacian(A)
    L = rescale_laplacian(L)
    return L
Example #7
def preprocess(A):
    fltr = normalized_laplacian(A, symmetric=True)
    fltr = rescale_laplacian(fltr, lmax=2)
    return fltr
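
Passing lmax=2 here skips any eigenvalue estimation: with symmetric=True, the
normalized Laplacian's eigenvalues are guaranteed to lie in [0, 2], so 2 is a
safe upper bound and the rescaled spectrum lands in [-1, 1].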
Example #8
# Parameters
ARMA_T = 1              # Depth of each ARMA_1 filter
ARMA_K = 2              # Number of parallel ARMA_1 filters
recurrent = True        # Share weights like a recurrent net in each head
N = X.shape[0]          # Number of nodes in the graph
F = X.shape[1]          # Original feature dimensionality
n_classes = y.shape[1]  # Number of classes
dropout_rate = 0.75     # Dropout rate applied to the input of GCN layers
l2_reg = 5e-4           # Regularization rate for l2
learning_rate = 1e-2    # Learning rate for SGD
epochs = 20000          # Number of training epochs
es_patience = 200       # Patience for early stopping

# Preprocessing operations
fltr = normalized_laplacian(A, symmetric=True)
fltr = rescale_laplacian(fltr, lmax=2)

# Model definition
X_in = Input(shape=(F,))
fltr_in = Input((N,), sparse=True)

dropout_1 = Dropout(dropout_rate)(X_in)
graph_conv_1 = ARMAConv(16,
                        T=ARMA_T,
                        K=ARMA_K,
                        recurrent=recurrent,
                        dropout_rate=dropout_rate,
                        activation='elu',
                        gcn_activation='elu',
                        kernel_regularizer=l2(l2_reg))([dropout_1, fltr_in])
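
As in Example #5, the model definition is cut off. A hypothetical continuation
mirroring the pattern above (a second dropout, an output ARMAConv sized to
n_classes, then the usual compile step; all of it an assumption, not the
original code):

from tensorflow.keras.layers import Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam

dropout_2 = Dropout(dropout_rate)(graph_conv_1)
graph_conv_2 = ARMAConv(n_classes,
                        T=ARMA_T,
                        K=ARMA_K,
                        recurrent=recurrent,
                        dropout_rate=dropout_rate,
                        activation='softmax',
                        kernel_regularizer=l2(l2_reg))([dropout_2, fltr_in])

model = Model(inputs=[X_in, fltr_in], outputs=graph_conv_2)
model.compile(optimizer=Adam(learning_rate),
              loss='categorical_crossentropy',
              weighted_metrics=['acc'])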