def knockoff_stats(X, y, met, epoch, loss='mean_squared_error', verb=2):
    """Train a DeepPINK network and return the knockoff statistics W_j.

    Parameters
    ----------
    X : array of shape (n_samples, 2*p)
        Original features concatenated with their knockoff copies.
    y : array of shape (n_samples,)
        Response vector.
    met : str or callable
        Activation for the hidden Dense layer (e.g. 'relu').
    epoch : int
        Number of training epochs.
    loss : str, optional
        Keras loss name passed to compile() (default 'mean_squared_error').
    verb : int, optional
        Verbosity level forwarded to Keras fit() (default 2).

    Returns
    -------
    W : ndarray of shape (p,)
        Knockoff statistics; a large positive W_j suggests feature j matters.
    """
    p = X.shape[1] // 2

    # DeepPINK architecture: a pairwise-connected filter layer (one weight per
    # original feature and one per knockoff) followed by an L1-regularized MLP.
    dp = Sequential()
    dp.add(PairwiseConnected(input_shape=(2 * p, )))
    dp.add(Dense(p, activation=met,
                 kernel_regularizer=keras.regularizers.l1(0.01)))
    dp.add(Dense(1, activation=None,
                 kernel_regularizer=keras.regularizers.l1(0.01)))
    dp.compile(loss=loss, optimizer=keras.optimizers.Adam())
    dp.fit(X, y, epochs=epoch, batch_size=32, verbose=verb)

    # Knockoff statistic: W_j = (w_j * z_j)^2 - (w_j * z~_j)^2, where z / z~
    # are the pairwise-filter weights of the original / knockoff features and
    # w_j is the path weight through the MLP (hidden @ output).
    weights = dp.get_weights()
    w = weights[1] @ weights[3]
    w = w.reshape(p, )
    z = weights[0][:p]        # filter weights for original features
    z_tilde = weights[0][p:]  # filter weights for knockoff copies
    W = (w * z)**2 - (w * z_tilde)**2
    return W
def knockoff_stats(X, y, met, epoch, l1, lr, loss='mean_squared_error', verb=2):
    """Fit a DeepPINK model with early stopping and compute statistics W_j.

    Parameters
    ----------
    X : array of shape (n_samples, 2*p)
        Original features stacked side by side with their knockoffs.
    y : array of shape (n_samples,)
        Response vector.
    met : str or callable
        Hidden-layer activation.
    epoch : int
        Maximum number of training epochs (early stopping may end sooner).
    l1 : float
        L1 penalty applied to the hidden layer's kernel.
    lr : float
        Learning rate for the Adam optimizer.
    loss : str, optional
        Keras loss name (default 'mean_squared_error').
    verb : int, optional
        Keras fit() verbosity (default 2).

    Returns
    -------
    ndarray of shape (p,)
        Knockoff statistics W_j.
    """
    p = X.shape[1] // 2

    # Halt training once validation loss has not improved for 30 epochs.
    early_stop = EarlyStopping(monitor='val_loss', patience=30, verbose=2)

    # Pairwise-connected filter followed by an L1-penalized hidden layer
    # and a linear output unit.
    model = Sequential()
    model.add(PairwiseConnected(input_shape=(2 * p, )))
    model.add(Dense(p, activation=met,
                    kernel_regularizer=keras.regularizers.l1(l1=l1)))
    model.add(Dense(1, activation=None))
    model.compile(loss=loss, optimizer=keras.optimizers.Adam(learning_rate=lr))
    model.fit(X, y, epochs=epoch, batch_size=32, verbose=verb,
              validation_split=0.1, callbacks=[early_stop])

    # W_j = (w_j z_j)^2 - (w_j z~_j)^2: compare how much signal flows through
    # each original feature versus its knockoff copy.
    learned = model.get_weights()
    path_w = (learned[1] @ learned[3]).reshape(p, )
    z_orig = learned[0][:p]
    z_knock = learned[0][p:]
    return (path_w * z_orig)**2 - (path_w * z_knock)**2
# Seed NumPy's RNG so that the train/test split drawn below with
# np.random.choice is reproducible per replicate i.
# BUG FIX: the original called random.seed(58 * i), which seeds the stdlib
# `random` module only and has no effect on np.random — the split was not
# actually reproducible.
np.random.seed(58 * i)

######### load data #########
# Random train/test split: n_train rows without replacement for training,
# the complement for testing.
ind_train = np.random.choice(np.arange(n), n_train, False)
ind_test = list(set(np.arange(n)) - set(ind_train))
Xnew_train = Xnew[ind_train, :]
Xnew_test = Xnew[ind_test, :]
y_train = y[ind_train]
y_test = y[ind_test]
indmat[:, i] = ind_train  # record this replicate's training indices
#############################

# DeepPINK-style network: pairwise filter, elastic-net-regularized hidden
# layer with dropout, and a ReLU output unit.
dp = Sequential()
dp.add(PairwiseConnected(input_shape=(2 * d, )))
dp.add(Dense(d, activation=dnn_met,
             kernel_regularizer=keras.regularizers.l1_l2(l1=0.001, l2=0.001)))
dp.add(Dropout(0.4))  # play with this number, such as 0.4, 0.6, 0.7
dp.add(Dense(1, activation='relu',
             kernel_regularizer=keras.regularizers.l1_l2(l1=0.001, l2=0.001)))
dp.compile(loss=dnn_loss, optimizer=keras.optimizers.Adam())
dp.fit(Xnew_train, y_train, epochs=dnn_epoch, batch_size=bs, verbose=dnn_verb)