def rnn_forward():
    """Check LSTMCell.forward against the Keras LSTM implementation.

    Builds a single-timestep LSTM in both frameworks, shares the custom
    cell's weights and initial states with the Keras layer, then prints
    the relative error between the two outputs (< 1e-5 expected).
    """
    import numpy as np
    from tensorflow.keras import layers, Model
    from nn.layers import LSTMCell
    from utils.tools import rel_error
    import tensorflow.keras.backend as K

    N, D, H = 3, 10, 4
    x = np.random.uniform(size=(N, D))
    prev_h = np.random.uniform(size=(N, H))
    prev_c = np.random.uniform(size=(N, H))

    lstm_cell = LSTMCell(in_features=D, units=H)
    out, cell = lstm_cell.forward([x, prev_c, prev_h])

    # Compare with the Keras implementation: a stateful LSTM whose initial
    # states are overwritten with the same values used above.
    keras_x = layers.Input(batch_shape=(N, None, D), name='x')
    hidden_states = K.variable(value=prev_h)
    cell_states = K.variable(value=prev_c)
    keras_rnn = layers.LSTM(units=H,
                            use_bias=False,
                            recurrent_activation='sigmoid',
                            stateful=True)(keras_x)
    keras_model = Model(inputs=keras_x, outputs=keras_rnn)
    # Keras orders LSTM states as [h, c].
    keras_model.layers[1].states[0] = hidden_states
    keras_model.layers[1].states[1] = cell_states
    keras_model.layers[1].set_weights(
        [lstm_cell.kernel, lstm_cell.recurrent_kernel])
    # Insert a length-1 time axis: (N, D) -> (N, 1, D).
    keras_out = keras_model.predict_on_batch([x[:, None, :]])
    # BUG FIX: get_weights is a method; the original printed the bound
    # method object instead of the actual weight arrays.
    print(keras_model.layers[1].get_weights())
    print('Relative error (<1e-5 will be fine): {}'.format(
        rel_error(keras_out, out)))
def pool_layer():
    """Check Pool2D.forward (max pooling) against Keras MaxPooling2D.

    The custom layer consumes NCHW input; the Keras reference runs in
    channels_last, so the data and its output are transposed around the
    comparison. Prints the relative error (< 1e-6 expected).
    """
    import numpy as np
    import warnings
    warnings.filterwarnings('ignore')
    from nn.layers import Pool2D
    from utils.tools import rel_error
    from tensorflow.keras import Sequential
    from tensorflow.keras.layers import MaxPooling2D

    # Renamed from `input` (shadowed the builtin) to `inputs`.
    inputs = np.random.uniform(size=(10, 3, 30, 30))
    keras_inputs = inputs.transpose(0, 2, 3, 1)  # NCHW -> NHWC
    params = {
        'pool_type': 'max',
        'pool_height': 4,
        'pool_width': 4,
        'pad': 2,
        'stride': 2,
    }
    pool = Pool2D(params)
    out = pool.forward(inputs)

    # padding='same' on the Keras side corresponds to pad=2 here
    # (4x4 window, stride 2 over a 30x30 map).
    keras_pool = Sequential([
        MaxPooling2D(pool_size=(params['pool_height'], params['pool_width']),
                     strides=params['stride'],
                     padding='same',
                     data_format='channels_last',
                     input_shape=keras_inputs.shape[1:])
    ])
    keras_out = keras_pool.predict(keras_inputs, batch_size=inputs.shape[0])
    print('Relative error (<1e-6 will be fine): ',
          rel_error(out, keras_out.transpose(0, 3, 1, 2)))
# ---- Fully connected layer: forward pass vs Keras Dense -----------------
# NOTE(review): np, keras, layers, optimizers, FCLayer, TemporalPooling,
# rel_error and check_grads_layer are assumed imported earlier in the file.
print('Testing Fully Connected Layer...')
inputs = np.random.uniform(size=(10, 3, 20))
fclayer = FCLayer(in_features=inputs.shape[-1], out_features=100)
out = fclayer.forward(inputs)

keras_model = keras.Sequential()
keras_layer = layers.Dense(100,
                           input_shape=inputs.shape[1:],
                           use_bias=True,
                           kernel_initializer='random_normal',
                           bias_initializer='zeros')
keras_model.add(keras_layer)
sgd = optimizers.SGD(lr=0.01)
# BUG FIX: pass the configured optimizer object; the string 'sgd' silently
# discarded the lr=0.01 set on the line above. (Prediction is unaffected,
# but the dead variable signalled the original intent.)
keras_model.compile(loss='mean_squared_error', optimizer=sgd)
keras_layer.set_weights([fclayer.weights, fclayer.bias])
keras_out = keras_model.predict(inputs, batch_size=inputs.shape[0])
print('Relative error (<1e-6 will be fine): ', rel_error(out, keras_out))

# Backward pass: numerical gradient check on the custom layer.
in_grads = np.random.uniform(size=(10, 3, 100))
check_grads_layer(fclayer, inputs, in_grads)

# ---- Temporal pooling: forward pass vs Keras GlobalAveragePooling1D -----
print('Testing TemporalPooling Layer...')
inputs = np.random.uniform(size=(10, 3, 20))
pooling_layer = TemporalPooling()
out = pooling_layer.forward(inputs)

keras_model = keras.Sequential()
keras_layer = layers.GlobalAveragePooling1D(input_shape=inputs.shape[1:])
keras_model.add(keras_layer)
sgd = optimizers.SGD(lr=0.01)
# Same fix as above: use the optimizer instance, not the 'sgd' string.
keras_model.compile(loss='mean_squared_error', optimizer=sgd)
keras_out = keras_model.predict(inputs, batch_size=inputs.shape[0])
print('Relative error (<1e-6 will be fine): ', rel_error(out, keras_out))

# Gradients for the pooled output; the check presumably continues past
# this chunk.
in_grads = np.random.uniform(size=(10, 20))
# ---- Bidirectional RNN: forward pass vs Keras Bidirectional(SimpleRNN) ---
# NOTE(review): N, T, D, H (and np, keras, layers, RNNCell,
# BidirectionalRNN, rel_error) are assumed to be defined earlier in the
# file, outside this chunk.
x = np.random.uniform(size=(N, T, D))
# Mark trailing timesteps of the first two sequences as padding (NaN) to
# exercise variable-length masking in the custom implementation.
x[0, -1:, :] = np.nan
x[1, -2:, :] = np.nan
h0 = np.random.uniform(size=(N, H))  # forward-direction initial state
hr = np.random.uniform(size=(N, H))  # backward-direction initial state
rnn_cell = RNNCell(in_features=D, units=H)
brnn = BidirectionalRNN(rnn_cell, h0=h0, hr=hr)
out = brnn.forward(x)

# Keras reference: NaN padding is replaced by zeros below and then masked
# out by the Masking layer (mask_value=0.).
keras_x = layers.Input(shape=(T, D), name='x')
keras_h0 = layers.Input(shape=(H, ), name='h0')
keras_hr = layers.Input(shape=(H, ), name='hr')
keras_x_masked = layers.Masking(mask_value=0.)(keras_x)
keras_rnn = layers.RNN(layers.SimpleRNNCell(H), return_sequences=True)
keras_brnn = layers.Bidirectional(keras_rnn, merge_mode='concat', name='brnn')(
    keras_x_masked, initial_state=[keras_h0, keras_hr])
keras_model = keras.Model(inputs=[keras_x, keras_h0, keras_hr],
                          outputs=keras_brnn)
# Copy the custom layer's parameters into the Keras layer in its expected
# order: forward kernel/recurrent/bias, then backward kernel/recurrent/bias.
keras_model.get_layer('brnn').set_weights([
    brnn.forward_rnn.kernel, brnn.forward_rnn.recurrent_kernel,
    brnn.forward_rnn.bias, brnn.backward_rnn.kernel,
    brnn.backward_rnn.recurrent_kernel, brnn.backward_rnn.bias
])
keras_out = keras_model.predict_on_batch([np.nan_to_num(x), h0, hr])
# Re-insert NaNs at padded timesteps so both outputs agree on masked steps
# before the comparison.
nan_indices = np.where(np.any(np.isnan(x), axis=2))
keras_out[nan_indices[0], nan_indices[1], :] = np.nan
print('Relative error (<1e-5 will be fine): {}'.format(
    rel_error(keras_out, out)))
# ---- Keras reference for the conv forward pass computed above -----------
# NOTE(review): `params`, `inputs`, `layer` and `out` come from earlier
# lines outside this chunk; the trailing commented-out pooling test also
# continues past it.
keras_model = keras.Sequential()
keras_layer = layers.Conv2D(filters=params['out_channel'],
                            kernel_size=(params['kernel_h'],
                                         params['kernel_w']),
                            strides=(params['stride'], params['stride']),
                            padding='valid',
                            data_format='channels_first',
                            input_shape=inputs.shape[1:])
keras_model.add(keras_layer)
sgd = optimizers.SGD(lr=0.01)
# NOTE(review): the `sgd` instance above is unused — the string 'sgd' is
# passed instead. Harmless for predict(), but likely unintended.
keras_model.compile(loss='mean_squared_error', optimizer='sgd')
# Custom weight layout is (out, in, kh, kw); Keras Conv2D expects
# (kh, kw, in, out).
weights = np.transpose(layer.weights, (2, 3, 1, 0))
keras_layer.set_weights([weights, layer.bias])
keras_out = keras_model.predict(inputs, batch_size=inputs.shape[0])
print('conv forward: Relative error (<1e-6 will be fine): ',
      rel_error(out, keras_out))
# Disabled pooling-layer test (kept as-is; it continues beyond this chunk):
# inputs = np.random.uniform(size=(10, 3, 30, 30))
# params = { 'pool_type': 'max',
#            'pool_height': 5,
#            'pool_width': 5,
#            'pad': 0,
#            'stride': 2,
#            }
# layer = Pooling(params)
# out = layer.forward(inputs)
#
# keras_model = keras.Sequential()
# keras_layer = layers.MaxPooling2D(pool_size=(params['pool_height'], params['pool_width']),
#                                   strides=params['stride'],
#                                   padding='valid',