def __init__(self, filters=1, kernel_size=80, rank=1, strides=1,
             padding='valid', data_format='channels_last', dilation_rate=1,
             activation=None, use_bias=True, fsHz=1000.,
             fc_initializer=None,
             n_order_initializer=None,
             amp_initializer=None,
             beta_initializer=None,
             bias_initializer='zeros', **kwargs):
    """Configure a 1-D gammatone-filterbank convolution layer.

    Args:
        filters: number of output filters.
        kernel_size: length of each 1-D kernel in samples; together with
            ``fsHz`` it fixes the time axis ``self.t`` of the gammatone
            impulse response.
        rank: spatial rank of the convolution (1 for Conv1D).
        strides, padding, data_format, dilation_rate: standard Keras conv
            options, normalized via ``conv_utils``.
        activation: activation function name/callable (``activations.get``).
        use_bias: whether a bias term is added.
        fsHz: sampling frequency in Hz used to build the time vector.
        fc_initializer: initializer for the centre frequencies; defaults to
            ``RandomUniform(10, 400)``.
        n_order_initializer: initializer for the filter order; defaults to
            ``constant(4.)``.
        amp_initializer: initializer for the amplitude; defaults to
            ``constant(1e5)``.
        beta_initializer: initializer for the bandwidth parameter; defaults
            to ``RandomNormal(mean=30, stddev=6)``.
        bias_initializer: initializer for the bias vector.
        **kwargs: forwarded to the base ``Layer``.
    """
    super(Conv1D_gammatone, self).__init__(**kwargs)
    # NOTE(review): the original signature created initializer *instances* as
    # default argument values. Defaults are evaluated once at def-time, so
    # every layer built with the defaults shared the same initializer objects
    # (the classic mutable-default pitfall, flake8-bugbear B008). The None
    # sentinels below construct identical defaults per call instead.
    if fc_initializer is None:
        fc_initializer = initializers.RandomUniform(minval=10, maxval=400)
    if n_order_initializer is None:
        n_order_initializer = initializers.constant(4.)
    if amp_initializer is None:
        amp_initializer = initializers.constant(10 ** 5)
    if beta_initializer is None:
        beta_initializer = initializers.RandomNormal(mean=30, stddev=6)
    self.rank = rank
    self.filters = filters
    # Raw (un-normalized) kernel size is kept separately: it is needed as a
    # scalar below to build the time vector.
    self.kernel_size_ = kernel_size
    self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')
    self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank, 'dilation_rate')
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.bias_initializer = initializers.get(bias_initializer)
    self.fc_initializer = initializers.get(fc_initializer)
    self.n_order_initializer = initializers.get(n_order_initializer)
    self.amp_initializer = initializers.get(amp_initializer)
    self.beta_initializer = initializers.get(beta_initializer)
    self.input_spec = InputSpec(ndim=self.rank + 2)
    self.fsHz = fsHz
    # Time axis of the gammatone impulse response: kernel_size samples at
    # 1/fsHz spacing, shaped (kernel_size, 1) for later broadcasting.
    self.t = tf.range(start=0, limit=kernel_size / float(fsHz),
                      delta=1 / float(fsHz), dtype=K.floatx())
    self.t = tf.expand_dims(input=self.t, axis=-1)
def __init__(self, vocab_size, embed_dim, hidden_size=256, log_dir="./logs"):
    """Build a bidirectional CuDNNLSTM language model with tied embeddings.

    Architecture: embedding -> dropout -> 2x (Bi-CuDNNLSTM(hidden_size) +
    dropout) -> Bi-CuDNNLSTM(embed_dim // 2) -> dropout -> time-distributed
    softmax that reuses (transposes) the embedding matrix.

    Args:
        vocab_size: size of the token vocabulary.
        embed_dim: embedding dimensionality; the last LSTM uses
            ``embed_dim // 2`` units per direction so the bidirectional
            output matches the embedding width for weight tying.
        hidden_size: units per direction in the first two LSTM layers.
        log_dir: directory for training logs (stored, not used here).
    """
    self.log_dir = log_dir
    init_embed = initializers.RandomUniform(minval=-0.1, maxval=0.1, seed=None)
    # Scale LSTM init by 1/sqrt(hidden_size), a common fan-in heuristic.
    init_lstm_norm = 1 / np.sqrt(hidden_size)
    init_lstm = initializers.RandomUniform(minval=-init_lstm_norm, maxval=init_lstm_norm)
    self.model = Sequential()
    self.model.add(
        Embedding(vocab_size, embed_dim, input_length=None, name="embedding", embeddings_initializer=init_embed)
    )
    self.model.add(Dropout(0.2))
    self.model.add(
        Bidirectional(
            CuDNNLSTM(
                hidden_size, return_sequences=True, kernel_initializer=init_lstm, recurrent_initializer=init_lstm
            )
        )
    )
    self.model.add(Dropout(0.3))
    self.model.add(
        Bidirectional(
            CuDNNLSTM(
                hidden_size, return_sequences=True, kernel_initializer=init_lstm, recurrent_initializer=init_lstm
            )
        )
    )
    self.model.add(Dropout(0.3))
    self.model.add(Bidirectional(CuDNNLSTM(embed_dim // 2, return_sequences=True)))
    self.model.add(Dropout(0.3))
    # Output projection tied to the embedding weights (transposed), with a
    # per-timestep softmax over the vocabulary.
    self.model.add(
        TimeDistributed(
            TiedEmbeddingsTransposed(tied_to=self.model.get_layer(name="embedding"), activation="softmax")
        )
    )
    self.optim = optimizers.SGD(lr=1.0, decay=1e-4, momentum=0.9, nesterov=True, clipnorm=0.5)
    self.model.compile(loss="sparse_categorical_crossentropy", optimizer=self.optim, metrics=["accuracy"])
    # BUGFIX: Model.summary() prints the table itself and returns None, so
    # the original print(self.model.summary()) emitted a stray "None" line.
    self.model.summary()
# Cross-validated training over five pre-split folds ("1".."5") of the
# 100k generated dataset.
# NOTE(review): this fragment appears truncated — tot_mae/tot_acc are never
# appended to in view, and no compile/fit/evaluate follows the model
# definition; confirm the remainder of the loop body elsewhere in the file.
tot_mae = []
tot_acc = []
for i in ["1", "2", "3", "4", "5"]:
    model = Sequential()
    # Fold-specific train/test split, loaded by a project helper.
    X_train, y_train, X_test, y_test = read_data_100k_gen_dataset(i)
    # Standardize features with statistics fitted on the training fold only,
    # then apply the same transform to the test fold (no leakage).
    model_scalar = StandardScaler()
    model_scalar.fit(X_train)
    X_train = model_scalar.transform(X_train)
    X_test = model_scalar.transform(X_test)
    initVar = initializers.RandomUniform()
    inputDimen = np.shape(X_train)[1]
    outputDimen = 5
    # Frozen (trainable=False) randomly-initialized linear projection to 1000
    # units — presumably an ELM-style fixed random feature map; only the
    # layers after it are trained. TODO confirm intent.
    model.add(
        Dense(units=1000, input_dim=inputDimen, trainable=False, kernel_initializer=initVar))
    model.add(Activation('linear'))
    model.add(Dense(units=2000))
    model.add(Activation('relu'))
    # Final layer has no activation in view; assumes a regression-style head
    # or that an activation/loss is attached later — TODO confirm.
    model.add(Dense(units=outputDimen))