@classmethod
def _make_network(cls, input_layer, waveform_length, filters_size, n_neigh,
                  padding):
    """Makes the tensorflow network, from the input layer to the second
    temporal layer
    """
    K1, K2 = filters_size

    W1 = weight_variable([waveform_length, 1, 1, K1])
    b1 = bias_variable([K1])

    W11 = weight_variable([1, 1, K1, K2])
    b11 = bias_variable([K2])

    W2 = weight_variable([1, n_neigh, K2, 1])
    b2 = bias_variable([1])

    vars_dict = {"W1": W1, "W11": W11, "W2": W2,
                 "b1": b1, "b11": b11, "b2": b2}

    # first temporal layer
    # FIXME: old training code was using conv2d_VALID, old graph building
    # for prediction was using conv2d, that's why the padding parameter
    # is needed, otherwise it breaks. we need to fix it
    layer1 = tf.nn.relu(conv2d(input_layer, W1, padding) + b1)

    # second temporal layer
    layer11 = tf.nn.relu(conv2d(layer1, W11) + b11)

    return vars_dict, layer11
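# A minimal usage sketch for _make_network (hypothetical shapes and class
# name; weight_variable, bias_variable and conv2d are the helpers this
# module already defines). The input layer is NHWC, with the waveform on
# the "height" axis and the neighboring channels on the "width" axis:
#
#     x_tf = tf.placeholder("float", [None, 61, 7, 1])
#     vars_dict, layer11 = SomeDetector._make_network(
#         x_tf, waveform_length=61, filters_size=(16, 8), n_neigh=7,
#         padding='SAME')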
def __init__(self, path_to_triage_model):
    """
    Initializes the attributes for the class NeuralNetTriage.

    Parameters:
    -----------
    path_to_triage_model: str
        location of the trained neural net triage model
    """
    self.path_to_triage_model = path_to_triage_model

    path_to_filters = change_extension(path_to_triage_model, 'yaml')
    self.filters_dict = load_yaml(path_to_filters)

    R1 = self.filters_dict['size']
    K1, K2 = self.filters_dict['filters']
    C = self.filters_dict['n_neighbors']

    self.W1 = weight_variable([R1, 1, 1, K1])
    self.b1 = bias_variable([K1])

    self.W11 = weight_variable([1, 1, K1, K2])
    self.b11 = bias_variable([K2])

    self.W2 = weight_variable([1, C, K2, 1])
    self.b2 = bias_variable([1])

    self.saver = tf.train.Saver({
        "W1": self.W1, "W11": self.W11, "W2": self.W2,
        "b1": self.b1, "b11": self.b11, "b2": self.b2
    })
def __init__(self, path_to_detector_model, path_to_ae_model):
    """
    Initializes the attributes for the class NeuralNetDetector.

    Parameters:
    -----------
    path_to_detector_model: str
        location of the trained neural net detector
    path_to_ae_model: str
        location of the trained autoencoder
    """
    self.path_to_detector_model = path_to_detector_model
    self.path_to_ae_model = path_to_ae_model

    path_to_filters = change_extension(path_to_detector_model, 'yaml')
    self.filters_dict = load_yaml(path_to_filters)

    R1 = self.filters_dict['size']
    K1, K2 = self.filters_dict['filters']
    C = self.filters_dict['n_neighbors']

    self.W1 = weight_variable([R1, 1, 1, K1])
    self.b1 = bias_variable([K1])

    self.W11 = weight_variable([1, 1, K1, K2])
    self.b11 = bias_variable([K2])

    self.W2 = weight_variable([1, C, K2, 1])
    self.b2 = bias_variable([1])

    # output of ae encoding (1st layer)
    path_to_filters_ae = change_extension(path_to_ae_model, 'yaml')
    ae_dict = load_yaml(path_to_filters_ae)
    n_input = ae_dict['n_input']
    n_features = ae_dict['n_features']
    self.W_ae = tf.Variable(
        tf.random_uniform((n_input, n_features), -1.0 / np.sqrt(n_input),
                          1.0 / np.sqrt(n_input)))

    self.saver_ae = tf.train.Saver({"W_ae": self.W_ae})
    self.saver = tf.train.Saver({
        "W1": self.W1, "W11": self.W11, "W2": self.W2,
        "b1": self.b1, "b11": self.b11, "b2": self.b2
    })
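# A minimal usage sketch (hypothetical checkpoint names), assuming a
# matching .yaml parameter file sits next to each .ckpt file. Note the two
# separate savers: one for the detector weights, one for the autoencoder:
#
#     nnd = NeuralNetDetector('detect_nn1.ckpt', 'ae_nn1.ckpt')
#     with tf.Session() as sess:
#         nnd.saver.restore(sess, nnd.path_to_detector_model)
#         nnd.saver_ae.restore(sess, nnd.path_to_ae_model)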
def __init__(self, path_to_detector_model):
    """
    Initializes the attributes for the class NeuralNetDetector.

    Parameters:
    -----------
    path_to_detector_model: str
        location of trained neural net detector
    """
    # add location as an attribute
    self.path_to_detector_model = path_to_detector_model

    # load nn parameter file
    path_to_filters = change_extension(path_to_detector_model, 'yaml')
    self.filters_dict = load_yaml(path_to_filters)

    # initialize neural net weights and add as attributes
    R1 = self.filters_dict['size']
    K1, K2 = self.filters_dict['filters']
    C = self.filters_dict['n_neighbors']

    self.W1 = weight_variable([R1, 1, 1, K1])
    self.b1 = bias_variable([K1])

    self.W11 = weight_variable([1, 1, K1, K2])
    self.b11 = bias_variable([K2])

    self.W2 = weight_variable([1, C, K2, 1])
    self.b2 = bias_variable([1])

    # create saver variables
    self.saver = tf.train.Saver({
        "W1": self.W1, "W11": self.W11, "W2": self.W2,
        "b1": self.b1, "b11": self.b11, "b2": self.b2
    })
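# The loader above expects a yaml file next to the checkpoint, e.g.
# detect_nn1.yaml (hypothetical name and values) with at least these keys,
# which the code reads as R1, (K1, K2) and C respectively:
#
#     size: 61
#     filters: [16, 8]
#     n_neighbors: 7
#
#     detector = NeuralNetDetector('detect_nn1.ckpt')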
def __init__(self, path_to_triage_model):
    """
    Initializes the attributes for the class NeuralNetTriage.

    Parameters:
    -----------
    path_to_triage_model: str
        location of trained neural net triage
    """
    # save path to the model as an attribute
    self.path_to_triage_model = path_to_triage_model

    # load necessary parameters
    path_to_filters = change_extension(path_to_triage_model, 'yaml')
    self.filters_dict = load_yaml(path_to_filters)

    R1 = self.filters_dict['size']
    K1, K2 = self.filters_dict['filters']
    C = self.filters_dict['n_neighbors']

    # initialize and save nn weights
    self.W1 = weight_variable([R1, 1, 1, K1])
    self.b1 = bias_variable([K1])

    self.W11 = weight_variable([1, 1, K1, K2])
    self.b11 = bias_variable([K2])

    self.W2 = weight_variable([1, C, K2, 1])
    self.b2 = bias_variable([1])

    # initialize saver
    self.saver = tf.train.Saver({
        "W1": self.W1, "W11": self.W11, "W2": self.W2,
        "b1": self.b1, "b11": self.b11, "b2": self.b2
    })
@classmethod
def _make_network(cls, input_tensor, filters_size, waveform_length,
                  n_neighbors):
    """Makes the tensorflow network, from the first layer to the output
    layer
    """
    K1, K2 = filters_size

    # initialize and save nn weights
    W1 = weight_variable([waveform_length, 1, 1, K1])
    b1 = bias_variable([K1])

    W11 = weight_variable([1, 1, K1, K2])
    b11 = bias_variable([K2])

    W2 = weight_variable([1, n_neighbors, K2, 1])
    b2 = bias_variable([1])

    # first layer: temporal feature
    layer1 = tf.nn.relu(
        conv2d_VALID(tf.expand_dims(input_tensor, -1), W1) + b1)

    # second layer: feature mapping
    layer11 = tf.nn.relu(conv2d(layer1, W11) + b11)

    # third layer: spatial convolution
    o_layer = conv2d_VALID(layer11, W2) + b2

    vars_dict = {"W1": W1, "W11": W11, "W2": W2,
                 "b1": b1, "b11": b11, "b2": b2}

    return o_layer, vars_dict
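# A minimal usage sketch (hypothetical shapes; the owning class is assumed
# here to be NeuralNetTriage). Unlike the other _make_network variant, the
# input tensor is rank 3, [batch, waveform_length, n_neighbors], and the
# channel axis is added internally via tf.expand_dims:
#
#     x_tf = tf.placeholder("float", [None, 61, 7])
#     o_layer, vars_dict = NeuralNetTriage._make_network(
#         x_tf, filters_size=(16, 8), waveform_length=61, n_neighbors=7)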
def __init__(self, path_to_model, filters_size, waveform_length,
             n_neighbors, threshold, channel_index, n_iter=50000,
             n_batch=512, l2_reg_scale=0.00000005, train_step_size=0.001,
             load_test_set=False):
    """
    Initializes the attributes for the class NeuralNetDetector.

    Parameters:
    -----------
    path_to_model: str
        location of trained neural net detector
    """
    self.logger = logging.getLogger(__name__)

    self.path_to_model = path_to_model
    self.model_name = Path(path_to_model).name.replace('.ckpt', '')

    self.filters_size = filters_size
    self.n_neighbors = n_neighbors
    self.waveform_length = waveform_length

    self.threshold = threshold
    self.n_batch = n_batch
    self.l2_reg_scale = l2_reg_scale
    self.train_step_size = train_step_size
    self.n_iter = n_iter

    # variables
    K1, K2 = filters_size

    W1 = weight_variable([waveform_length, 1, 1, K1])
    b1 = bias_variable([K1])

    W11 = weight_variable([1, 1, K1, K2])
    b11 = bias_variable([K2])

    W2 = weight_variable([1, self.n_neighbors, K2, 1])
    b2 = bias_variable([1])

    self.vars_dict = {"W1": W1, "W11": W11, "W2": W2,
                      "b1": b1, "b11": b11, "b2": b2}

    # graphs
    (self.x_tf, self.spike_index_tf,
     self.probability_tf,
     self.waveform_tf) = NeuralNetDetector._make_recordings_graph(
        threshold, channel_index, waveform_length, filters_size,
        n_neighbors, self.vars_dict)

    (self.x_tf_tr, self.y_tf_tr,
     self.o_layer_tr,
     self.sigmoid_tr) = NeuralNetDetector._make_training_graph(
        self.waveform_length, self.n_neighbors, self.vars_dict)

    # create saver variables
    self.saver = tf.train.Saver(self.vars_dict)

    if load_test_set:
        self._load_test_set()
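# A minimal construction sketch (hypothetical values). channel_index is
# assumed to be the [n_channels, n_neighbors] neighborhood matrix used
# elsewhere in the pipeline:
#
#     detector = NeuralNetDetector('detect_nn1.ckpt',
#                                  filters_size=(16, 8),
#                                  waveform_length=61,
#                                  n_neighbors=7,
#                                  threshold=0.5,
#                                  channel_index=channel_index)
#     with tf.Session() as sess:
#         detector.saver.restore(sess, detector.path_to_model)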
def train_triage(x_train, y_train, n_filters, n_iter, n_batch, l2_reg_scale,
                 train_step_size, nn_name):
    """
    Trains the triage network

    Parameters:
    -----------
    x_train: np.array
        [number of data, temporal length, number of channels] training data
        for the triage network.
    y_train: np.array
        [number of data] training label for the triage network.
    n_filters: tuple
        (K1, K2) number of filters in the temporal and feature-mapping
        layers.
    n_iter: int
        number of training iterations.
    n_batch: int
        batch size.
    l2_reg_scale: float
        scale of the l2 regularization penalty.
    train_step_size: float
        learning rate for the Adam optimizer.
    nn_name: string
        name of the .ckpt to be saved.
    """
    # get parameters
    ndata, R, C = x_train.shape
    K1, K2 = n_filters

    # x and y input tensors
    x_tf = tf.placeholder("float", [n_batch, R, C])
    y_tf = tf.placeholder("float", [n_batch])

    # first layer: temporal feature
    W1 = weight_variable([R, 1, 1, K1])
    b1 = bias_variable([K1])
    layer1 = tf.nn.relu(conv2d_VALID(tf.expand_dims(x_tf, -1), W1) + b1)

    # second layer: feature mapping
    W11 = weight_variable([1, 1, K1, K2])
    b11 = bias_variable([K2])
    layer11 = tf.nn.relu(conv2d(layer1, W11) + b11)

    # third layer: spatial convolution
    W2 = weight_variable([1, C, K2, 1])
    b2 = bias_variable([1])
    o_layer = tf.squeeze(conv2d_VALID(layer11, W2) + b2)

    # cross entropy
    cross_entropy = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=o_layer,
                                                labels=y_tf))

    # regularization term
    weights = tf.trainable_variables()
    l2_regularizer = tf.contrib.layers.l2_regularizer(scale=l2_reg_scale)
    regularization_penalty = tf.contrib.layers.apply_regularization(
        l2_regularizer, weights)
    regularized_loss = cross_entropy + regularization_penalty

    # train step
    train_step = tf.train.AdamOptimizer(train_step_size).minimize(
        regularized_loss)

    # saver
    saver = tf.train.Saver({"W1": W1, "W11": W11, "W2": W2,
                            "b1": b1, "b11": b11, "b2": b2})

    ############
    # training #
    ############

    bar = progressbar.ProgressBar(maxval=n_iter)

    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)

        for i in range(0, n_iter):
            # sample a random mini-batch without replacement
            idx_batch = np.random.choice(ndata, n_batch, replace=False)
            sess.run(train_step,
                     feed_dict={x_tf: x_train[idx_batch],
                                y_tf: y_train[idx_batch]})
            bar.update(i + 1)

        saver.save(sess, nn_name)

        # rough sanity check on one random training batch
        idx_batch = np.random.choice(ndata, n_batch, replace=False)
        output = sess.run(o_layer, feed_dict={x_tf: x_train[idx_batch]})
        y_test = y_train[idx_batch]

        tp = np.mean(output[y_test == 1] > 0)
        fp = np.mean(output[y_test == 0] > 0)

        print('Approximate training true positive rate: ' + str(tp) +
              ', false positive rate: ' + str(fp))

    bar.finish()
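# A minimal invocation sketch (hypothetical data shapes and values; random
# data here only to show the expected array layouts):
#
#     import numpy as np
#
#     x_train = np.random.randn(5000, 61, 7).astype('float32')
#     y_train = (np.random.rand(5000) > 0.5).astype('float32')
#     train_triage(x_train, y_train, n_filters=(16, 8), n_iter=10000,
#                  n_batch=512, l2_reg_scale=5e-8, train_step_size=0.001,
#                  nn_name='triage_nn1.ckpt')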