Example #1 (score: 0)
 def _create_encoder(self, n_layers, dropout):
   """Build the convolutional encoder graph and return its final layer.

   One Conv1D layer is created per entry of self._filter_sizes (note that
   the n_layers argument is not consulted here — the filter-size list
   fixes the depth), each optionally preceded by dropout.  The conv stack
   is then flattened, projected to the decoder dimension, batch
   normalized, and, when self._variational is set, split into mean/stddev
   heads merged by CombineMeanStd.
   """
   current = self._features
   for index, filter_size in enumerate(self._filter_sizes):
     kernel_size = self._kernel_sizes[index]
     if dropout > 0.0:
       # Dropout is inserted in front of every convolution when requested.
       current = layers.Dropout(dropout, in_layers=current)
     current = layers.Conv1D(
         filters=filter_size,
         kernel_size=kernel_size,
         in_layers=current,
         activation_fn=tf.nn.relu)
   current = layers.Flatten(current)
   current = layers.Dense(
       self._decoder_dimension, in_layers=current, activation_fn=tf.nn.relu)
   current = layers.BatchNorm(current)
   if self._variational:
     # Two parallel heads parameterize the latent distribution; sampling
     # happens only at training time (training_only=True).
     self._embedding_mean = layers.Dense(
         self._embedding_dimension,
         in_layers=current,
         name='embedding_mean')
     self._embedding_stddev = layers.Dense(
         self._embedding_dimension, in_layers=current, name='embedding_std')
     current = layers.CombineMeanStd(
         [self._embedding_mean, self._embedding_stddev], training_only=True)
   return current
Example #2 (score: 0)
 def test_dropout(self):
     """Test invoking Dropout in eager mode.

     Checks that the layer is the identity at inference time, and that in
     training mode some entries are zeroed while the survivors are
     rescaled.
     """
     with context.eager_mode():
         rate = 0.5
         # Renamed from `input`, which shadowed the builtin of that name.
         x = np.random.rand(5, 10).astype(np.float32)
         layer = layers.Dropout(rate)
         # Inference mode (training=False): dropout must be a no-op.
         result1 = layer(x, training=False)
         assert np.allclose(result1, x)
         # Training mode: some entries must have been dropped.
         result2 = layer(x, training=True)
         assert not np.allclose(result2, x)
         # Surviving entries are rescaled; at rate=0.5 dividing by `rate`
         # equals the usual 1/(1 - rate) inverted-dropout factor.
         nonzero = result2.numpy() != 0
         assert np.allclose(result2.numpy()[nonzero], x[nonzero] / rate)
Example #3 (score: 0)
 def _create_decoder(self, n_layers, dropout):
   """Build the decoder graph and return its softmax output layer.

   The embedding is repeated out to the maximum output length, passed
   through n_layers GRU layers (each optionally preceded by dropout), and
   finally projected onto the output-token vocabulary with a softmax.
   """
   current = layers.Repeat(
       self._max_output_length, in_layers=self.embedding)
   for _ in range(n_layers):
     if dropout > 0.0:
       current = layers.Dropout(dropout, in_layers=current)
     current = layers.GRU(
         self._embedding_dimension, self.batch_size, in_layers=current)
   return layers.Dense(
       len(self._output_tokens),
       in_layers=current,
       activation_fn=tf.nn.softmax)
Example #4 (score: 0)
 def _create_encoder(self, n_layers, dropout):
   """Build the recurrent encoder graph and return its final layer.

   Runs the input features through n_layers GRU layers (each optionally
   preceded by dropout), gathers the outputs at self._gather_indices,
   and, when self._variational is set, adds mean/stddev heads merged by
   CombineMeanStd.
   """
   current = self._features
   for _ in range(n_layers):
     if dropout > 0.0:
       # Dropout is inserted in front of every recurrent layer.
       current = layers.Dropout(dropout, in_layers=current)
     current = layers.GRU(
         self._embedding_dimension, self.batch_size, in_layers=current)
   current = layers.Gather(in_layers=[current, self._gather_indices])
   if self._variational:
     # Mean and stddev heads parameterize the latent distribution;
     # sampling happens only at training time (training_only=True).
     self._embedding_mean = layers.Dense(
         self._embedding_dimension, in_layers=current)
     self._embedding_stddev = layers.Dense(
         self._embedding_dimension, in_layers=current)
     current = layers.CombineMeanStd(
         [self._embedding_mean, self._embedding_stddev], training_only=True)
   return current
Example #5 (score: 0)
 def __init__(self,
              seq_length,
              use_RNN=False,
              num_tasks=1,
              num_filters=15,
              kernel_size=15,
              pool_width=35,
              L1=0,
              dropout=0.0,
              verbose=True,
              **kwargs):
     """Build a SequenceDNN: Conv2D -> Dropout -> Flatten -> Dense head.

     NOTE(review): seq_length, use_RNN, pool_width and L1 are accepted
     but never referenced in this visible body — confirm whether they are
     consumed elsewhere (e.g. by the superclass) or are dead parameters.
     """
     super(SequenceDNN, self).__init__(**kwargs)
     self.num_tasks = num_tasks
     self.verbose = verbose
     # Convolutional feature extractor followed by a per-task dense head.
     self.add(layers.Conv2D(num_filters, kernel_size=kernel_size))
     self.add(layers.Dropout(dropout))
     self.add(layers.Flatten())
     self.add(layers.Dense(self.num_tasks, activation_fn=tf.nn.relu))
Example #6 (score: 0)
import tensorflow as tf
import matplotlib.pyplot as plot

# Build the model: two Conv1D+Dropout stages over one-hot (21, 4) sequence
# features, then a flattened sigmoid Dense head trained with L2 loss.

model = dc.models.TensorGraph(model_dir='rnai')
features = layers.Feature(shape=(None, 21, 4))
labels = layers.Label(shape=(None, 1))
prev = features
for i in range(2):
    # NOTE(review): this Conv1D passes `activation=` while the Dense below
    # uses `activation_fn=` — confirm which keyword this layers API accepts.
    prev = layers.Conv1D(filters=10,
                         kernel_size=10,
                         activation=tf.nn.relu,
                         padding='same',
                         in_layers=prev)
    prev = layers.Dropout(dropout_prob=0.3, in_layers=prev)
output = layers.Dense(out_channels=1,
                      activation_fn=tf.sigmoid,
                      in_layers=layers.Flatten(prev))
model.add_output(output)
# Squared-error loss averaged over the batch.
loss = layers.ReduceMean(layers.L2Loss(in_layers=[labels, output]))
model.set_loss(loss)

# Load the data from pre-built DiskDataset directories.

train = dc.data.DiskDataset('train_siRNA')
valid = dc.data.DiskDataset('valid_siRNA')

# Train the model, tracking its performance on the training and validation datasets.

metric = dc.metrics.Metric(dc.metrics.pearsonr, mode='regression')