Example #1
    def test_conv_1d(self):
        """Test invoking Conv1D in eager mode."""
        # Fragment of a larger test case; assumes module-level imports of
        # numpy as np, the TF layers module as `layers`,
        # tensorflow.python.eager.context as `context`, and
        # tf.contrib.eager as `tfe`.
        with context.eager_mode():
            with tfe.IsolateTest():
                width = 5
                in_channels = 2
                filters = 3
                kernel_size = 2
                batch_size = 10
                inputs = np.random.rand(batch_size, width,
                                        in_channels).astype(np.float32)
                layer = layers.Conv1D(filters, kernel_size)
                result = layer(inputs)
                self.assertEqual(result.shape[0], batch_size)
                self.assertEqual(result.shape[2], filters)
                # A built Conv1D layer owns two variables: kernel and bias.
                self.assertEqual(len(layer.variables), 2)

                # Creating a second layer should produce different results,
                # since it has different random weights.
                layer2 = layers.Conv1D(filters, kernel_size)
                result2 = layer2(inputs)
                self.assertFalse(np.allclose(result, result2))

                # But evaluating the first layer again should produce the
                # same result as before.
                result3 = layer(inputs)
                self.assertTrue(np.allclose(result, result3))
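
The test above relies on tf.contrib.eager (tfe.IsolateTest), which was removed in TensorFlow 2.x. A minimal standalone sketch of the same checks, assuming TensorFlow 2.x, where eager execution is the default and Conv1D lives under tf.keras.layers:

import numpy as np
import tensorflow as tf

# Shapes follow the test above: (batch=10, width=5, in_channels=2).
inputs = np.random.rand(10, 5, 2).astype(np.float32)
layer = tf.keras.layers.Conv1D(filters=3, kernel_size=2)
result = layer(inputs)
assert result.shape == (10, 4, 3)  # 'valid' padding: 5 - 2 + 1 = 4
assert len(layer.variables) == 2   # kernel and bias

# A second, freshly initialized layer gives different outputs...
layer2 = tf.keras.layers.Conv1D(filters=3, kernel_size=2)
assert not np.allclose(result, layer2(inputs))

# ...while re-applying the first layer reproduces its result.
assert np.allclose(result, layer(inputs))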
Example #2
 def _create_encoder(self, n_layers, dropout):
   """Create the encoder layers."""
   # The depth of the convolutional stack is set by self._filter_sizes:
   # one Conv1D per entry, each optionally preceded by Dropout.
   prev_layer = self._features
   for i in range(len(self._filter_sizes)):
     filter_size = self._filter_sizes[i]
     kernel_size = self._kernel_sizes[i]
     if dropout > 0.0:
       prev_layer = layers.Dropout(dropout, in_layers=prev_layer)
     prev_layer = layers.Conv1D(
         filters=filter_size,
         kernel_size=kernel_size,
         in_layers=prev_layer,
         activation_fn=tf.nn.relu)
   # Flatten and project the convolutional features down to the decoder
   # dimension, then normalize.
   prev_layer = layers.Flatten(prev_layer)
   prev_layer = layers.Dense(
       self._decoder_dimension, in_layers=prev_layer, activation_fn=tf.nn.relu)
   prev_layer = layers.BatchNorm(prev_layer)
   if self._variational:
     # For a variational autoencoder, predict a mean and a standard
     # deviation and sample the embedding from them during training.
     self._embedding_mean = layers.Dense(
         self._embedding_dimension,
         in_layers=prev_layer,
         name='embedding_mean')
     self._embedding_stddev = layers.Dense(
         self._embedding_dimension, in_layers=prev_layer, name='embedding_std')
     prev_layer = layers.CombineMeanStd(
         [self._embedding_mean, self._embedding_stddev], training_only=True)
   return prev_layer
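
In the variational branch, CombineMeanStd performs the sampling step of the reparameterization trick. A minimal sketch of what that layer computes, assuming it draws unit Gaussian noise while training and passes the mean through at inference (training_only=True); the function name and signature here are hypothetical, and tf.random.normal assumes TensorFlow 2.x:

import tensorflow as tf

def combine_mean_std(mean, stddev, training):
  # Hypothetical stand-in for layers.CombineMeanStd: while training, sample
  # embedding = mean + stddev * eps with eps ~ N(0, 1); at inference,
  # return the mean unchanged.
  if not training:
    return mean
  eps = tf.random.normal(tf.shape(mean))
  return mean + stddev * eps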
Example #3
import deepchem as dc
import deepchem.models.tensorgraph.layers as layers
import tensorflow as tf
import matplotlib.pyplot as plot

# Build the model.

model = dc.models.TensorGraph(model_dir='rnai')
# Each sample is a 21-base sequence, one-hot encoded over the four bases.
features = layers.Feature(shape=(None, 21, 4))
labels = layers.Label(shape=(None, 1))
prev = features
# Two 1-D convolutional blocks, each followed by dropout.
for i in range(2):
    prev = layers.Conv1D(filters=10,
                         kernel_size=10,
                         activation=tf.nn.relu,
                         padding='same',
                         in_layers=prev)
    prev = layers.Dropout(dropout_prob=0.3, in_layers=prev)
# A sigmoid output squashes the prediction into [0, 1].
output = layers.Dense(out_channels=1,
                      activation_fn=tf.sigmoid,
                      in_layers=layers.Flatten(prev))
model.add_output(output)
# Train with the mean squared (L2) distance between labels and predictions.
loss = layers.ReduceMean(layers.L2Loss(in_layers=[labels, output]))
model.set_loss(loss)

# Load the data.

train = dc.data.DiskDataset('train_siRNA')
valid = dc.data.DiskDataset('valid_siRNA')
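
The listing stops after loading the datasets. A plausible continuation for training and validation, assuming DeepChem's standard TensorGraph API (model.fit, model.evaluate) and a Pearson R² metric; the epoch count is illustrative:

# Train for a few epochs, reporting validation performance after each one.
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score)
for epoch in range(10):
    model.fit(train, nb_epoch=1)
    print(epoch, model.evaluate(valid, [metric]))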