Example #1
 def test_reduce_mean(self):
     """Test invoking ReduceMean in eager mode."""
     with context.eager_mode():
         input = np.random.rand(5, 10).astype(np.float32)
         result = layers.ReduceMean(axis=1)(input)
         assert result.shape == (5, )
         assert np.allclose(result, np.mean(input, axis=1))
Example #2
 def _create_loss(self):
   """Create the loss function."""
   prob = layers.ReduceSum(self.output * self._labels, axis=2)
   mask = layers.ReduceSum(self._labels, axis=2)
   log_prob = layers.Log(prob + 1e-20) * mask
   loss = -layers.ReduceMean(
       layers.ReduceSum(log_prob, axis=1), name='cross_entropy_loss')
   if self._variational:
     mean_sq = self._embedding_mean * self._embedding_mean
     stddev_sq = self._embedding_stddev * self._embedding_stddev
     kl = mean_sq + stddev_sq - layers.Log(stddev_sq + 1e-20) - 1
     anneal_steps = self._annealing_final_step - self._annealing_start_step
     if anneal_steps > 0:
       current_step = tf.to_float(
           self.get_global_step()) - self._annealing_start_step
       anneal_frac = tf.maximum(0.0, current_step) / anneal_steps
       kl_scale = layers.TensorWrapper(
           tf.minimum(1.0, anneal_frac * anneal_frac), name='kl_scale')
     else:
       kl_scale = 1.0
     loss += 0.5 * kl_scale * layers.ReduceMean(kl)
   return loss
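The kl term above is the closed-form KL divergence between a diagonal Gaussian N(mu, sigma^2) and a standard normal prior, computed elementwise before ReduceMean averages it. A minimal NumPy sketch (illustration only, not the DeepChem API) of that formula:

import numpy as np

# Hypothetical embedding statistics for a 3-dimensional latent space.
mu = np.array([0.0, 0.5, -1.0])
stddev = np.array([1.0, 0.8, 1.2])

# Elementwise KL(N(mu, sigma^2) || N(0, 1)); the 1e-20 above guards against log(0).
kl = mu ** 2 + stddev ** 2 - np.log(stddev ** 2 + 1e-20) - 1
print(0.5 * np.mean(kl))  # the 0.5 factor is applied when adding to the loss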
Example #3
  def create_generator_loss(self, discrim_output):
    """Create the loss function for the generator.

    The default implementation is appropriate for most cases.  Subclasses can
    override this if they need to customize it.

    Parameters
    ----------
    discrim_output: Layer
      the output from the discriminator on a batch of generated data.  This is
      its estimate of the probability that each sample is training data.

    Returns
    -------
    A Layer object that outputs the loss function to use for optimizing the
    generator.
    """
    return -layers.ReduceMean(layers.Log(discrim_output + 1e-10))
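This is the standard non-saturating GAN generator loss, -mean(log D(G(z))); the 1e-10 keeps the log finite when the discriminator outputs exactly 0. A minimal NumPy sketch (not the DeepChem layer API) of the computation:

import numpy as np

# Hypothetical discriminator outputs on a batch of generated samples.
discrim_output = np.array([0.1, 0.6, 0.9])
loss = -np.mean(np.log(discrim_output + 1e-10))
print(loss)  # small when the discriminator is fooled (outputs near 1)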
Example #4
    def test_tensorboard(self):
        """Test creating an Estimator from a TensorGraph that logs information to TensorBoard."""
        n_samples = 10
        n_features = 3
        n_tasks = 2

        # Create a dataset and an input function for processing it.

        np.random.seed(123)
        X = np.random.rand(n_samples, n_features)
        y = np.zeros((n_samples, n_tasks))
        dataset = dc.data.NumpyDataset(X, y)

        def input_fn(epochs):
            x, y, weights = dataset.make_iterator(batch_size=n_samples,
                                                  epochs=epochs).get_next()
            return {'x': x, 'weights': weights}, y

        # Create a TensorGraph model.

        model = dc.models.TensorGraph()
        features = layers.Feature(shape=(None, n_features))
        dense = layers.Dense(out_channels=n_tasks, in_layers=features)
        dense.set_summary('histogram')
        model.add_output(dense)
        labels = layers.Label(shape=(None, n_tasks))
        loss = layers.ReduceMean(layers.L2Loss(in_layers=[labels, dense]))
        model.set_loss(loss)

        # Create an estimator from it.

        x_col = tf.feature_column.numeric_column('x', shape=(n_features, ))
        estimator = model.make_estimator(feature_columns=[x_col])

        # Train the model.

        estimator.train(input_fn=lambda: input_fn(100))
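Since dense.set_summary('histogram') attaches a histogram summary to the layer, the events written during training can be viewed by pointing TensorBoard at the directory where the estimator saves its checkpoints and event files (the path below is a placeholder):

tensorboard --logdir /path/to/model_dir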
Example #5
model = dc.models.TensorGraph(model_dir='rnai')
features = layers.Feature(shape=(None, 21, 4))
labels = layers.Label(shape=(None, 1))
prev = features
for i in range(2):
    prev = layers.Conv1D(filters=10,
                         kernel_size=10,
                         activation=tf.nn.relu,
                         padding='same',
                         in_layers=prev)
    prev = layers.Dropout(dropout_prob=0.3, in_layers=prev)
output = layers.Dense(out_channels=1,
                      activation_fn=tf.sigmoid,
                      in_layers=layers.Flatten(prev))
model.add_output(output)
loss = layers.ReduceMean(layers.L2Loss(in_layers=[labels, output]))
model.set_loss(loss)

# Load the data.

train = dc.data.DiskDataset('train_siRNA')
valid = dc.data.DiskDataset('valid_siRNA')

# Train the model, tracking its performance on the training and validation datasets.

metric = dc.metrics.Metric(dc.metrics.pearsonr, mode='regression')
for i in range(20):
    model.fit(train, nb_epoch=10)
    print(model.evaluate(train, [metric])['pearsonr'][0])
    print(model.evaluate(valid, [metric])['pearsonr'][0])
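The tracked metric is the Pearson correlation coefficient between predictions and labels. A minimal NumPy sketch (not the DeepChem Metric object) of what it measures:

import numpy as np

# Hypothetical labels and model predictions for four samples.
y_true = np.array([0.10, 0.40, 0.35, 0.80])
y_pred = np.array([0.12, 0.38, 0.30, 0.75])
r = np.corrcoef(y_true, y_pred)[0, 1]  # +1.0 means a perfect linear fit
print(r)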
Example #6
 def create_discriminator_loss(self, discrim_output_train, discrim_output_gen):
   gradient_penalty = GradientPenaltyLayer(discrim_output_train, self)
   return gradient_penalty + layers.ReduceMean(discrim_output_train -
                                               discrim_output_gen)
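This is a WGAN-style critic loss: a gradient penalty term plus the mean difference between the critic's scores on real and generated batches (it pairs with the generator loss in the next example; sign conventions vary between implementations). A minimal NumPy sketch of the score-difference term:

import numpy as np

# Hypothetical critic scores on real and generated batches.
scores_train = np.array([0.3, -0.1, 0.5])
scores_gen = np.array([1.2, 0.8, 1.0])
critic_term = np.mean(scores_train - scores_gen)  # gradient penalty added on top
print(critic_term)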
Example #7
 def create_generator_loss(self, discrim_output):
   return layers.ReduceMean(discrim_output)
Example #8
conv2d_1 = layers.Conv2D(num_outputs=32,
                         activation_fn=tf.nn.relu,
                         in_layers=make_image)
conv2d_2 = layers.Conv2D(num_outputs=64,
                         activation_fn=tf.nn.relu,
                         in_layers=conv2d_1)

flatten = layers.Flatten(in_layers=conv2d_2)
dense1 = layers.Dense(out_channels=1024,
                      activation_fn=tf.nn.relu,
                      in_layers=flatten)
dense2 = layers.Dense(out_channels=10, activation_fn=None, in_layers=dense1)

# Computes the loss for every sample
smce = layers.SoftMaxCrossEntropy(in_layers=[label, dense2])
# Average all the losses
loss = layers.ReduceMean(in_layers=smce)
model.set_loss(loss)

# Convert the output from logits to probs
output = layers.SoftMax(in_layers=dense2)
model.add_output(output)

model.fit(train_dataset, nb_epoch=1)  # nb_epoch=10

# Use accuracy as the metric: the fraction of labels that are correctly predicted
metric = dc.metrics.Metric(dc.metrics.accuracy_score)

train_scores = model.evaluate(train_dataset, [metric])
test_scores = model.evaluate(test_dataset, [metric])
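SoftMaxCrossEntropy computes a per-sample loss from the raw logits, and ReduceMean averages it over the batch. A minimal NumPy sketch (illustration only) of that per-sample computation:

import numpy as np

# Hypothetical logits and one-hot label for a single sample.
logits = np.array([2.0, 0.5, -1.0])
label = np.array([1.0, 0.0, 0.0])
probs = np.exp(logits) / np.sum(np.exp(logits))  # softmax
sample_loss = -np.sum(label * np.log(probs))     # cross entropy
print(sample_loss)  # ReduceMean averages this over the batch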
Example #9
def create_model():
    """
    Create our own MNIST model from scratch
    :return:
    :rtype:
    """
    mnist = input_data.read_data_sets("MNIST_DATA/", one_hot=True)

    # the layers from deepchem are the building blocks we will use to assemble our deep learning architecture

    # now we wrap our dataset into a NumpyDataset

    train_dataset = dc.data.NumpyDataset(mnist.train.images,
                                         mnist.train.labels)
    test_dataset = dc.data.NumpyDataset(mnist.test.images, mnist.test.labels)

    # we will create a model that takes an input and stacks multiple layers, where each layer takes its input
    # from the previous one.

    model = dc.models.TensorGraph(model_dir='mnist')

    # 784 corresponds to an image of size 28 X 28
    # 10 corresponds to the fact that there are 10 possible digits (0-9)
    # the None indicates that we can accept batches of any size (e.g. 500 images, each with 784 features)
    # our labels are also categorical, so they are one hot encoded: a single array element is set to 1 and the rest to 0
    feature = layers.Feature(shape=(None, 784))
    labels = layers.Label(shape=(None, 10))

    # in order to apply convolutional layers to our input, we convert each flat vector of 784 values to a 28 x 28 image
    # in_layers means it takes our feature layer as an input
    make_image = layers.Reshape(shape=(None, 28, 28), in_layers=feature)

    # now that we have reshaped the input, we pass to convolution layers

    conv2d_1 = layers.Conv2D(num_outputs=32,
                             activation_fn=tf.nn.relu,
                             in_layers=make_image)

    conv2d_2 = layers.Conv2D(num_outputs=64,
                             activation_fn=tf.nn.relu,
                             in_layers=conv2d_1)

    # we want to end by applying fully connected (Dense) layers to the outputs of our convolutional layers
    # but first, we must flatten the output from a 2d matrix to a 1d vector

    flatten = layers.Flatten(in_layers=conv2d_2)
    dense1 = layers.Dense(out_channels=1024,
                          activation_fn=tf.nn.relu,
                          in_layers=flatten)

    # note that this is the final layer, so out_channels=10 gives the 10 class outputs and no activation_fn is applied
    dense2 = layers.Dense(out_channels=10,
                          activation_fn=None,
                          in_layers=dense1)

    # next we connect this output to a loss function so we can train the model

    # compute the value of the loss function for every sample, then average over all samples to get the final loss (ReduceMean)
    smce = layers.SoftMaxCrossEntropy(in_layers=[labels, dense2])
    loss = layers.ReduceMean(in_layers=smce)
    model.set_loss(loss)

    # for MNIST we want the probability that a given sample represents each of the 10 digits
    # applying a softmax to the logits gives us these probabilities

    output = layers.SoftMax(in_layers=dense2)
    model.add_output(output)

    # if our model takes long to train, reduce nb_epoch to 1
    model.fit(train_dataset, nb_epoch=10)

    # our metric is accuracy, the fraction of labels that are correctly predicted
    metric = dc.metrics.Metric(dc.metrics.accuracy_score)

    train_scores = model.evaluate(train_dataset, [metric])
    test_scores = model.evaluate(test_dataset, [metric])

    print('train_scores', train_scores)
    print('test_scores', test_scores)
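The accuracy metric used here is simply the fraction of samples whose predicted class matches the true class. A minimal NumPy sketch (not the DeepChem Metric object):

import numpy as np

# Hypothetical true and predicted digit classes for four samples.
y_true = np.array([3, 1, 4, 1])
y_pred = np.array([3, 1, 4, 0])
print(np.mean(y_true == y_pred))  # 0.75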