Example #1
    def test_softmax(self):
        """Test invoking SoftMax in eager mode."""
        with context.eager_mode():
            input = np.random.rand(5, 10).astype(np.float32)
            result = layers.SoftMax()(input)
            expected = tf.nn.softmax(input)
            assert np.allclose(result, expected)
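This method is excerpted from a test class; a minimal sketch of the imports and class wrapper it assumes (module paths follow the TensorFlow 1.x / DeepChem 2.x era, and the class name is hypothetical):

import unittest

import numpy as np
import tensorflow as tf
from tensorflow.python.eager import context

import deepchem.models.tensorgraph.layers as layers


class TestLayersEager(unittest.TestCase):  # hypothetical class name
    # ... the test_softmax method shown above goes here ...
    pass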
Example #2
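This fragment references names (model, make_image, label, train_dataset, test_dataset) defined earlier in its source file. A minimal sketch of those assumed definitions, mirroring the fuller MNIST walkthrough in Example #3:

import deepchem as dc
import tensorflow as tf
from deepchem.models.tensorgraph import layers

model = dc.models.TensorGraph(model_dir='mnist')
feature = layers.Feature(shape=(None, 784))   # flattened 28 x 28 images
label = layers.Label(shape=(None, 10))        # one-hot digit labels
make_image = layers.Reshape(shape=(None, 28, 28), in_layers=feature)
# train_dataset and test_dataset would be dc.data.NumpyDataset objects (see Example #3)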
conv2d_1 = layers.Conv2D(num_outputs=32,
                         activation_fn=tf.nn.relu,
                         in_layers=make_image)
conv2d_2 = layers.Conv2D(num_outputs=64,
                         activation_fn=tf.nn.relu,
                         in_layers=conv2d_1)

flatten = layers.Flatten(in_layers=conv2d_2)
dense1 = layers.Dense(out_channels=1024,
                      activation_fn=tf.nn.relu,
                      in_layers=flatten)
dense2 = layers.Dense(out_channels=10, activation_fn=None, in_layers=dense1)

# Computes the loss for every sample
smce = layers.SoftMaxCrossEntropy(in_layers=[label, dense2])
# Average all the losses
loss = layers.ReduceMean(in_layers=smce)
model.set_loss(loss)

# Convert the output from logits to probabilities
output = layers.SoftMax(in_layers=dense2)
model.add_output(output)

model.fit(train_dataset, nb_epoch=1)  # use nb_epoch=10 for a full training run

# Use accuracy as the metric: the fraction of labels that are correctly predicted
metric = dc.metrics.Metric(dc.metrics.accuracy_score)

train_scores = model.evaluate(train_dataset, [metric])
test_scores = model.evaluate(test_dataset, [metric])
Example #3
    def __init__(self, n_generators=1, n_discriminators=1, **kwargs):
        """Construct a GAN.

    In addition to the parameters listed below, this class accepts all the
    keyword arguments from TensorGraph.

    Parameters
    ----------
    n_generators: int
      the number of generators to include
    n_discriminators: int
      the number of discriminators to include
    """
        super(GAN, self).__init__(use_queue=False, **kwargs)
        self.n_generators = n_generators
        self.n_discriminators = n_discriminators

        # Create the inputs.

        self.noise_input = layers.Feature(shape=self.get_noise_input_shape())
        self.data_inputs = []
        for shape in self.get_data_input_shapes():
            self.data_inputs.append(layers.Feature(shape=shape))
        self.conditional_inputs = []
        for shape in self.get_conditional_input_shapes():
            self.conditional_inputs.append(layers.Feature(shape=shape))

        # Create the generators.

        self.generators = []
        for i in range(n_generators):
            generator = self.create_generator(self.noise_input,
                                              self.conditional_inputs)
            if not isinstance(generator, Sequence):
                raise ValueError(
                    'create_generator() must return a list of Layers')
            if len(generator) != len(self.data_inputs):
                raise ValueError(
                    'The number of generator outputs must match the number of data inputs'
                )
            for g, d in zip(generator, self.data_inputs):
                if g.shape != d.shape:
                    raise ValueError(
                        'The shapes of the generator outputs must match the shapes of the data inputs'
                    )
            for g in generator:
                self.add_output(g)
            self.generators.append(generator)

        # Create the discriminators.

        self.discrim_train = []
        self.discrim_gen = []
        for i in range(n_discriminators):
            discrim_train = self.create_discriminator(self.data_inputs,
                                                      self.conditional_inputs)
            self.discrim_train.append(discrim_train)

            # Make a copy of the discriminator that takes each generator's output as
            # its input.

            for generator in self.generators:
                replacements = {}
                for g, d in zip(generator, self.data_inputs):
                    replacements[d] = g
                for c in self.conditional_inputs:
                    replacements[c] = c
                discrim_gen = discrim_train.copy(replacements, shared=True)
                self.discrim_gen.append(discrim_gen)

        # Make a list of all layers in the generators and discriminators.

        def add_layers_to_set(layer, layer_set):
            if layer not in layer_set:
                layer_set.add(layer)
                for i in layer.in_layers:
                    add_layers_to_set(i, layer_set)

        gen_layers = set()
        for generator in self.generators:
            for layer in generator:
                add_layers_to_set(layer, gen_layers)
        discrim_layers = set()
        for discriminator in self.discrim_train:
            add_layers_to_set(discriminator, discrim_layers)
        discrim_layers -= gen_layers
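        # layers shared with a generator are removed so that training the
        # discriminator submodel does not also update generator variables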

        # Compute the loss functions.

        gen_losses = [self.create_generator_loss(d) for d in self.discrim_gen]
        discrim_losses = []
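        # discrim_gen was filled discriminator-major (for each discriminator,
        # one copy per generator), so discrim_gen[i * n_generators + j] pairs
        # discriminator i with generator j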
        for i in range(n_discriminators):
            for j in range(n_generators):
                discrim_losses.append(
                    self.create_discriminator_loss(
                        self.discrim_train[i],
                        self.discrim_gen[i * n_generators + j]))
        if n_generators == 1 and n_discriminators == 1:
            total_gen_loss = gen_losses[0]
            total_discrim_loss = discrim_losses[0]
        else:
            # Create learnable weights for the generators and discriminators.

            gen_alpha = layers.Variable(np.ones((1, n_generators)))
            gen_weights = layers.SoftMax(gen_alpha)
            discrim_alpha = layers.Variable(np.ones((1, n_discriminators)))
            discrim_weights = layers.SoftMax(discrim_alpha)

            # Compute the weighted errors.

            weight_products = layers.Reshape(
                (n_generators * n_discriminators, ),
                in_layers=layers.Reshape(
                    (n_discriminators, 1), in_layers=discrim_weights) *
                layers.Reshape((1, n_generators), in_layers=gen_weights))
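            # weight_products flattens the outer product of the discriminator
            # and generator weight vectors into one weight per
            # (discriminator, generator) pair, matching the stacked losses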
            total_gen_loss = layers.WeightedError(
                (layers.Stack(gen_losses, axis=0), weight_products))
            total_discrim_loss = layers.WeightedError(
                (layers.Stack(discrim_losses, axis=0), weight_products))
            gen_layers.add(gen_alpha)
            discrim_layers.add(gen_alpha)
            discrim_layers.add(discrim_alpha)

            # Add an entropy term to the loss.
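            # (this term penalizes mixture weights near zero, keeping every
            # generator and discriminator in play during training)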

            entropy = -(layers.ReduceSum(layers.Log(gen_weights)) /
                        n_generators + layers.ReduceSum(
                            layers.Log(discrim_weights)) / n_discriminators)
            total_discrim_loss += entropy

        # Create submodels for training the generators and discriminators.

        self.generator_submodel = self.create_submodel(layers=gen_layers,
                                                       loss=total_gen_loss)
        self.discriminator_submodel = self.create_submodel(
            layers=discrim_layers, loss=total_discrim_loss)
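The constructor above calls several methods that a concrete subclass must supply: the three input-shape getters plus create_generator() and create_discriminator() (the two loss methods have default implementations in the base class). A minimal sketch of such a subclass, with illustrative layer sizes that are assumptions rather than part of the source:

class ExampleGAN(GAN):  # hypothetical subclass for illustration

    def get_noise_input_shape(self):
        return (None, 10)

    def get_data_input_shapes(self):
        return [(None, 784)]

    def get_conditional_input_shapes(self):
        return []

    def create_generator(self, noise_input, conditional_inputs):
        # must return a list of Layers whose shapes match the data inputs
        hidden = layers.Dense(out_channels=128, activation_fn=tf.nn.relu,
                              in_layers=noise_input)
        return [layers.Dense(out_channels=784, activation_fn=tf.sigmoid,
                             in_layers=hidden)]

    def create_discriminator(self, data_inputs, conditional_inputs):
        # must return a Layer estimating the probability its input is real
        hidden = layers.Dense(out_channels=128, activation_fn=tf.nn.relu,
                              in_layers=data_inputs)
        return layers.Dense(out_channels=1, activation_fn=tf.sigmoid,
                            in_layers=hidden)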
# Imports assumed by this example (TF 1.x / DeepChem 2.x era module paths):
import deepchem as dc
import tensorflow as tf
from deepchem.models.tensorgraph import layers
from tensorflow.examples.tutorials.mnist import input_data


def create_model():
    """Create an MNIST digit-classification model from scratch."""
    mnist = input_data.read_data_sets("MNIST_DATA/", one_hot=True)

    # the layers from deepchem are the building blocks we will use to assemble
    # our deep learning architecture

    # wrap the raw MNIST arrays in NumpyDataset objects

    train_dataset = dc.data.NumpyDataset(mnist.train.images,
                                         mnist.train.labels)
    test_dataset = dc.data.NumpyDataset(mnist.test.images, mnist.test.labels)

    # we build a TensorGraph model by adding multiple layers, where each layer
    # takes its input from the previous layers

    model = dc.models.TensorGraph(model_dir='mnist')

    # 784 corresponds to an image of size 28 X 28
    # 10 corresponds to the fact that there are 10 possible digits (0-9)
    # None leaves the batch size unspecified (e.g. a batch of 500 images, each with 784 features)
    # the labels are categorical, so they are one-hot encoded: one element set to 1 and the rest to 0
    feature = layers.Feature(shape=(None, 784))
    labels = layers.Label(shape=(None, 10))

    # to apply convolutional layers to our input, we reshape each flat vector of 784 values into a 28 x 28 image
    # in_layers means it takes our feature layer as an input
    make_image = layers.Reshape(shape=(None, 28, 28), in_layers=feature)

    # now that we have reshaped the input, we pass to convolution layers

    conv2d_1 = layers.Conv2D(num_outputs=32,
                             activation_fn=tf.nn.relu,
                             in_layers=make_image)

    conv2d_2 = layers.Conv2D(num_outputs=64,
                             activation_fn=tf.nn.relu,
                             in_layers=conv2d_1)

    # we want to end by applying fully connected (Dense) layers to the outputs of our convolutional layer
    # but first, we must flatten each sample's output from a 2d matrix to a 1d vector

    flatten = layers.Flatten(in_layers=conv2d_2)
    dense1 = layers.Dense(out_channels=1024,
                          activation_fn=tf.nn.relu,
                          in_layers=flatten)

    # this is the final layer: out_channels=10 gives one output per digit class,
    # and activation_fn=None leaves the outputs as raw logits
    dense2 = layers.Dense(out_channels=10,
                          activation_fn=None,
                          in_layers=dense1)

    # next we connect this output to a loss function, so the network can be trained

    # compute the loss for every sample, then average over all samples (ReduceMean) to get the final loss
    smce = layers.SoftMaxCrossEntropy(in_layers=[labels, dense2])
    loss = layers.ReduceMean(in_layers=smce)
    model.set_loss(loss)

    # for MNIST we want the probability that a given sample represents each of the 10 digits
    # applying a softmax to the logits converts them into these class probabilities

    output = layers.SoftMax(in_layers=dense2)
    model.add_output(output)

    # if the model takes too long to train, reduce nb_epoch to 1
    model.fit(train_dataset, nb_epoch=10)

    # our metric is accuracy, the fraction of labels that are correctly predicted
    metric = dc.metrics.Metric(dc.metrics.accuracy_score)

    train_scores = model.evaluate(train_dataset, [metric])
    test_scores = model.evaluate(test_dataset, [metric])

    print('train_scores', train_scores)
    print('test_scores', test_scores)
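
To run the finished script (the MNIST_DATA/ directory is downloaded on first use):

if __name__ == '__main__':
    create_model()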