def create_discriminator(self, data_inputs, conditional_inputs):
  discrim_in = layers.Concat(data_inputs + conditional_inputs)
  dense = layers.Dense(10, in_layers=discrim_in, activation_fn=tf.nn.relu)
  return layers.Dense(1, in_layers=dense, activation_fn=tf.sigmoid)
def test_concat(self):
  """Test invoking Concat in eager mode."""
  with context.eager_mode():
    input1 = np.random.rand(5, 10).astype(np.float32)
    input2 = np.random.rand(5, 4).astype(np.float32)
    result = layers.Concat()(input1, input2)
    assert result.shape == (5, 14)
    assert np.array_equal(input1, result[:, :10])
    assert np.array_equal(input2, result[:, 10:])
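# For reference, a minimal sketch (not part of the test above) of what Concat
# computes in this case: concatenation along the feature axis. The array names
# below are illustrative only.
import numpy as np
import tensorflow as tf

a = np.random.rand(5, 10).astype(np.float32)
b = np.random.rand(5, 4).astype(np.float32)
merged = tf.concat([a, b], axis=1)  # same result the Concat layer produces
assert merged.shape == (5, 14)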
def create_generator(self, noise_input, conditional_inputs):
  gen_in = layers.Concat([noise_input] + conditional_inputs)
  return [layers.Dense(1, in_layers=gen_in)]
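# The discriminator and generator methods above are the two pieces a conditional
# GAN subclass overrides. A rough sketch of wiring them together follows, assuming
# the TensorGraph-era GAN base class; the import path, the input shapes, and the
# shape convention returned by the get_*_shape methods are assumptions, not taken
# from the source, and should be checked against the DeepChem version in use.
import deepchem as dc
import deepchem.models.tensorgraph.layers as layers
import tensorflow as tf
from deepchem.models.tensorgraph.models.gan import GAN

class ExampleGAN(GAN):

  def get_noise_input_shape(self):
    return (None, 10)    # assumed noise dimension

  def get_data_input_shapes(self):
    return [(None, 1)]   # assumed: one scalar data input

  def get_conditional_input_shapes(self):
    return [(None, 1)]   # assumed: one scalar conditional input

  def create_generator(self, noise_input, conditional_inputs):
    gen_in = layers.Concat([noise_input] + conditional_inputs)
    return [layers.Dense(1, in_layers=gen_in)]

  def create_discriminator(self, data_inputs, conditional_inputs):
    discrim_in = layers.Concat(data_inputs + conditional_inputs)
    dense = layers.Dense(10, in_layers=discrim_in, activation_fn=tf.nn.relu)
    return layers.Dense(1, in_layers=dense, activation_fn=tf.sigmoid)

gan = ExampleGAN(learning_rate=0.001)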
dataset, seed=123)

# Create the model.
learning_rate = dc.models.optimizers.ExponentialDecay(0.01, 0.9, 250)
model = dc.models.TensorGraph(learning_rate=learning_rate,
                              model_dir='models/segmentation')
features = layers.Feature(shape=(None, 520, 696, 1)) / 255.0
labels = layers.Label(shape=(None, 520, 696, 1)) / 255.0

# Downsample three times.
conv1 = layers.Conv2D(16, kernel_size=5, stride=2, in_layers=features)
conv2 = layers.Conv2D(32, kernel_size=5, stride=2, in_layers=conv1)
conv3 = layers.Conv2D(64, kernel_size=5, stride=2, in_layers=conv2)

# Do a 1x1 convolution.
conv4 = layers.Conv2D(64, kernel_size=1, stride=1, in_layers=conv3)

# Upsample three times.
concat1 = layers.Concat(in_layers=[conv3, conv4], axis=3)
deconv1 = layers.Conv2DTranspose(32, kernel_size=5, stride=2, in_layers=concat1)
concat2 = layers.Concat(in_layers=[conv2, deconv1], axis=3)
deconv2 = layers.Conv2DTranspose(16, kernel_size=5, stride=2, in_layers=concat2)
concat3 = layers.Concat(in_layers=[conv1, deconv2], axis=3)
deconv3 = layers.Conv2DTranspose(1, kernel_size=5, stride=2, in_layers=concat3)

# Compute the final output.
concat4 = layers.Concat(in_layers=[features, deconv3], axis=3)
logits = layers.Conv2D(1, kernel_size=5,
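# The statement above is cut off mid-call in this excerpt. Purely as a hypothetical
# sketch (not the source's code), the usual TensorGraph pattern from a logits layer
# onward is shown below; the ReduceMean reduction and the 'train_dataset' name are
# assumptions.
output = layers.Sigmoid(logits)                                # per-pixel probabilities
model.add_output(output)
loss = layers.SigmoidCrossEntropy(in_layers=[labels, logits])  # pixel-wise cross entropy
model.set_loss(layers.ReduceMean(in_layers=loss))              # average over all pixels
model.fit(train_dataset, nb_epoch=50)                          # dataset from the split above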
import deepchem as dc
import deepchem.models.tensorgraph.layers as layers
import tensorflow as tf
import numpy as np

# Build the model.
model = dc.models.TensorGraph(batch_size=1000, model_dir='chromatin')
features = layers.Feature(shape=(None, 101, 4))
accessibility = layers.Feature(shape=(None, 1))
labels = layers.Label(shape=(None, 1))
weights = layers.Weights(shape=(None, 1))
prev = features
for i in range(3):
  prev = layers.Conv1D(filters=15, kernel_size=10, activation=tf.nn.relu,
                       padding='same', in_layers=prev)
  prev = layers.Dropout(dropout_prob=0.5, in_layers=prev)
prev = layers.Concat([layers.Flatten(prev), accessibility])
logits = layers.Dense(out_channels=1, in_layers=prev)
output = layers.Sigmoid(logits)
model.add_output(output)
loss = layers.SigmoidCrossEntropy(in_layers=[labels, logits])
weighted_loss = layers.WeightedError(in_layers=[loss, weights])
model.set_loss(weighted_loss)

# Load the data.
train = dc.data.DiskDataset('train_dataset')
valid = dc.data.DiskDataset('valid_dataset')
span_accessibility = {}
for line in open('accessibility.txt'):
  fields = line.split()
  span_accessibility[fields[0]] = float(fields[1])
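# A minimal sketch (an assumed helper, not taken from the source) of how both inputs
# can be fed to the model: TensorGraph's fit_generator() consumes dicts that map input
# layers to NumPy arrays, and the accessibility value for each sequence is looked up
# in the span_accessibility table built above.
def generate_batches(dataset, epochs):
  for epoch in range(epochs):
    for X, y, w, ids in dataset.iterbatches(batch_size=1000, pad_batches=True):
      access = np.array([[span_accessibility[i]] for i in ids])
      yield {features: X, accessibility: access, labels: y, weights: w}

model.fit_generator(generate_batches(train, epochs=10))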