Example #1
 def test_flatten(self):
   """Test that Flatten can be invoked."""
   in_dim_1 = 2
   in_dim_2 = 2
   out_dim = 4
   batch_size = 10
   in_tensor = np.random.rand(batch_size, in_dim_1, in_dim_2)
   with self.session() as sess:
     in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
     out_tensor = Flatten()(in_tensor)
     out_tensor = out_tensor.eval()
     assert out_tensor.shape == (batch_size, out_dim)
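For reference, Flatten keeps the batch axis and merges every remaining dimension into one, so the shape check above matches a plain NumPy reshape (a minimal sketch, not part of the original test):

import numpy as np

# (10, 2, 2) -> (10, 4): keep the batch axis, collapse the rest.
x = np.random.rand(10, 2, 2)
flat = x.reshape(x.shape[0], -1)
assert flat.shape == (10, 4)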
Example #2
  def test_neighbor_list_vina(self):
    """Test under conditions closer to Vina usage."""
    N_atoms = 5
    M_nbrs = 2
    ndim = 3
    start = 0
    stop = 4
    nbr_cutoff = 1

    X = NumpyDataset(start + np.random.rand(N_atoms, ndim) * (stop - start))

    coords = Feature(shape=(N_atoms, ndim))

    # The neighbor list output now has an (N_atoms, M_nbrs) shape.
    nbr_list = NeighborList(
        N_atoms, M_nbrs, ndim, nbr_cutoff, start, stop, in_layers=[coords])

    nbr_list = ToFloat(in_layers=[nbr_list])
    flattened = Flatten(in_layers=[nbr_list])
    dense = Dense(out_channels=1, in_layers=[flattened])
    output = ReduceSum(in_layers=[dense])

    tg = dc.models.TensorGraph(learning_rate=0.1, use_queue=False)
    tg.set_loss(output)

    databag = Databag({coords: X})
    tg.fit_generator(databag.iterbatches(epochs=1))
Example #3
 def create_layers(self, state, **kwargs):
   d1 = Flatten(in_layers=state)
   d2 = Dense(
       in_layers=[d1],
       activation_fn=tf.nn.relu,
       normalizer_fn=tf.nn.l2_normalize,
       normalizer_params={"dim": 1},
       out_channels=64)
   d3 = Dense(
       in_layers=[d2],
       activation_fn=tf.nn.relu,
       normalizer_fn=tf.nn.l2_normalize,
       normalizer_params={"dim": 1},
       out_channels=32)
   d4 = Dense(
       in_layers=[d3],
       activation_fn=tf.nn.relu,
       normalizer_fn=tf.nn.l2_normalize,
       normalizer_params={"dim": 1},
       out_channels=16)
   d4 = BatchNorm(in_layers=[d4])
   d5 = Dense(in_layers=[d4], activation_fn=None, out_channels=9)
   value = Dense(in_layers=[d4], activation_fn=None, out_channels=1)
   value = Squeeze(squeeze_dims=1, in_layers=[value])
   probs = SoftMax(in_layers=[d5])
   return {'action_prob': probs, 'value': value}
Example #4
    def _build_graph(self):

        self.one_hot_seq = Feature(shape=(None, self.pad_length,
                                          self.num_amino_acids),
                                   dtype=tf.float32)

        conv1 = Conv1D(kernel_size=2,
                       filters=512,
                       in_layers=[self.one_hot_seq])

        maxpool1 = MaxPool1D(strides=2, padding="VALID", in_layers=[conv1])
        conv2 = Conv1D(kernel_size=3, filters=512, in_layers=[maxpool1])
        flattened = Flatten(in_layers=[conv2])
        dense1 = Dense(out_channels=400,
                       in_layers=[flattened],
                       activation_fn=tf.nn.tanh)
        dropout = Dropout(dropout_prob=self.dropout_p, in_layers=[dense1])
        output = Dense(out_channels=1, in_layers=[dropout], activation_fn=None)
        self.add_output(output)

        if self.mode == "regression":
            label = Label(shape=(None, 1))
            loss = L2Loss(in_layers=[label, output])
        else:
            raise NotImplementedError(
                "Classification support not added yet. Missing details in paper."
            )
        weights = Weights(shape=(None, ))
        weighted_loss = WeightedError(in_layers=[loss, weights])
        self.set_loss(weighted_loss)
Example #5
    def test_graph_save(self):
        n_samples = 10
        n_features = 11
        n_tasks = 1
        batch_size = 10
        X = np.random.rand(batch_size, n_samples, n_features)
        y = np.ones(shape=(n_samples, n_tasks))
        ids = np.arange(n_samples)

        dataset = dc.data.NumpyDataset(X, y, None, ids)
        g = TensorGraph(model_dir='/tmp/tmpss5_ki5_')

        inLayer = Input(shape=(None, n_samples, n_features))
        g.add_feature(inLayer)

        flatten = Flatten()
        g.add_layer(flatten, parents=[inLayer])

        dense = Dense(out_channels=1)
        g.add_layer(dense, parents=[flatten])
        g.add_output(dense)

        label_out = Input(shape=(None, 1))
        g.add_label(label_out)

        loss = LossLayer()
        g.add_layer(loss, parents=[dense, label_out])
        g.set_loss(loss)

        g.fit(dataset, nb_epoch=100)
        g.save()
        g1 = TensorGraph.load_from_dir('/tmp/tmpss5_ki5_')
        print(g1)
        print(g1.predict_on_batch(X))
Example #6
def test_Flatten_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 1))
  layer = Flatten(in_layers=feature)
  tg.add_output(layer)
  tg.set_loss(layer)
  tg.build()
  tg.save()
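A natural follow-up is to reload the pickled graph, mirroring Example #5 (a sketch, assuming TensorGraph exposes the model_dir it saved to):

# Assumed round-trip check: rebuild the graph from its saved directory.
tg2 = TensorGraph.load_from_dir(tg.model_dir)
tg2.build()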
Example #7
  def build_graph(self):
    self.vertex_features = Feature(shape=(None, self.max_atoms, 75))
    self.adj_matrix = Feature(shape=(None, self.max_atoms, 1, self.max_atoms))
    self.mask = Feature(shape=(None, self.max_atoms, 1))

    gcnn1 = BatchNorm(
        GraphCNN(
            num_filters=64,
            in_layers=[self.vertex_features, self.adj_matrix, self.mask]))
    gcnn1 = Dropout(self.dropout, in_layers=gcnn1)
    gcnn2 = BatchNorm(
        GraphCNN(num_filters=64, in_layers=[gcnn1, self.adj_matrix, self.mask]))
    gcnn2 = Dropout(self.dropout, in_layers=gcnn2)
    gc_pool, adj_matrix = GraphCNNPool(
        num_vertices=32, in_layers=[gcnn2, self.adj_matrix, self.mask])
    gc_pool = BatchNorm(gc_pool)
    gc_pool = Dropout(self.dropout, in_layers=gc_pool)
    gcnn3 = BatchNorm(GraphCNN(num_filters=32, in_layers=[gc_pool, adj_matrix]))
    gcnn3 = Dropout(self.dropout, in_layers=gcnn3)
    gc_pool2, adj_matrix2 = GraphCNNPool(
        num_vertices=8, in_layers=[gcnn3, adj_matrix])
    gc_pool2 = BatchNorm(gc_pool2)
    gc_pool2 = Dropout(self.dropout, in_layers=gc_pool2)
    flattened = Flatten(in_layers=gc_pool2)
    readout = Dense(
        out_channels=256, activation_fn=tf.nn.relu, in_layers=flattened)
    costs = []
    self.my_labels = []
    for task in range(self.n_tasks):
      if self.mode == 'classification':
        classification = Dense(
            out_channels=2, activation_fn=None, in_layers=[readout])

        softmax = SoftMax(in_layers=[classification])
        self.add_output(softmax)

        label = Label(shape=(None, 2))
        self.my_labels.append(label)
        cost = SoftMaxCrossEntropy(in_layers=[label, classification])
        costs.append(cost)
      if self.mode == 'regression':
        regression = Dense(
            out_channels=1, activation_fn=None, in_layers=[readout])
        self.add_output(regression)

        label = Label(shape=(None, 1))
        self.my_labels.append(label)
        cost = L2Loss(in_layers=[label, regression])
        costs.append(cost)
    if self.mode == "classification":
      entropy = Stack(in_layers=costs, axis=-1)
    elif self.mode == "regression":
      entropy = Stack(in_layers=costs, axis=1)
    self.my_task_weights = Weights(shape=(None, self.n_tasks))
    loss = WeightedError(in_layers=[entropy, self.my_task_weights])
    self.set_loss(loss)
Example #8
    def __init__(self,
                 img_rows=224,
                 img_cols=224,
                 weights="imagenet",
                 classes=1000,
                 **kwargs):
        super(ResNet50, self).__init__(use_queue=False, **kwargs)
        self.img_cols = img_cols
        self.img_rows = img_rows
        self.weights = weights
        self.classes = classes

        input = Feature(shape=(None, self.img_rows, self.img_cols, 3))
        labels = Label(shape=(None, self.classes))

        conv1 = Conv2D(num_outputs=64,
                       kernel_size=7,
                       stride=2,
                       activation='linear',
                       padding='same',
                       in_layers=[input])
        bn1 = BatchNorm(in_layers=[conv1])
        ac1 = ReLU(bn1)
        pool1 = MaxPool2D(ksize=[1, 3, 3, 1], in_layers=[ac1])

        cb1 = self.conv_block(pool1, 3, [64, 64, 256], 1)
        id1 = self.identity_block(cb1, 3, [64, 64, 256])
        id1 = self.identity_block(id1, 3, [64, 64, 256])

        cb2 = self.conv_block(id1, 3, [128, 128, 512])
        id2 = self.identity_block(cb2, 3, [128, 128, 512])
        id2 = self.identity_block(id2, 3, [128, 128, 512])
        id2 = self.identity_block(id2, 3, [128, 128, 512])

        cb3 = self.conv_block(id2, 3, [256, 256, 1024])
        id3 = self.identity_block(cb3, 3, [256, 256, 1024])
        id3 = self.identity_block(id3, 3, [256, 256, 1024])
        id3 = self.identity_block(id3, 3, [256, 256, 1024])
        id3 = self.identity_block(id3, 3, [256, 256, 1024])
        id3 = self.identity_block(id3, 3, [256, 256, 1024])

        cb4 = self.conv_block(id3, 3, [512, 512, 2048])
        id4 = self.identity_block(cb4, 3, [512, 512, 2048])
        id4 = self.identity_block(id4, 3, [512, 512, 2048])

        pool2 = AvgPool2D(ksize=[1, 7, 7, 1], in_layers=[id4])

        flatten = Flatten(in_layers=[pool2])
        dense = Dense(classes, in_layers=[flatten])

        loss = SoftMaxCrossEntropy(in_layers=[labels, dense])
        loss = ReduceMean(in_layers=[loss])
        self.set_loss(loss)
        self.add_output(dense)
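The conv_block and identity_block helpers are not shown in this excerpt. A minimal sketch of what identity_block could look like under the same layer API (the Add layer usage and the exact signature are assumptions, not the original code):

    def identity_block(self, input_layer, kernel_size, filters):
        # Bottleneck residual block: 1x1 -> kxk -> 1x1 convolutions, then a
        # skip connection adding the block input back in (Add is assumed).
        f1, f2, f3 = filters
        x = Conv2D(num_outputs=f1, kernel_size=1, activation='linear',
                   padding='same', in_layers=[input_layer])
        x = ReLU(BatchNorm(in_layers=[x]))
        x = Conv2D(num_outputs=f2, kernel_size=kernel_size,
                   activation='linear', padding='same', in_layers=[x])
        x = ReLU(BatchNorm(in_layers=[x]))
        x = Conv2D(num_outputs=f3, kernel_size=1, activation='linear',
                   padding='same', in_layers=[x])
        x = BatchNorm(in_layers=[x])
        x = Add(in_layers=[x, input_layer])
        return ReLU(x)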
Example #9
    def test_mnist(self):
        from tensorflow.examples.tutorials.mnist import input_data
        mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
        train = dc.data.NumpyDataset(mnist.train.images, mnist.train.labels)
        valid = dc.data.NumpyDataset(mnist.validation.images,
                                     mnist.validation.labels)

        # MNIST images arrive flattened to 784 features; they are reshaped
        # back to square 28x28 (batch, height, width, channel) below.
        feature = Feature(shape=(None, 784), name="Feature")
        make_image = Reshape(shape=(-1, 28, 28, 1), in_layers=[feature])

        conv2d_1 = Conv2D(num_outputs=32,
                          normalizer_fn=tf.contrib.layers.batch_norm,
                          in_layers=[make_image])
        maxpool_1 = MaxPool(in_layers=[conv2d_1])

        conv2d_2 = Conv2D(num_outputs=64,
                          normalizer_fn=tf.contrib.layers.batch_norm,
                          in_layers=[maxpool_1])
        maxpool_2 = MaxPool(in_layers=[conv2d_2])
        flatten = Flatten(in_layers=[maxpool_2])

        dense1 = Dense(out_channels=1024,
                       activation_fn=tf.nn.relu,
                       in_layers=[flatten])
        dense2 = Dense(out_channels=10, in_layers=[dense1])
        label = Label(shape=(None, 10), name="Label")
        smce = SoftMaxCrossEntropy(in_layers=[label, dense2])
        loss = ReduceMean(in_layers=[smce])
        output = SoftMax(in_layers=[dense2])

        tg = dc.models.TensorGraph(model_dir='/tmp/mnist',
                                   batch_size=1000,
                                   use_queue=True)
        tg.add_output(output)
        tg.set_loss(loss)
        tg.fit(train, nb_epoch=2)

        prediction = np.squeeze(tg.predict_proba_on_batch(valid.X))

        fpr = dict()
        tpr = dict()
        roc_auc = dict()
        for i in range(10):
            fpr[i], tpr[i], thresh = roc_curve(valid.y[:, i], prediction[:, i])
            roc_auc[i] = auc(fpr[i], tpr[i])
            assert_true(roc_auc[i] > 0.99)
Example #10
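# Fragment: the indented lines below appear to be the tail of a custom
# Conv2d layer's create_tensor method; its class definition and the
# tg/make_image setup referenced further down are not part of this excerpt.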
            activation_fn=tf.nn.relu,
            normalizer_fn=tf.contrib.layers.batch_norm)
        self.out_tensor = tf.nn.max_pool(out_tensor,
                                         ksize=[1, 2, 2, 1],
                                         strides=[1, 2, 2, 1],
                                         padding='SAME')
        return self.out_tensor


conv2d_1 = Conv2d(num_outputs=32)
tg.add_layer(conv2d_1, parents=[make_image])

conv2d_2 = Conv2d(num_outputs=64)
tg.add_layer(conv2d_2, parents=[conv2d_1])

flatten = Flatten()
tg.add_layer(flatten, parents=[conv2d_2])

dense1 = Dense(out_channels=1024, activation_fn=tf.nn.relu)
tg.add_layer(dense1, parents=[flatten])

dense2 = Dense(out_channels=10)
tg.add_layer(dense2, parents=[dense1])

label = Input(shape=(None, 10))
tg.add_label(label)

smce = SoftMaxCrossEntropy()
tg.add_layer(smce, parents=[label, dense2])

loss = ReduceMean()
tg.add_layer(loss, parents=[smce])  # assumed completion: the snippet is cut off
tg.set_loss(loss)                   # here; follows the add_layer/set_loss pattern of Example #5