Example #1
 def test_add(self):
   """Test that Add can be invoked."""
   value1 = np.random.uniform(size=(2, 3)).astype(np.float32)
   value2 = np.random.uniform(size=(2, 3)).astype(np.float32)
   value3 = np.random.uniform(size=(2, 3)).astype(np.float32)
   with self.test_session() as sess:
     out_tensor = Add(weights=[1, 2, 1])(
         tf.constant(value1), tf.constant(value2), tf.constant(value3))
     assert np.array_equal(value1 + 2 * value2 + value3, out_tensor.eval())
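The test above calls Add directly on tensors; Examples #5 through #7 instead wire it into a graph with in_layers. A minimal bridging sketch, assuming the DeepChem 2.x TensorGraph API that those later examples use (the import paths are an assumption):

# A minimal sketch, assuming the DeepChem 2.x TensorGraph API from the later examples.
from deepchem.models.tensorgraph.tensor_graph import TensorGraph
from deepchem.models.tensorgraph.layers import Add, Feature

tg = TensorGraph()
a = Feature(shape=(None, 3))
b = Feature(shape=(None, 3))
# weights scale each input before the sum: computes a + 2 * b
weighted_sum = Add(weights=[1, 2], in_layers=[a, b])
tg.add_output(weighted_sum)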
Example #2
 def test_reshape_inputs(self):
     """Test that layers can automatically reshape inconsistent inputs."""
     value1 = np.random.uniform(size=(2, 3)).astype(np.float32)
     value2 = np.random.uniform(size=(1, 6, 1)).astype(np.float32)
     with self.test_session() as sess:
         out_tensor = Add()(tf.constant(value1), tf.constant(value2))
         result = out_tensor.eval()
         assert result.shape == (1, 6, 1)
         assert np.array_equal(value1.reshape((1, 6, 1)) + value2, result)
Example #3
 def test_add(self):
   """Test that Add can be invoked."""
   value1 = np.random.uniform(size=(2, 3)).astype(np.float32)
   value2 = np.random.uniform(size=(2, 3)).astype(np.float32)
   value3 = np.random.uniform(size=(2, 3)).astype(np.float32)
   with self.session() as sess:
     out_tensor = Add(weights=[1, 2, 1])(tf.constant(value1),
                                         tf.constant(value2),
                                         tf.constant(value3))
     assert np.array_equal(value1 + 2 * value2 + value3, out_tensor.eval())
Example #4
    def identity_block(self, input, kernel_size, filters):
        filters1, filters2, filters3 = filters

        output = Conv2D(num_outputs=filters1,
                        kernel_size=1,
                        activation='linear',
                        padding='same',
                        in_layers=[input])
        output = BatchNorm(in_layers=[output])
        output = ReLU(output)

        output = Conv2D(num_outputs=filters2,
                        kernel_size=kernel_size,
                        activation='linear',
                        padding='same',
                        in_layers=[output])
        output = BatchNorm(in_layers=[output])
        output = ReLU(output)

        output = Conv2D(num_outputs=filters3,
                        kernel_size=1,
                        activation='linear',
                        padding='same',
                        in_layers=[output])
        output = BatchNorm(in_layers=[output])

        output = Add(in_layers=[output, input])  # residual shortcut: add the block input back
        output = ReLU(output)

        return output
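A hedged usage sketch for the block above; the Feature name and shape are hypothetical, and the input's channel count must equal filters3 for the residual Add to be shape-compatible:

# Hypothetical wiring; the shape (None, 32, 32, 256) is illustrative only.
images = Feature(shape=(None, 32, 32, 256))
out = self.identity_block(images, kernel_size=3, filters=(64, 64, 256))
# The block computes ReLU(BatchNorm(Conv(...)) + input), i.e. a ResNet
# identity mapping, so filters3 (256) must match the input channels.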
Example #5
def test_Constant_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 1))
  layer = Constant(np.array([15.0]))
  output = Add(in_layers=[feature, layer])
  tg.add_output(output)
  tg.set_loss(output)
  tg.build()
  tg.save()
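For the round trip, DeepChem 2.x exposed a static TensorGraph.load_from_dir; a sketch, treating the exact call as an assumption:

# Assumption: TensorGraph.load_from_dir restores a graph saved with tg.save().
restored = TensorGraph.load_from_dir(tg.model_dir)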
Example #6
    def build_graph(self):
        print("building")
        features = Feature(shape=(None, self.n_features))
        last_layer = features
        for layer_size in self.encoder_layers:
            last_layer = Dense(in_layers=last_layer,
                               activation_fn=tf.nn.elu,
                               out_channels=layer_size)

        self.mean = Dense(in_layers=last_layer,
                          activation_fn=None,
                          out_channels=1)
        self.std = Dense(in_layers=last_layer,
                         activation_fn=None,
                         out_channels=1)

        readout = CombineMeanStd([self.mean, self.std], training_only=True)
        last_layer = readout
        for layer_size in self.decoder_layers:
            last_layer = Dense(in_layers=last_layer,
                               activation_fn=tf.nn.elu,
                               out_channels=layer_size)

        self.reconstruction = Dense(in_layers=last_layer,
                                    activation_fn=None,
                                    out_channels=self.n_features)
        weights = Weights(shape=(None, self.n_features))
        reproduction_loss = L2Loss(
            in_layers=[features, self.reconstruction, weights])
        reproduction_loss = ReduceSum(in_layers=reproduction_loss, axis=0)
        global_step = TensorWrapper(self._get_tf("GlobalStep"))
        kl_loss = KLDivergenceLoss(
            in_layers=[self.mean, self.std, global_step],
            annealing_start_step=self.kl_annealing_start_step,
            annealing_stop_step=self.kl_annealing_stop_step)
        loss = Add(in_layers=[kl_loss, reproduction_loss], weights=[0.5, 1])

        self.add_output(self.mean)
        self.add_output(self.reconstruction)
        self.set_loss(loss)
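CombineMeanStd with training_only=True presumably applies the reparameterization trick at train time and passes the mean through at inference; a NumPy sketch of that sampling step (an assumption about the layer's internals):

import numpy as np

def reparameterize(mean, std, training=True):
    """Sketch of the sampling CombineMeanStd is assumed to perform."""
    if not training:
        return mean  # inference: deterministic pass-through of the mean
    eps = np.random.normal(size=mean.shape)
    return mean + std * eps  # differentiable sampling from N(mean, std^2)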
Example #7
    def conv_block(self, input, kernel_size, filters, strides=2):
        filters1, filters2, filters3 = filters

        output = Conv2D(num_outputs=filters1,
                        kernel_size=1,
                        stride=strides,
                        activation='linear',
                        padding='same',
                        in_layers=[input])
        output = BatchNorm(in_layers=[output])
        output = ReLU(output)

        output = Conv2D(num_outputs=filters2,
                        kernel_size=kernel_size,
                        activation='linear',
                        padding='same',
                        in_layers=[output])
        output = BatchNorm(in_layers=[output])
        output = ReLU(output)

        output = Conv2D(num_outputs=filters3,
                        kernel_size=1,
                        activation='linear',
                        padding='same',
                        in_layers=[output])
        output = BatchNorm(in_layers=[output])

        shortcut = Conv2D(num_outputs=filters3,
                          kernel_size=1,
                          stride=strides,
                          activation='linear',
                          padding='same',
                          in_layers=[input])
        shortcut = BatchNorm(in_layers=[shortcut])
        output = Add(in_layers=[shortcut, output])
        output = ReLU(output)

        return output
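conv_block differs from identity_block in that the first convolution strides and the shortcut is projected by a strided 1x1 convolution, so both branches of the Add have matching shapes. Stacked together, the two blocks form a ResNet-style stage; a hypothetical sketch:

    # Hypothetical helper combining the two blocks above into one stage.
    def resnet_stage(self, input, kernel_size, filters, blocks, strides=2):
        output = self.conv_block(input, kernel_size, filters, strides=strides)
        for _ in range(blocks - 1):
            output = self.identity_block(output, kernel_size, filters)
        return output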
def sluice_model(batch_size, tasks):
    model = TensorGraph(model_dir=model_dir,
                        batch_size=batch_size,
                        use_queue=False,
                        tensorboard=True)
    atom_features = Feature(shape=(None, 75))
    degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
    membership = Feature(shape=(None, ), dtype=tf.int32)

    sluice_loss = []
    deg_adjs = []
    for i in range(0, 10 + 1):  # deg_adjs[i] holds the adjacency Feature for nodes of degree i + 1
        deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
        deg_adjs.append(deg_adj)

    gc1 = GraphConv(64,
                    activation_fn=tf.nn.relu,
                    in_layers=[atom_features, degree_slice, membership] +
                    deg_adjs)

    as1 = AlphaShare(in_layers=[gc1, gc1])
    sluice_loss.append(gc1)

    batch_norm1a = BatchNorm(in_layers=[as1[0]])
    batch_norm1b = BatchNorm(in_layers=[as1[1]])

    gp1a = GraphPool(in_layers=[batch_norm1a, degree_slice, membership] +
                     deg_adjs)
    gp1b = GraphPool(in_layers=[batch_norm1b, degree_slice, membership] +
                     deg_adjs)

    gc2a = GraphConv(64,
                     activation_fn=tf.nn.relu,
                     in_layers=[gp1a, degree_slice, membership] + deg_adjs)
    gc2b = GraphConv(64,
                     activation_fn=tf.nn.relu,
                     in_layers=[gp1b, degree_slice, membership] + deg_adjs)

    as2 = AlphaShare(in_layers=[gc2a, gc2b])
    sluice_loss.append(gc2a)
    sluice_loss.append(gc2b)

    batch_norm2a = BatchNorm(in_layers=[as2[0]])
    batch_norm2b = BatchNorm(in_layers=[as2[1]])

    gp2a = GraphPool(in_layers=[batch_norm2a, degree_slice, membership] +
                     deg_adjs)
    gp2b = GraphPool(in_layers=[batch_norm2b, degree_slice, membership] +
                     deg_adjs)

    densea = Dense(out_channels=128, activation_fn=None, in_layers=[gp2a])
    denseb = Dense(out_channels=128, activation_fn=None, in_layers=[gp2b])

    batch_norm3a = BatchNorm(in_layers=[densea])
    batch_norm3b = BatchNorm(in_layers=[denseb])

    as3 = AlphaShare(in_layers=[batch_norm3a, batch_norm3b])
    sluice_loss.append(batch_norm3a)
    sluice_loss.append(batch_norm3b)

    gg1a = GraphGather(batch_size=batch_size,
                       activation_fn=tf.nn.tanh,
                       in_layers=[as3[0], degree_slice, membership] + deg_adjs)
    gg1b = GraphGather(batch_size=batch_size,
                       activation_fn=tf.nn.tanh,
                       in_layers=[as3[1], degree_slice, membership] + deg_adjs)

    costs = []
    labels = []
    count = 0
    for task in tasks:
        if count < len(tasks) / 2:
            classification = Dense(out_channels=2,
                                   activation_fn=None,
                                   in_layers=[gg1a])
            print("first half:")
            print(task)
        else:
            classification = Dense(out_channels=2,
                                   activation_fn=None,
                                   in_layers=[gg1b])
            print('second half')
            print(task)
        count += 1

        softmax = SoftMax(in_layers=[classification])
        model.add_output(softmax)

        label = Label(shape=(None, 2))
        labels.append(label)
        cost = SoftMaxCrossEntropy(in_layers=[label, classification])
        costs.append(cost)

    entropy = Concat(in_layers=costs)
    task_weights = Weights(shape=(None, len(tasks)))
    task_loss = WeightedError(in_layers=[entropy, task_weights])

    s_cost = SluiceLoss(in_layers=sluice_loss)

    total_loss = Add(in_layers=[task_loss, s_cost])
    model.set_loss(total_loss)

    def feed_dict_generator(dataset, batch_size, epochs=1):
        for epoch in range(epochs):
            for ind, (X_b, y_b, w_b, ids_b) in enumerate(
                    dataset.iterbatches(batch_size, pad_batches=True)):
                d = {}
                for index, label in enumerate(labels):
                    d[label] = to_one_hot(y_b[:, index])
                d[task_weights] = w_b
                multiConvMol = ConvMol.agglomerate_mols(X_b)
                d[atom_features] = multiConvMol.get_atom_features()
                d[degree_slice] = multiConvMol.deg_slice
                d[membership] = multiConvMol.membership
                for i in range(1, len(multiConvMol.get_deg_adjacency_lists())):
                    d[deg_adjs[i - 1]] = multiConvMol.get_deg_adjacency_lists()[i]
                yield d

    return model, feed_dict_generator, labels, task_weights
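A hedged training sketch for the model returned above; fit_generator was TensorGraph's generator-based training entry point in DeepChem 2.x, and train_dataset and the task list are hypothetical:

# Hypothetical driver; assumes TensorGraph.fit_generator accepts the
# feed_dict generator returned by sluice_model.
model, generator, labels, task_weights = sluice_model(batch_size=64, tasks=tasks)
model.fit_generator(generator(train_dataset, batch_size=64, epochs=10))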