Example 1
 def test_reduce_sum(self):
   """Test that ReduceSum can be invoked."""
   batch_size = 10
   n_features = 5
   in_tensor = np.random.rand(batch_size, n_features)
   with self.session() as sess:
     in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
     out_tensor = ReduceSum()(in_tensor)
     out_tensor = out_tensor.eval()
     assert isinstance(out_tensor, np.float32)
Example 2
 def test_reduce_sum(self):
     """Test that ReduceSum can be invoked."""
     batch_size = 10
     n_features = 5
     in_tensor = np.random.rand(batch_size, n_features)
     with self.session() as sess:
         in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
         out_tensor = ReduceSum()(in_tensor)
         out_tensor = out_tensor.eval()
         assert isinstance(out_tensor, np.float32)
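
The test above (shown twice, with different formatting) asserts that invoking ReduceSum with no axis argument collapses the whole tensor to a single float32 scalar. A minimal standalone sketch of the same check against the TensorFlow 1.x session API, using tf.reduce_sum directly:

import numpy as np
import tensorflow as tf

x = np.random.rand(10, 5)
with tf.Graph().as_default(), tf.Session() as sess:
    t = tf.convert_to_tensor(x, dtype=tf.float32)
    total = tf.reduce_sum(t)          # no axis: reduce over every dimension
    result = sess.run(total)
    assert isinstance(result, np.float32)   # a single scalar, as asserted above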
Example 3
  def test_neighbor_list_vina(self):
    """Test under conditions closer to Vina usage."""
    N_atoms = 5
    M_nbrs = 2
    ndim = 3
    start = 0
    stop = 4
    nbr_cutoff = 1

    X = NumpyDataset(start + np.random.rand(N_atoms, ndim) * (stop - start))

    coords = Feature(shape=(N_atoms, ndim))

    # Now an (N, M) shape
    nbr_list = NeighborList(
        N_atoms, M_nbrs, ndim, nbr_cutoff, start, stop, in_layers=[coords])

    nbr_list = ToFloat(in_layers=[nbr_list])
    flattened = Flatten(in_layers=[nbr_list])
    dense = Dense(out_channels=1, in_layers=[flattened])
    output = ReduceSum(in_layers=[dense])

    tg = dc.models.TensorGraph(learning_rate=0.1, use_queue=False)
    tg.set_loss(output)

    databag = Databag({coords: X})
    tg.fit_generator(databag.iterbatches(epochs=1))
Example 4
    def test_neighbor_list_simple(self):
        """Test that neighbor lists can be constructed."""
        N_atoms = 10
        start = 0
        stop = 12
        nbr_cutoff = 3
        ndim = 3
        M = 6
        X = np.random.rand(N_atoms, ndim)
        y = np.random.rand(N_atoms, 1)
        dataset = NumpyDataset(X, y)

        features = Feature(shape=(N_atoms, ndim))
        labels = Label(shape=(N_atoms, ))
        nbr_list = NeighborList(N_atoms,
                                M,
                                ndim,
                                nbr_cutoff,
                                start,
                                stop,
                                in_layers=[features])
        nbr_list = ToFloat(in_layers=[nbr_list])
        # This isn't a meaningful loss; it's just here for the test.
        loss = ReduceSum(in_layers=[nbr_list])
        tg = dc.models.TensorGraph(use_queue=False)
        tg.add_output(nbr_list)
        tg.set_loss(loss)

        tg.build()
Example 5
    def test_save_load(self):
        n_data_points = 20
        n_features = 2
        X = np.random.rand(n_data_points, n_features)
        y = [[0, 1] for x in range(n_data_points)]
        dataset = NumpyDataset(X, y)
        features = Feature(shape=(None, n_features))
        dense = Dense(out_channels=2, in_layers=[features])
        output = SoftMax(in_layers=[dense])
        label = Label(shape=(None, 2))
        smce = SoftMaxCrossEntropy(in_layers=[label, dense])
        loss = ReduceMean(in_layers=[smce])
        tg = dc.models.TensorGraph(learning_rate=0.01)
        tg.add_output(output)
        tg.set_loss(loss)
        submodel_loss = ReduceSum(in_layers=smce)
        submodel_opt = Adam(learning_rate=0.002)
        submodel = tg.create_submodel(layers=[dense],
                                      loss=submodel_loss,
                                      optimizer=submodel_opt)
        tg.fit(dataset, nb_epoch=1)
        prediction = np.squeeze(tg.predict_on_batch(X))
        tg.save()

        dirpath = tempfile.mkdtemp()
        shutil.rmtree(dirpath)
        shutil.move(tg.model_dir, dirpath)

        tg1 = TensorGraph.load_from_dir(dirpath)
        prediction2 = np.squeeze(tg1.predict_on_batch(X))
        assert_true(np.all(np.isclose(prediction, prediction2, atol=0.01)))
Example 6
    def _build_graph(self):
        self.atom_flags = Feature(shape=(None,
                                         self.max_atoms * self.max_atoms))
        self.atom_feats = Feature(shape=(None, self.max_atoms * self.n_feat))

        reshaped_atom_feats = Reshape(in_layers=[self.atom_feats],
                                      shape=(-1, self.max_atoms, self.n_feat))
        reshaped_atom_flags = Reshape(in_layers=[self.atom_flags],
                                      shape=(-1, self.max_atoms,
                                             self.max_atoms))

        previous_layer = reshaped_atom_feats

        Hiddens = []
        for n_hidden in self.layer_structures:
            Hidden = Dense(out_channels=n_hidden,
                           activation_fn=tf.nn.tanh,
                           in_layers=[previous_layer])
            Hiddens.append(Hidden)
            previous_layer = Hiddens[-1]

        regression = Dense(out_channels=1 * self.n_tasks,
                           activation_fn=None,
                           in_layers=[Hiddens[-1]])
        output = BPGather(self.max_atoms,
                          in_layers=[regression, reshaped_atom_flags])
        self.add_output(output)

        label = Label(shape=(None, self.n_tasks, 1))
        loss = ReduceSum(L2Loss(in_layers=[label, output]))
        weights = Weights(shape=(None, self.n_tasks))

        weighted_loss = WeightedError(in_layers=[loss, weights])
        self.set_loss(weighted_loss)
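
The loss wiring above (a per-example L2Loss collapsed by ReduceSum and then reweighted with WeightedError) recurs in most of the remaining examples. A minimal self-contained sketch of that pattern, assuming the layers live at deepchem.models.tensorgraph.layers as in the legacy TensorGraph API these snippets come from:

import deepchem as dc
from deepchem.models.tensorgraph.layers import (Feature, Label, Weights, Dense,
                                                L2Loss, ReduceSum, WeightedError)

n_features, n_tasks = 4, 2
features = Feature(shape=(None, n_features))
labels = Label(shape=(None, n_tasks))
weights = Weights(shape=(None, n_tasks))

output = Dense(out_channels=n_tasks, in_layers=[features])
loss = ReduceSum(L2Loss(in_layers=[labels, output]))
weighted_loss = WeightedError(in_layers=[loss, weights])

tg = dc.models.TensorGraph(use_queue=False)
tg.add_output(output)
tg.set_loss(weighted_loss)
tg.build()   # only builds the graph, as the pickle test below does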
Example 7
def test_ReduceSum_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 1))
  layer = ReduceSum(in_layers=[feature])
  tg.add_output(layer)
  tg.set_loss(layer)
  tg.build()
  tg.save()
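
As the save/load test in Example 5 shows, a graph saved this way can be restored from its model directory. A minimal follow-up sketch, assuming the same tg as above and that load_from_dir preserves the registered outputs:

tg_restored = TensorGraph.load_from_dir(tg.model_dir)   # round-trip the pickled graph
assert len(tg_restored.outputs) == 1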
Example 8
    def _build(self):
        self.A_tilda_k = list()
        for k in range(1, self.k_max + 1):
            self.A_tilda_k.append(
                Feature(name="graph_adjacency_{}".format(k),
                        dtype=tf.float32,
                        shape=[None, self.max_nodes, self.max_nodes]))
        self.X = Feature(name='atom_features',
                         dtype=tf.float32,
                         shape=[None, self.max_nodes, self.num_node_features])

        graph_layers = list()
        adaptive_filters = list()

        for index, k in enumerate(range(1, self.k_max + 1)):

            in_layers = [self.A_tilda_k[index], self.X]

            adaptive_filters.append(
                AdaptiveFilter(batch_size=self.batch_size,
                               in_layers=in_layers,
                               num_nodes=self.max_nodes,
                               num_node_features=self.num_node_features,
                               combine_method=self.combine_method))

            graph_layers.append(
                KOrderGraphConv(batch_size=self.batch_size,
                                in_layers=in_layers +
                                [adaptive_filters[index]],
                                num_nodes=self.max_nodes,
                                num_node_features=self.num_node_features,
                                init='glorot_uniform'))

        graph_features = Concat(in_layers=graph_layers, axis=2)
        graph_features = ReLU(in_layers=[graph_features])
        flattened = Flatten(in_layers=[graph_features])

        dense1 = Dense(in_layers=[flattened],
                       out_channels=64,
                       activation_fn=tf.nn.relu)
        dense2 = Dense(in_layers=[dense1],
                       out_channels=16,
                       activation_fn=tf.nn.relu)
        dense3 = Dense(in_layers=[dense2],
                       out_channels=1 * self.n_tasks,
                       activation_fn=None)
        output = Reshape(in_layers=[dense3], shape=(-1, self.n_tasks, 1))
        self.add_output(output)

        label = Label(shape=(None, self.n_tasks, 1))
        weights = Weights(shape=(None, self.n_tasks))
        loss = ReduceSum(L2Loss(in_layers=[label, output]))

        weighted_loss = WeightedError(in_layers=[loss, weights])
        self.set_loss(weighted_loss)
Example 9
    def _build_graph(self):
        self.smiles_seqs = Feature(shape=(None, self.seq_length),
                                   dtype=tf.int32)
        # Character embedding
        Embedding = DTNNEmbedding(
            n_embedding=self.n_embedding,
            periodic_table_length=len(self.char_dict.keys()) + 1,
            in_layers=[self.smiles_seqs])
        pooled_outputs = []
        conv_layers = []
        for filter_size, num_filter in zip(self.kernel_sizes,
                                           self.num_filters):
            # Multiple convolutional layers with different filter widths
            conv_layers.append(
                Conv1D(kernel_size=filter_size,
                       filters=num_filter,
                       padding='valid',
                       in_layers=[Embedding]))
            # Max-over-time pooling
            pooled_outputs.append(
                ReduceMax(axis=1, in_layers=[conv_layers[-1]]))
        # Concatenate features from all filters (one feature per filter)
        concat_outputs = Concat(axis=1, in_layers=pooled_outputs)
        dropout = Dropout(dropout_prob=self.dropout,
                          in_layers=[concat_outputs])
        dense = Dense(out_channels=200,
                      activation_fn=tf.nn.relu,
                      in_layers=[dropout])
        # Highway layer from https://arxiv.org/pdf/1505.00387.pdf
        gather = Highway(in_layers=[dense])

        if self.mode == "classification":
            logits = Dense(out_channels=self.n_tasks * 2,
                           activation_fn=None,
                           in_layers=[gather])
            logits = Reshape(shape=(-1, self.n_tasks, 2), in_layers=[logits])
            output = SoftMax(in_layers=[logits])
            self.add_output(output)
            labels = Label(shape=(None, self.n_tasks, 2))
            loss = SoftMaxCrossEntropy(in_layers=[labels, logits])

        else:
            vals = Dense(out_channels=self.n_tasks * 1,
                         activation_fn=None,
                         in_layers=[gather])
            vals = Reshape(shape=(-1, self.n_tasks, 1), in_layers=[vals])
            self.add_output(vals)
            labels = Label(shape=(None, self.n_tasks, 1))
            loss = ReduceSum(L2Loss(in_layers=[labels, vals]))

        weights = Weights(shape=(None, self.n_tasks))
        weighted_loss = WeightedError(in_layers=[loss, weights])
        self.set_loss(weighted_loss)
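
The Highway layer referenced above (https://arxiv.org/pdf/1505.00387.pdf) gates a learned transform H(x) against a carry of the input: y = T(x) * H(x) + (1 - T(x)) * x, with T(x) a sigmoid gate. A plain NumPy sketch of that gating, with an illustrative activation and random weights rather than the DeepChem layer's internals:

import numpy as np

def highway(x, W_h, b_h, W_t, b_t):
    """y = T(x) * H(x) + (1 - T(x)) * x  (Srivastava et al., 2015)."""
    H = np.tanh(x @ W_h + b_h)                    # candidate transform (activation is illustrative)
    T = 1.0 / (1.0 + np.exp(-(x @ W_t + b_t)))    # transform gate in (0, 1)
    return T * H + (1.0 - T) * x

np.random.seed(0)
x = np.random.rand(4, 8)
y = highway(x, np.random.randn(8, 8), np.zeros(8),
            np.random.randn(8, 8), np.full(8, -1.0))   # negative gate bias favors carrying x
print(y.shape)   # (4, 8)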
Example 10
    def build_graph(self):
        """Building graph structures:
            Features => DTNNEmbedding => DTNNStep => DTNNStep => DTNNGather => Regression
            """
        self.atom_number = Feature(shape=(None, ), dtype=tf.int32)
        self.distance = Feature(shape=(None, self.n_distance))
        self.atom_membership = Feature(shape=(None, ), dtype=tf.int32)
        self.distance_membership_i = Feature(shape=(None, ), dtype=tf.int32)
        self.distance_membership_j = Feature(shape=(None, ), dtype=tf.int32)

        dtnn_embedding = DTNNEmbedding(n_embedding=self.n_embedding,
                                       in_layers=[self.atom_number])
        if self.dropout > 0.0:
            dtnn_embedding = Dropout(self.dropout, in_layers=dtnn_embedding)
        dtnn_layer1 = DTNNStep(n_embedding=self.n_embedding,
                               n_distance=self.n_distance,
                               in_layers=[
                                   dtnn_embedding, self.distance,
                                   self.distance_membership_i,
                                   self.distance_membership_j
                               ])
        if self.dropout > 0.0:
            dtnn_layer1 = Dropout(self.dropout, in_layers=dtnn_layer1)
        dtnn_layer2 = DTNNStep(n_embedding=self.n_embedding,
                               n_distance=self.n_distance,
                               in_layers=[
                                   dtnn_layer1, self.distance,
                                   self.distance_membership_i,
                                   self.distance_membership_j
                               ])
        if self.dropout > 0.0:
            dtnn_layer2 = Dropout(self.dropout, in_layers=dtnn_layer2)
        dtnn_gather = DTNNGather(n_embedding=self.n_embedding,
                                 layer_sizes=[self.n_hidden],
                                 n_outputs=self.n_tasks,
                                 output_activation=self.output_activation,
                                 in_layers=[dtnn_layer2, self.atom_membership])
        if self.dropout > 0.0:
            dtnn_gather = Dropout(self.dropout, in_layers=dtnn_gather)

        n_tasks = self.n_tasks
        weights = Weights(shape=(None, n_tasks))
        labels = Label(shape=(None, n_tasks))
        output = Reshape(
            shape=(None, n_tasks),
            in_layers=[Dense(in_layers=dtnn_gather, out_channels=n_tasks)])
        self.add_output(output)
        weighted_loss = ReduceSum(L2Loss(in_layers=[labels, output, weights]))
        self.set_loss(weighted_loss)
Example 11
  def test_change_loss_function(self):
    tasks, dataset, transformers, metric = self.get_dataset(
        'regression', 'GraphConv', num_tasks=1)

    batch_size = 50
    model = GraphConvModel(len(tasks), batch_size=batch_size, mode='regression')

    model.fit(dataset, nb_epoch=1)
    model.save()

    model2 = TensorGraph.load_from_dir(model.model_dir, restore=False)
    dummy_label = model2.labels[-1]
    dummy_output = model2.outputs[-1]
    loss = ReduceSum(L2Loss(in_layers=[dummy_label, dummy_output]))
    module = model2.create_submodel(loss=loss)
    model2.restore()
    model2.fit(dataset, nb_epoch=1, submodel=module)
Example 12
    def build_graph(self):

        self.atom_numbers = Feature(shape=(None, self.max_atoms),
                                    dtype=tf.int32)
        self.atom_flags = Feature(shape=(None,
                                         self.max_atoms * self.max_atoms))
        self.atom_feats = Feature(shape=(None, self.max_atoms * 4))

        reshaped_atom_flags = Reshape(in_layers=[self.atom_flags],
                                      shape=(-1, self.max_atoms,
                                             self.max_atoms))
        reshaped_atom_feats = Reshape(in_layers=[self.atom_feats],
                                      shape=(-1, self.max_atoms, 4))

        previous_layer = ANIFeat(in_layers=reshaped_atom_feats,
                                 max_atoms=self.max_atoms)

        self.featurized = previous_layer

        Hiddens = []
        for n_hidden in self.layer_structures:
            Hidden = AtomicDifferentiatedDense(
                self.max_atoms,
                n_hidden,
                self.atom_number_cases,
                activation=self.activation_fn,
                in_layers=[previous_layer, self.atom_numbers])
            Hiddens.append(Hidden)
            previous_layer = Hiddens[-1]

        regression = Dense(out_channels=1 * self.n_tasks,
                           activation_fn=None,
                           in_layers=[Hiddens[-1]])
        output = BPGather(self.max_atoms,
                          in_layers=[regression, reshaped_atom_flags])
        self.add_output(output)

        label = Label(shape=(None, self.n_tasks, 1))
        loss = ReduceSum(L2Loss(in_layers=[label, output]))
        weights = Weights(shape=(None, self.n_tasks))

        weighted_loss = WeightedError(in_layers=[loss, weights])
        if self.exp_loss:
            weighted_loss = Exp(in_layers=[weighted_loss])
        self.set_loss(weighted_loss)
Example 13
    def build_graph(self):
        print("building")
        features = Feature(shape=(None, self.n_features))
        last_layer = features
        for layer_size in self.encoder_layers:
            last_layer = Dense(in_layers=last_layer,
                               activation_fn=tf.nn.elu,
                               out_channels=layer_size)

        self.mean = Dense(in_layers=last_layer,
                          activation_fn=None,
                          out_channels=1)
        self.std = Dense(in_layers=last_layer,
                         activation_fn=None,
                         out_channels=1)

        readout = CombineMeanStd([self.mean, self.std], training_only=True)
        last_layer = readout
        for layer_size in self.decoder_layers:
            last_layer = Dense(in_layers=last_layer,
                               activation_fn=tf.nn.elu,
                               out_channels=layer_size)

        self.reconstruction = Dense(in_layers=last_layer,
                                    activation_fn=None,
                                    out_channels=self.n_features)
        weights = Weights(shape=(None, self.n_features))
        reproduction_loss = L2Loss(
            in_layers=[features, self.reconstruction, weights])
        reproduction_loss = ReduceSum(in_layers=reproduction_loss, axis=0)
        global_step = TensorWrapper(self._get_tf("GlobalStep"))
        kl_loss = KLDivergenceLoss(
            in_layers=[self.mean, self.std, global_step],
            annealing_start_step=self.kl_annealing_start_step,
            annealing_stop_step=self.kl_annealing_stop_step)
        loss = Add(in_layers=[kl_loss, reproduction_loss], weights=[0.5, 1])

        self.add_output(self.mean)
        self.add_output(self.reconstruction)
        self.set_loss(loss)
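
Here CombineMeanStd plays the reparameterization role of a variational-autoencoder sampler (sampling only during training), and KLDivergenceLoss regularizes the latent mean/std toward a unit Gaussian with annealing between the given steps. A plain NumPy sketch of the two standard quantities involved, under the usual diagonal-Gaussian assumptions (illustrative, not the DeepChem layer internals):

import numpy as np

def sample_latent(mean, std, training=True):
    """Reparameterized sample z = mean + std * eps, eps ~ N(0, 1); mean only at inference."""
    if not training:
        return mean
    return mean + std * np.random.standard_normal(mean.shape)

def kl_to_unit_gaussian(mean, std):
    """KL(N(mean, std^2) || N(0, 1)) for a diagonal Gaussian."""
    return 0.5 * np.sum(mean ** 2 + std ** 2 - np.log(std ** 2 + 1e-8) - 1.0)

mean = np.zeros((5, 1))
std = np.ones((5, 1))
print(sample_latent(mean, std).shape)       # (5, 1)
print(kl_to_unit_gaussian(mean, std))       # ~0.0 for a unit Gaussian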
Example 14
  def test_weighted_combo(self):
    """Tests that weighted linear combinations can be built"""
    N = 10
    n_features = 5

    X1 = NumpyDataset(np.random.rand(N, n_features))
    X2 = NumpyDataset(np.random.rand(N, n_features))
    y = NumpyDataset(np.random.rand(N))

    features_1 = Feature(shape=(None, n_features))
    features_2 = Feature(shape=(None, n_features))
    labels = Label(shape=(None,))

    combo = WeightedLinearCombo(in_layers=[features_1, features_2])
    out = ReduceSum(in_layers=[combo], axis=1)
    loss = ReduceSquareDifference(in_layers=[out, labels])

    databag = Databag({features_1: X1, features_2: X2, labels: y})

    tg = dc.models.TensorGraph(learning_rate=0.1, use_queue=False)
    tg.set_loss(loss)
    tg.fit_generator(databag.iterbatches(epochs=1))
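
Schematically, the loss driven through fit_generator above is the squared difference between the label and a per-row sum of a learned linear combination of the two feature blocks. A rough NumPy sketch with hypothetical combination weights w1 and w2 (the actual WeightedLinearCombo weights are learned, and ReduceSquareDifference is only approximated here):

import numpy as np

N, n_features = 10, 5
X1 = np.random.rand(N, n_features)
X2 = np.random.rand(N, n_features)
y = np.random.rand(N)

w1, w2 = 0.3, 0.7                       # hypothetical learned scalars
combo = w1 * X1 + w2 * X2               # WeightedLinearCombo, schematically
out = combo.sum(axis=1)                 # ReduceSum(axis=1)
loss = np.mean((out - y) ** 2)          # squared difference, reduced to a scalar
print(loss)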
Example 15
 def create_loss(self, layer, label, weight):
   weighted_loss = ReduceSum(L2Loss(in_layers=[label, layer, weight]))
   return weighted_loss
Example 16
    def __init__(self,
                 n_tasks,
                 n_features,
                 alpha_init_stddevs=0.02,
                 layer_sizes=[1000],
                 weight_init_stddevs=0.02,
                 bias_init_consts=1.0,
                 weight_decay_penalty=0.0,
                 weight_decay_penalty_type="l2",
                 dropouts=0.5,
                 activation_fns=tf.nn.relu,
                 **kwargs):
        """Creates a progressive network.
  
    Only listing parameters specific to progressive networks here.

    Parameters
    ----------
    n_tasks: int
      Number of tasks
    n_features: int
      Number of input features
    alpha_init_stddevs: list or float
      the standard deviation of the distribution to use for initializing alpha in the adapter layers.
      Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
    layer_sizes: list
      the size of each dense layer in the network.  The length of this list determines the number of layers.
    weight_init_stddevs: list or float
      the standard deviation of the distribution to use for weight initialization of each layer.  The length
      of this list should equal len(layer_sizes)+1.  The final element corresponds to the output layer.
      Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
    bias_init_consts: list or float
      the value to initialize the biases in each layer to.  The length of this list should equal len(layer_sizes)+1.
      The final element corresponds to the output layer.  Alternatively this may be a single value instead of a list,
      in which case the same value is used for every layer.
    weight_decay_penalty: float
      the magnitude of the weight decay penalty to use
    weight_decay_penalty_type: str
      the type of penalty to use for weight decay, either 'l1' or 'l2'
    dropouts: list or float
      the dropout probability to use for each layer.  The length of this list should equal len(layer_sizes).
      Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
    activation_fns: list or object
      the Tensorflow activation function to apply to each layer.  The length of this list should equal
      len(layer_sizes).  Alternatively this may be a single value instead of a list, in which case the
      same value is used for every layer.
    """

        super(ProgressiveMultitaskRegressor, self).__init__(**kwargs)
        self.n_tasks = n_tasks
        self.n_features = n_features
        self.layer_sizes = layer_sizes
        self.alpha_init_stddevs = alpha_init_stddevs
        self.weight_init_stddevs = weight_init_stddevs
        self.bias_init_consts = bias_init_consts
        self.dropouts = dropouts
        self.activation_fns = activation_fns

        n_layers = len(layer_sizes)
        if not isinstance(weight_init_stddevs, collections.Sequence):
            self.weight_init_stddevs = [weight_init_stddevs] * n_layers
        if not isinstance(alpha_init_stddevs, collections.Sequence):
            self.alpha_init_stddevs = [alpha_init_stddevs] * n_layers
        if not isinstance(bias_init_consts, collections.Sequence):
            self.bias_init_consts = [bias_init_consts] * n_layers
        if not isinstance(dropouts, collections.Sequence):
            self.dropouts = [dropouts] * n_layers
        if not isinstance(activation_fns, collections.Sequence):
            self.activation_fns = [activation_fns] * n_layers

        # Add the input features.
        self.mol_features = Feature(shape=(None, n_features))

        all_layers = {}
        outputs = []
        for task in range(self.n_tasks):
            task_layers = []
            for i in range(n_layers):
                if i == 0:
                    prev_layer = self.mol_features
                else:
                    prev_layer = all_layers[(i - 1, task)]
                    if task > 0:
                        lateral_contrib, trainables = self.add_adapter(
                            all_layers, task, i)
                        task_layers.extend(trainables)

                layer = Dense(in_layers=[prev_layer],
                              out_channels=layer_sizes[i],
                              activation_fn=None,
                              weights_initializer=TFWrapper(
                                  tf.truncated_normal_initializer,
                                  stddev=self.weight_init_stddevs[i]),
                              biases_initializer=TFWrapper(
                                  tf.constant_initializer,
                                  value=self.bias_init_consts[i]))
                task_layers.append(layer)

                if i > 0 and task > 0:
                    layer = layer + lateral_contrib
                assert self.activation_fns[i] is tf.nn.relu, \
                    "Only ReLU is supported"
                layer = ReLU(in_layers=[layer])
                if self.dropouts[i] > 0.0:
                    layer = Dropout(self.dropouts[i], in_layers=[layer])
                all_layers[(i, task)] = layer

            prev_layer = all_layers[(n_layers - 1, task)]
            layer = Dense(in_layers=[prev_layer],
                          out_channels=1,
                          weights_initializer=TFWrapper(
                              tf.truncated_normal_initializer,
                              stddev=self.weight_init_stddevs[-1]),
                          biases_initializer=TFWrapper(
                              tf.constant_initializer,
                              value=self.bias_init_consts[-1]))
            task_layers.append(layer)

            if task > 0:
                lateral_contrib, trainables = self.add_adapter(
                    all_layers, task, n_layers)
                task_layers.extend(trainables)
                layer = layer + lateral_contrib
            outputs.append(layer)
            self.add_output(layer)
            task_label = Label(shape=(None, 1))
            task_weight = Weights(shape=(None, 1))
            weighted_loss = ReduceSum(
                L2Loss(in_layers=[task_label, layer, task_weight]))
            self.create_submodel(layers=task_layers,
                                 loss=weighted_loss,
                                 optimizer=None)
        # Weight decay not activated
        """
Example 17
    def __init__(self,
                 n_tasks,
                 n_features,
                 layer_sizes=[1000],
                 weight_init_stddevs=0.02,
                 bias_init_consts=1.0,
                 weight_decay_penalty=0.0,
                 weight_decay_penalty_type="l2",
                 dropouts=0.5,
                 activation_fns=tf.nn.relu,
                 bypass_layer_sizes=[100],
                 bypass_weight_init_stddevs=[.02],
                 bypass_bias_init_consts=[1.],
                 bypass_dropouts=[.5],
                 **kwargs):
        """ Create a RobustMultitaskRegressor.

    Parameters
    ----------
    n_tasks: int
      number of tasks
    n_features: int
      number of features
    layer_sizes: list
      the size of each dense layer in the network.  The length of this list determines the number of layers.
    weight_init_stddevs: list or float
      the standard deviation of the distribution to use for weight initialization of each layer.  The length
      of this list should equal len(layer_sizes).  Alternatively this may be a single value instead of a list,
      in which case the same value is used for every layer.
    bias_init_consts: list or float
      the value to initialize the biases in each layer to.  The length of this list should equal len(layer_sizes).
      Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
    weight_decay_penalty: float
      the magnitude of the weight decay penalty to use
    weight_decay_penalty_type: str
      the type of penalty to use for weight decay, either 'l1' or 'l2'
    dropouts: list or float
      the dropout probability to use for each layer.  The length of this list should equal len(layer_sizes).
      Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
    activation_fns: list or object
      the Tensorflow activation function to apply to each layer.  The length of this list should equal
      len(layer_sizes).  Alternatively this may be a single value instead of a list, in which case the
      same value is used for every layer.
    bypass_layer_sizes: list
      the size of each dense layer in the bypass network. The length of this list determines the number of bypass layers.
    bypass_weight_init_stddevs: list or float
      the standard deviation of the distribution to use for weight initialization of bypass layers.
      same requirements as weight_init_stddevs
    bypass_bias_init_consts: list or float
      the value to initialize the biases in bypass layers
      same requirements as bias_init_consts
    bypass_dropouts: list or float
      the dropout probability to use for bypass layers.
      same requirements as dropouts
    """
        super(RobustMultitaskRegressor, self).__init__(**kwargs)
        self.n_tasks = n_tasks
        self.n_features = n_features
        n_layers = len(layer_sizes)
        if not isinstance(weight_init_stddevs, collections.Sequence):
            weight_init_stddevs = [weight_init_stddevs] * n_layers
        if not isinstance(bias_init_consts, collections.Sequence):
            bias_init_consts = [bias_init_consts] * n_layers
        if not isinstance(dropouts, collections.Sequence):
            dropouts = [dropouts] * n_layers
        if not isinstance(activation_fns, collections.Sequence):
            activation_fns = [activation_fns] * n_layers

        n_bypass_layers = len(bypass_layer_sizes)
        if not isinstance(bypass_weight_init_stddevs, collections.Sequence):
            bypass_weight_init_stddevs = [bypass_weight_init_stddevs
                                          ] * n_bypass_layers
        if not isinstance(bypass_bias_init_consts, collections.Sequence):
            bypass_bias_init_consts = [bypass_bias_init_consts
                                       ] * n_bypass_layers
        if not isinstance(bypass_dropouts, collections.Sequence):
            bypass_dropouts = [bypass_dropouts] * n_bypass_layers
        bypass_activation_fns = [activation_fns[0]] * n_bypass_layers

        # Add the input features.
        mol_features = Feature(shape=(None, n_features))
        prev_layer = mol_features

        # Add the shared dense layers
        for size, weight_stddev, bias_const, dropout, activation_fn in zip(
                layer_sizes, weight_init_stddevs, bias_init_consts, dropouts,
                activation_fns):
            layer = Dense(in_layers=[prev_layer],
                          out_channels=size,
                          activation_fn=activation_fn,
                          weights_initializer=TFWrapper(
                              tf.truncated_normal_initializer,
                              stddev=weight_stddev),
                          biases_initializer=TFWrapper(tf.constant_initializer,
                                                       value=bias_const))
            if dropout > 0.0:
                layer = Dropout(dropout, in_layers=[layer])
            prev_layer = layer
        top_multitask_layer = prev_layer

        task_outputs = []
        for i in range(self.n_tasks):
            prev_layer = mol_features
            # Add task-specific bypass layers
            for size, weight_stddev, bias_const, dropout, activation_fn in zip(
                    bypass_layer_sizes, bypass_weight_init_stddevs,
                    bypass_bias_init_consts, bypass_dropouts,
                    bypass_activation_fns):
                layer = Dense(in_layers=[prev_layer],
                              out_channels=size,
                              activation_fn=activation_fn,
                              weights_initializer=TFWrapper(
                                  tf.truncated_normal_initializer,
                                  stddev=weight_stddev),
                              biases_initializer=TFWrapper(
                                  tf.constant_initializer, value=bias_const))
                if dropout > 0.0:
                    layer = Dropout(dropout, in_layers=[layer])
                prev_layer = layer
            top_bypass_layer = prev_layer

            if n_bypass_layers > 0:
                task_layer = Concat(
                    axis=1, in_layers=[top_multitask_layer, top_bypass_layer])
            else:
                task_layer = top_multitask_layer

            task_out = Dense(in_layers=[task_layer], out_channels=1)
            task_outputs.append(task_out)

        output = Concat(axis=1, in_layers=task_outputs)

        self.add_output(output)
        labels = Label(shape=(None, n_tasks))
        weights = Weights(shape=(None, n_tasks))
        weighted_loss = ReduceSum(L2Loss(in_layers=[labels, output, weights]))
        if weight_decay_penalty != 0.0:
            weighted_loss = WeightDecay(weight_decay_penalty,
                                        weight_decay_penalty_type,
                                        in_layers=[weighted_loss])
        self.set_loss(weighted_loss)
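
The isinstance(..., collections.Sequence) checks above broadcast a scalar hyperparameter to one value per layer; note that the collections.Sequence alias was removed in Python 3.10 in favor of collections.abc.Sequence. A small standalone sketch of the same broadcast rule (broadcast_param is a hypothetical helper name, not DeepChem API):

import collections.abc

def broadcast_param(value, n_layers):
    """Return a per-layer list: scalars are repeated, sequences are used as given."""
    if not isinstance(value, collections.abc.Sequence):
        return [value] * n_layers
    return list(value)

assert broadcast_param(0.5, 3) == [0.5, 0.5, 0.5]
assert broadcast_param([0.2, 0.3], 2) == [0.2, 0.3]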
Example 18
    def build_graph(self):
        """Building graph structures:
                Features => WeaveLayer => WeaveLayer => Dense => WeaveGather => Classification or Regression
                """
        self.atom_features = Feature(shape=(None, self.n_atom_feat))
        self.pair_features = Feature(shape=(None, self.n_pair_feat))
        self.pair_split = Feature(shape=(None, ), dtype=tf.int32)
        self.atom_split = Feature(shape=(None, ), dtype=tf.int32)
        self.atom_to_pair = Feature(shape=(None, 2), dtype=tf.int32)
        weave_layer1A, weave_layer1P = WeaveLayerFactory(
            n_atom_input_feat=self.n_atom_feat,
            n_pair_input_feat=self.n_pair_feat,
            n_atom_output_feat=self.n_hidden,
            n_pair_output_feat=self.n_hidden,
            in_layers=[
                self.atom_features, self.pair_features, self.pair_split,
                self.atom_to_pair
            ])
        weave_layer2A, weave_layer2P = WeaveLayerFactory(
            n_atom_input_feat=self.n_hidden,
            n_pair_input_feat=self.n_hidden,
            n_atom_output_feat=self.n_hidden,
            n_pair_output_feat=self.n_hidden,
            update_pair=False,
            in_layers=[
                weave_layer1A, weave_layer1P, self.pair_split,
                self.atom_to_pair
            ])
        dense1 = Dense(out_channels=self.n_graph_feat,
                       activation_fn=tf.nn.tanh,
                       in_layers=weave_layer2A)
        batch_norm1 = BatchNorm(epsilon=1e-5, in_layers=[dense1])
        weave_gather = WeaveGather(self.batch_size,
                                   n_input=self.n_graph_feat,
                                   gaussian_expand=True,
                                   in_layers=[batch_norm1, self.atom_split])

        n_tasks = self.n_tasks
        weights = Weights(shape=(None, n_tasks))
        if self.mode == 'classification':
            n_classes = self.n_classes
            labels = Label(shape=(None, n_tasks, n_classes))
            logits = Reshape(shape=(None, n_tasks, n_classes),
                             in_layers=[
                                 Dense(in_layers=weave_gather,
                                       out_channels=n_tasks * n_classes)
                             ])
            output = SoftMax(logits)
            self.add_output(output)
            loss = SoftMaxCrossEntropy(in_layers=[labels, logits])
            weighted_loss = WeightedError(in_layers=[loss, weights])
            self.set_loss(weighted_loss)
        else:
            labels = Label(shape=(None, n_tasks))
            output = Reshape(shape=(None, n_tasks),
                             in_layers=[
                                 Dense(in_layers=weave_gather,
                                       out_channels=n_tasks)
                             ])
            self.add_output(output)
            weighted_loss = ReduceSum(
                L2Loss(in_layers=[labels, output, weights]))
            self.set_loss(weighted_loss)
Example 19
    def build_graph(self):
        # Build placeholders
        self.atom_features = Feature(shape=(None, self.n_atom_feat))
        self.pair_features = Feature(shape=(None, self.n_pair_feat))
        self.atom_split = Feature(shape=(None, ), dtype=tf.int32)
        self.atom_to_pair = Feature(shape=(None, 2), dtype=tf.int32)

        message_passing = MessagePassing(self.T,
                                         message_fn='enn',
                                         update_fn='gru',
                                         n_hidden=self.n_hidden,
                                         in_layers=[
                                             self.atom_features,
                                             self.pair_features,
                                             self.atom_to_pair
                                         ])

        atom_embeddings = Dense(self.n_hidden, in_layers=[message_passing])

        mol_embeddings = SetGather(
            self.M,
            self.batch_size,
            n_hidden=self.n_hidden,
            in_layers=[atom_embeddings, self.atom_split])

        dense1 = Dense(out_channels=2 * self.n_hidden,
                       activation_fn=tf.nn.relu,
                       in_layers=[mol_embeddings])

        n_tasks = self.n_tasks
        weights = Weights(shape=(None, n_tasks))
        if self.mode == 'classification':
            n_classes = self.n_classes
            labels = Label(shape=(None, n_tasks, n_classes))
            logits = Reshape(shape=(None, n_tasks, n_classes),
                             in_layers=[
                                 Dense(in_layers=dense1,
                                       out_channels=n_tasks * n_classes)
                             ])
            logits = TrimGraphOutput([logits, weights])
            output = SoftMax(logits)
            self.add_output(output)
            loss = SoftMaxCrossEntropy(in_layers=[labels, logits])
            weighted_loss = WeightedError(in_layers=[loss, weights])
            self.set_loss(weighted_loss)
        else:
            labels = Label(shape=(None, n_tasks))
            output = Reshape(
                shape=(None, n_tasks),
                in_layers=[Dense(in_layers=dense1, out_channels=n_tasks)])
            output = TrimGraphOutput([output, weights])
            self.add_output(output)
            if self.uncertainty:
                log_var = Reshape(
                    shape=(None, n_tasks),
                    in_layers=[Dense(in_layers=dense1, out_channels=n_tasks)])
                log_var = TrimGraphOutput([log_var, weights])
                var = Exp(log_var)
                self.add_variance(var)
                diff = labels - output
                weighted_loss = weights * (diff * diff / var + log_var)
                weighted_loss = ReduceSum(ReduceMean(weighted_loss, axis=[1]))
            else:
                weighted_loss = ReduceSum(
                    L2Loss(in_layers=[labels, output, weights]))
            self.set_loss(weighted_loss)
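
The uncertainty branch above is a heteroscedastic regression loss: the graph predicts a log-variance alongside each output, scales the squared error by the predicted variance, and pays a log-variance penalty. A plain NumPy sketch of the reduced quantity (names are illustrative):

import numpy as np

def heteroscedastic_loss(labels, preds, log_var, weights):
    """weights * ((labels - preds)**2 / var + log_var), averaged over tasks
    and summed over the batch, mirroring ReduceSum(ReduceMean(..., axis=[1]))."""
    var = np.exp(log_var)
    diff = labels - preds
    per_entry = weights * (diff * diff / var + log_var)
    return np.sum(np.mean(per_entry, axis=1))

labels = np.random.rand(8, 3)
preds = np.random.rand(8, 3)
log_var = np.zeros((8, 3))      # var = 1, so this reduces to a weighted L2 loss
weights = np.ones((8, 3))
print(heteroscedastic_loss(labels, preds, log_var, weights))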
Example 20
    def build_graph(self):
        """
    Building graph structures:
    """
        self.atom_features = Feature(shape=(None, self.number_atom_features))
        self.degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
        self.membership = Feature(shape=(None, ), dtype=tf.int32)

        self.deg_adjs = []
        for i in range(0, 10 + 1):
            deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
            self.deg_adjs.append(deg_adj)
        in_layer = self.atom_features
        for layer_size, dropout in zip(self.graph_conv_layers, self.dropout):
            gc1_in = [in_layer, self.degree_slice, self.membership
                      ] + self.deg_adjs
            gc1 = GraphConv(layer_size,
                            activation_fn=tf.nn.relu,
                            in_layers=gc1_in)
            batch_norm1 = BatchNorm(in_layers=[gc1])
            if dropout > 0.0:
                batch_norm1 = Dropout(dropout, in_layers=batch_norm1)
            gp_in = [batch_norm1, self.degree_slice, self.membership
                     ] + self.deg_adjs
            in_layer = GraphPool(in_layers=gp_in)
        dense = Dense(out_channels=self.dense_layer_size,
                      activation_fn=tf.nn.relu,
                      in_layers=[in_layer])
        batch_norm3 = BatchNorm(in_layers=[dense])
        if self.dropout[-1] > 0.0:
            batch_norm3 = Dropout(self.dropout[-1], in_layers=batch_norm3)
        readout = GraphGather(
            batch_size=self.batch_size,
            activation_fn=tf.nn.tanh,
            in_layers=[batch_norm3, self.degree_slice, self.membership] +
            self.deg_adjs)

        n_tasks = self.n_tasks
        weights = Weights(shape=(None, n_tasks))
        if self.mode == 'classification':
            n_classes = self.n_classes
            labels = Label(shape=(None, n_tasks, n_classes))
            logits = Reshape(shape=(None, n_tasks, n_classes),
                             in_layers=[
                                 Dense(in_layers=readout,
                                       out_channels=n_tasks * n_classes)
                             ])
            logits = TrimGraphOutput([logits, weights])
            output = SoftMax(logits)
            self.add_output(output)
            loss = SoftMaxCrossEntropy(in_layers=[labels, logits])
            weighted_loss = WeightedError(in_layers=[loss, weights])
            self.set_loss(weighted_loss)
        else:
            labels = Label(shape=(None, n_tasks))
            output = Reshape(
                shape=(None, n_tasks),
                in_layers=[Dense(in_layers=readout, out_channels=n_tasks)])
            output = TrimGraphOutput([output, weights])
            self.add_output(output)
            if self.uncertainty:
                log_var = Reshape(
                    shape=(None, n_tasks),
                    in_layers=[Dense(in_layers=readout, out_channels=n_tasks)])
                log_var = TrimGraphOutput([log_var, weights])
                var = Exp(log_var)
                self.add_variance(var)
                diff = labels - output
                weighted_loss = weights * (diff * diff / var + log_var)
                weighted_loss = ReduceSum(ReduceMean(weighted_loss, axis=[1]))
            else:
                weighted_loss = ReduceSum(
                    L2Loss(in_layers=[labels, output, weights]))
            self.set_loss(weighted_loss)
Example 21
    def build_graph(self):
        """Building graph structures:
                Features => DAGLayer => DAGGather => Classification or Regression
                """
        self.atom_features = Feature(shape=(None, self.n_atom_feat))
        self.parents = Feature(shape=(None, self.max_atoms, self.max_atoms),
                               dtype=tf.int32)
        self.calculation_orders = Feature(shape=(None, self.max_atoms),
                                          dtype=tf.int32)
        self.calculation_masks = Feature(shape=(None, self.max_atoms),
                                         dtype=tf.bool)
        self.membership = Feature(shape=(None, ), dtype=tf.int32)
        self.n_atoms = Feature(shape=(), dtype=tf.int32)
        dag_layer1 = DAGLayer(n_graph_feat=self.n_graph_feat,
                              n_atom_feat=self.n_atom_feat,
                              max_atoms=self.max_atoms,
                              layer_sizes=self.layer_sizes,
                              dropout=self.dropout,
                              batch_size=self.batch_size,
                              in_layers=[
                                  self.atom_features, self.parents,
                                  self.calculation_orders,
                                  self.calculation_masks, self.n_atoms
                              ])
        dag_gather = DAGGather(n_graph_feat=self.n_graph_feat,
                               n_outputs=self.n_outputs,
                               max_atoms=self.max_atoms,
                               layer_sizes=self.layer_sizes_gather,
                               dropout=self.dropout,
                               in_layers=[dag_layer1, self.membership])

        n_tasks = self.n_tasks
        weights = Weights(shape=(None, n_tasks))
        if self.mode == 'classification':
            n_classes = self.n_classes
            labels = Label(shape=(None, n_tasks, n_classes))
            logits = Reshape(shape=(None, n_tasks, n_classes),
                             in_layers=[
                                 Dense(in_layers=dag_gather,
                                       out_channels=n_tasks * n_classes)
                             ])
            output = SoftMax(logits)
            self.add_output(output)
            loss = SoftMaxCrossEntropy(in_layers=[labels, logits])
            weighted_loss = WeightedError(in_layers=[loss, weights])
            self.set_loss(weighted_loss)
        else:
            labels = Label(shape=(None, n_tasks))
            output = Reshape(
                shape=(None, n_tasks),
                in_layers=[Dense(in_layers=dag_gather, out_channels=n_tasks)])
            self.add_output(output)
            if self.uncertainty:
                log_var = Reshape(shape=(None, n_tasks),
                                  in_layers=[
                                      Dense(in_layers=dag_gather,
                                            out_channels=n_tasks)
                                  ])
                var = Exp(log_var)
                self.add_variance(var)
                diff = labels - output
                weighted_loss = weights * (diff * diff / var + log_var)
                weighted_loss = ReduceSum(ReduceMean(weighted_loss, axis=[1]))
            else:
                weighted_loss = ReduceSum(
                    L2Loss(in_layers=[labels, output, weights]))
            self.set_loss(weighted_loss)
Example 22
    def __init__(self,
                 n_tasks,
                 n_features,
                 layer_sizes=[1000],
                 weight_init_stddevs=0.02,
                 bias_init_consts=1.0,
                 weight_decay_penalty=0.0,
                 weight_decay_penalty_type="l2",
                 dropouts=0.5,
                 activation_fns=tf.nn.relu,
                 uncertainty=False,
                 **kwargs):
        """Create a MultitaskRegressor.

     In addition to the following arguments, this class also accepts all the keyword arguments
    from TensorGraph.

    Parameters
    ----------
    n_tasks: int
      number of tasks
    n_features: int
      number of features
    layer_sizes: list
      the size of each dense layer in the network.  The length of this list determines the number of layers.
    weight_init_stddevs: list or float
      the standard deviation of the distribution to use for weight initialization of each layer.  The length
      of this list should equal len(layer_sizes)+1.  The final element corresponds to the output layer.
      Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
    bias_init_consts: list or float
      the value to initialize the biases in each layer to.  The length of this list should equal len(layer_sizes)+1.
      The final element corresponds to the output layer.  Alternatively this may be a single value instead of a list,
      in which case the same value is used for every layer.
    weight_decay_penalty: float
      the magnitude of the weight decay penalty to use
    weight_decay_penalty_type: str
      the type of penalty to use for weight decay, either 'l1' or 'l2'
    dropouts: list or float
      the dropout probability to use for each layer.  The length of this list should equal len(layer_sizes).
      Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
    activation_fns: list or object
      the Tensorflow activation function to apply to each layer.  The length of this list should equal
      len(layer_sizes).  Alternatively this may be a single value instead of a list, in which case the
      same value is used for every layer.
    uncertainty: bool
      if True, include extra outputs and loss terms to enable the uncertainty
      in outputs to be predicted
    """
        super(MultitaskRegressor, self).__init__(**kwargs)
        self.n_tasks = n_tasks
        self.n_features = n_features
        n_layers = len(layer_sizes)
        if not isinstance(weight_init_stddevs, collections.Sequence):
            weight_init_stddevs = [weight_init_stddevs] * (n_layers + 1)
        if not isinstance(bias_init_consts, collections.Sequence):
            bias_init_consts = [bias_init_consts] * (n_layers + 1)
        if not isinstance(dropouts, collections.Sequence):
            dropouts = [dropouts] * n_layers
        if not isinstance(activation_fns, collections.Sequence):
            activation_fns = [activation_fns] * n_layers
        if uncertainty:
            if any(d == 0.0 for d in dropouts):
                raise ValueError(
                    'Dropout must be included in every layer to predict uncertainty'
                )

        # Add the input features.

        mol_features = Feature(shape=(None, n_features))
        prev_layer = mol_features

        # Add the dense layers

        for size, weight_stddev, bias_const, dropout, activation_fn in zip(
                layer_sizes, weight_init_stddevs, bias_init_consts, dropouts,
                activation_fns):
            layer = Dense(in_layers=[prev_layer],
                          out_channels=size,
                          activation_fn=activation_fn,
                          weights_initializer=TFWrapper(
                              tf.truncated_normal_initializer,
                              stddev=weight_stddev),
                          biases_initializer=TFWrapper(tf.constant_initializer,
                                                       value=bias_const))
            if dropout > 0.0:
                layer = Dropout(dropout, in_layers=[layer])
            prev_layer = layer
        self.neural_fingerprint = prev_layer

        # Compute the loss function for each label.

        output = Reshape(shape=(-1, n_tasks, 1),
                         in_layers=[
                             Dense(in_layers=[prev_layer],
                                   out_channels=n_tasks,
                                   weights_initializer=TFWrapper(
                                       tf.truncated_normal_initializer,
                                       stddev=weight_init_stddevs[-1]),
                                   biases_initializer=TFWrapper(
                                       tf.constant_initializer,
                                       value=bias_init_consts[-1]))
                         ])
        self.add_output(output)
        labels = Label(shape=(None, n_tasks, 1))
        weights = Weights(shape=(None, n_tasks, 1))
        if uncertainty:
            log_var = Reshape(
                shape=(-1, n_tasks, 1),
                in_layers=[
                    Dense(in_layers=[prev_layer],
                          out_channels=n_tasks,
                          weights_initializer=TFWrapper(
                              tf.truncated_normal_initializer,
                              stddev=weight_init_stddevs[-1]),
                          biases_initializer=TFWrapper(tf.constant_initializer,
                                                       value=0.0))
                ])
            var = Exp(log_var)
            self.add_variance(var)
            diff = labels - output
            weighted_loss = weights * (diff * diff / var + log_var)
            weighted_loss = ReduceSum(ReduceMean(weighted_loss, axis=[1, 2]))
        else:
            weighted_loss = ReduceSum(
                L2Loss(in_layers=[labels, output, weights]))
        if weight_decay_penalty != 0.0:
            weighted_loss = WeightDecay(weight_decay_penalty,
                                        weight_decay_penalty_type,
                                        in_layers=[weighted_loss])
        self.set_loss(weighted_loss)