Code Example #1
File: scscore.py Project: zwtian666/deepchem
  def build_graph(self):
    """
    Builds the graph structure.
    """
    self.m1_features = Feature(shape=(None, self.n_features))
    self.m2_features = Feature(shape=(None, self.n_features))
    prev_layer1 = self.m1_features
    prev_layer2 = self.m2_features
    for layer_size in self.layer_sizes:
      prev_layer1 = Dense(
          out_channels=layer_size,
          in_layers=[prev_layer1],
          activation_fn=tf.nn.relu)
      prev_layer2 = prev_layer1.shared([prev_layer2])
      if self.dropout > 0.0:
        prev_layer1 = Dropout(self.dropout, in_layers=prev_layer1)
        prev_layer2 = Dropout(self.dropout, in_layers=prev_layer2)

    readout_m1 = Dense(
        out_channels=1, in_layers=[prev_layer1], activation_fn=None)
    readout_m2 = readout_m1.shared([prev_layer2])
    self.add_output(Sigmoid(readout_m1) * 4 + 1)
    self.add_output(Sigmoid(readout_m2) * 4 + 1)

    self.difference = readout_m1 - readout_m2
    label = Label(shape=(None, 1))
    loss = HingeLoss(in_layers=[label, self.difference])
    self.my_task_weights = Weights(shape=(None, 1))
    loss = WeightedError(in_layers=[loss, self.my_task_weights])
    self.set_loss(loss)
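
The pair of shared-weight towers above scores two molecules and trains only on the difference of their readouts, so training data arrives as ordered pairs. A minimal sketch of a feed generator for this setup, assuming the Label is also stored as an attribute (say self.label; the snippet above keeps it local) and that fps1/fps2 are (n_samples, n_features) fingerprint arrays where each row of fps1 should outrank the matching row of fps2:

import numpy as np

def ranking_generator(model, fps1, fps2, batch_size=64):
  # Hypothetical helper, not part of scscore.py: yields one feed dict per
  # batch, mapping each Feature/Label/Weights layer to a numpy array.
  for start in range(0, len(fps1), batch_size):
    sl = slice(start, start + batch_size)
    n = fps1[sl].shape[0]
    yield {
        model.m1_features: fps1[sl],
        model.m2_features: fps2[sl],
        model.label: np.ones((n, 1)),  # +1 means m1 should score higher
        model.my_task_weights: np.ones((n, 1)),
    }

# model.fit_generator(ranking_generator(model, fps1, fps2))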
Code Example #2
    def _build_graph(self):
        self.atom_flags = Feature(shape=(None,
                                         self.max_atoms * self.max_atoms))
        self.atom_feats = Feature(shape=(None, self.max_atoms * self.n_feat))

        reshaped_atom_feats = Reshape(in_layers=[self.atom_feats],
                                      shape=(-1, self.max_atoms, self.n_feat))
        reshaped_atom_flags = Reshape(in_layers=[self.atom_flags],
                                      shape=(-1, self.max_atoms,
                                             self.max_atoms))

        previous_layer = reshaped_atom_feats

        Hiddens = []
        for n_hidden in self.layer_structures:
            Hidden = Dense(out_channels=n_hidden,
                           activation_fn=tf.nn.tanh,
                           in_layers=[previous_layer])
            Hiddens.append(Hidden)
            previous_layer = Hiddens[-1]

        regression = Dense(out_channels=1 * self.n_tasks,
                           activation_fn=None,
                           in_layers=[Hiddens[-1]])
        output = BPGather(self.max_atoms,
                          in_layers=[regression, reshaped_atom_flags])
        self.add_output(output)

        label = Label(shape=(None, self.n_tasks, 1))
        loss = ReduceSum(L2Loss(in_layers=[label, output]))
        weights = Weights(shape=(None, self.n_tasks))

        weighted_loss = WeightedError(in_layers=[loss, weights])
        self.set_loss(weighted_loss)
Code Example #3
    def build_graph(self):
        # Layer 1
        gc1_input = [self.atom_features, self.indexing, self.membership] + self.deg_adj_list
        gc1 = GraphConv(64, activation_fn=tf.nn.relu, in_layers=gc1_input)
        bn1 = BatchNorm(in_layers=[gc1])
        gp1_input = [bn1, self.indexing, self.membership] + self.deg_adj_list
        gp1 = GraphPool(in_layers=gp1_input)

        # Layer 2
        gc2_input = [gp1, self.indexing, self.membership] + self.deg_adj_list
        gc2 = GraphConv(64, activation_fn=tf.nn.relu, in_layers=gc2_input)
        bn2 = BatchNorm(in_layers=[gc2])
        gp2_input = [bn2, self.indexing, self.membership] + self.deg_adj_list
        gp2 = GraphPool(in_layers=gp2_input)

        # Dense layer 1
        d1 = Dense(out_channels=128, activation_fn=tf.nn.relu, in_layers=[gp2])
        bn3 = BatchNorm(in_layers=[d1])

        # Graph gather layer
        gg1_input = [bn3, self.indexing, self.membership] + self.deg_adj_list
        gg1 = GraphGather(batch_size=self.batch_size, activation_fn=tf.nn.tanh, in_layers=gg1_input)

        # Output dense layer
        d2 = Dense(out_channels=2, activation_fn=None, in_layers=[gg1])
        softmax = SoftMax(in_layers=[d2])
        self.tg.add_output(softmax)

        # Set loss function
        self.label = Label(shape=(None, 2))
        cost = SoftMaxCrossEntropy(in_layers=[self.label, d2])
        self.weight = Weights(shape=(None, 1))
        loss = WeightedError(in_layers=[cost, self.weight])
        self.tg.set_loss(loss)
Code Example #4
  def build_graph(self):
    self.atom_flags = Feature(shape=(None, self.max_atoms, self.max_atoms))
    self.atom_feats = Feature(shape=(None, self.max_atoms, self.n_feat))
    previous_layer = self.atom_feats

    Hiddens = []
    for n_hidden in self.layer_structures:
      Hidden = Dense(
          out_channels=n_hidden,
          activation_fn=tf.nn.tanh,
          in_layers=[previous_layer])
      Hiddens.append(Hidden)
      previous_layer = Hiddens[-1]

    costs = []
    self.labels_fd = []
    for task in range(self.n_tasks):
      regression = Dense(
          out_channels=1, activation_fn=None, in_layers=[Hiddens[-1]])
      output = BPGather(self.max_atoms, in_layers=[regression, self.atom_flags])
      self.add_output(output)

      label = Label(shape=(None, 1))
      self.labels_fd.append(label)
      cost = L2Loss(in_layers=[label, output])
      costs.append(cost)

    all_cost = Stack(in_layers=costs, axis=1)
    self.weights = Weights(shape=(None, self.n_tasks))
    loss = WeightedError(in_layers=[all_cost, self.weights])
    self.set_loss(loss)
Code Example #5
File: deepmhc.py Project: ming013r/dcpathtest
    def _build_graph(self):

        self.one_hot_seq = Feature(shape=(None, self.pad_length,
                                          self.num_amino_acids),
                                   dtype=tf.float32)

        conv1 = Conv1D(kernel_size=2,
                       filters=512,
                       in_layers=[self.one_hot_seq])

        maxpool1 = MaxPool1D(strides=2, padding="VALID", in_layers=[conv1])
        conv2 = Conv1D(kernel_size=3, filters=512, in_layers=[maxpool1])
        flattened = Flatten(in_layers=[conv2])
        dense1 = Dense(out_channels=400,
                       in_layers=[flattened],
                       activation_fn=tf.nn.tanh)
        dropout = Dropout(dropout_prob=self.dropout_p, in_layers=[dense1])
        output = Dense(out_channels=1, in_layers=[dropout], activation_fn=None)
        self.add_output(output)

        if self.mode == "regression":
            label = Label(shape=(None, 1))
            loss = L2Loss(in_layers=[label, output])
        else:
            raise NotImplementedError(
                "Classification support not added yet. Missing details in paper."
            )
        weights = Weights(shape=(None, ))
        weighted_loss = WeightedError(in_layers=[loss, weights])
        self.set_loss(weighted_loss)
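
The one_hot_seq Feature expects each peptide as a (pad_length, num_amino_acids) one-hot matrix. A sketch of such an encoder, assuming the 20 canonical amino acids (this helper is illustrative and not part of deepmhc.py):

import numpy as np

AMINO_ACIDS = "ACDEFGHIKLMNPQRSTVWY"  # assumed alphabet, num_amino_acids == 20

def one_hot_peptide(seq, pad_length):
    # Encode a peptide string as a (pad_length, 20) float32 one-hot matrix,
    # truncating long sequences and zero-padding short ones.
    index = {aa: i for i, aa in enumerate(AMINO_ACIDS)}
    encoded = np.zeros((pad_length, len(AMINO_ACIDS)), dtype=np.float32)
    for pos, aa in enumerate(seq[:pad_length]):
        encoded[pos, index[aa]] = 1.0
    return encoded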
Code Example #6
File: tictactoe.py Project: miradel51/deepchem
 def create_layers(self, state, **kwargs):
   d1 = Flatten(in_layers=state)
   d2 = Dense(
       in_layers=[d1],
       activation_fn=tf.nn.relu,
       normalizer_fn=tf.nn.l2_normalize,
       normalizer_params={"dim": 1},
       out_channels=64)
   d3 = Dense(
       in_layers=[d2],
       activation_fn=tf.nn.relu,
       normalizer_fn=tf.nn.l2_normalize,
       normalizer_params={"dim": 1},
       out_channels=32)
   d4 = Dense(
       in_layers=[d3],
       activation_fn=tf.nn.relu,
       normalizer_fn=tf.nn.l2_normalize,
       normalizer_params={"dim": 1},
       out_channels=16)
   d4 = BatchNorm(in_layers=[d4])
   d5 = Dense(in_layers=[d4], activation_fn=None, out_channels=9)
   value = Dense(in_layers=[d4], activation_fn=None, out_channels=1)
   value = Squeeze(squeeze_dims=1, in_layers=[value])
   probs = SoftMax(in_layers=[d5])
   return {'action_prob': probs, 'value': value}
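
A Policy whose create_layers returns 'action_prob' and 'value' plugs into DeepChem's RL drivers. A rough usage sketch with A3C, assuming a hypothetical dc.rl.Policy subclass wrapping the method above and DeepChem's bundled tic-tac-toe environment (the import path and defaults are from memory and may differ by version):

import deepchem as dc
from deepchem.rl.envs.tictactoe import TicTacToeEnvironment  # assumed path

env = TicTacToeEnvironment()
policy = TicTacToePolicy()  # hypothetical Policy subclass with create_layers above
a3c = dc.rl.A3C(env, policy)
a3c.fit(100000)  # total environment steps
# a3c.select_action(env.state) then picks a move for the current board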
Code Example #7
    def build_graph(self):
        # Build placeholders
        self.atom_features = Feature(shape=(None, self.n_atom_feat))
        self.pair_features = Feature(shape=(None, self.n_pair_feat))
        self.atom_split = Feature(shape=(None, ), dtype=tf.int32)
        self.atom_to_pair = Feature(shape=(None, 2), dtype=tf.int32)

        message_passing = MessagePassing(self.T,
                                         message_fn='enn',
                                         update_fn='gru',
                                         n_hidden=self.n_hidden,
                                         in_layers=[
                                             self.atom_features,
                                             self.pair_features,
                                             self.atom_to_pair
                                         ])

        atom_embeddings = Dense(self.n_hidden, in_layers=[message_passing])

        mol_embeddings = SetGather(
            self.M,
            self.batch_size,
            n_hidden=self.n_hidden,
            in_layers=[atom_embeddings, self.atom_split])

        dense1 = Dense(out_channels=2 * self.n_hidden,
                       activation_fn=tf.nn.relu,
                       in_layers=[mol_embeddings])
        costs = []
        self.labels_fd = []
        for task in range(self.n_tasks):
            if self.mode == "classification":
                classification = Dense(out_channels=2,
                                       activation_fn=None,
                                       in_layers=[dense1])
                softmax = SoftMax(in_layers=[classification])
                self.add_output(softmax)

                label = Label(shape=(None, 2))
                self.labels_fd.append(label)
                cost = SoftMaxCrossEntropy(in_layers=[label, classification])
                costs.append(cost)
            if self.mode == "regression":
                regression = Dense(out_channels=1,
                                   activation_fn=None,
                                   in_layers=[dense1])
                self.add_output(regression)

                label = Label(shape=(None, 1))
                self.labels_fd.append(label)
                cost = L2Loss(in_layers=[label, regression])
                costs.append(cost)
        if self.mode == "classification":
            all_cost = Concat(in_layers=costs, axis=1)
        elif self.mode == "regression":
            all_cost = Stack(in_layers=costs, axis=1)
        self.weights = Weights(shape=(None, self.n_tasks))
        loss = WeightedError(in_layers=[all_cost, self.weights])
        self.set_loss(loss)
Code Example #8
    def build_graph(self):
        """Building graph structures:
        Features => DAGLayer => DAGGather => Classification or Regression
        """
        self.atom_features = Feature(shape=(None, self.n_atom_feat))
        self.parents = Feature(shape=(None, self.max_atoms, self.max_atoms),
                               dtype=tf.int32)
        self.calculation_orders = Feature(shape=(None, self.max_atoms),
                                          dtype=tf.int32)
        self.calculation_masks = Feature(shape=(None, self.max_atoms),
                                         dtype=tf.bool)
        self.membership = Feature(shape=(None, ), dtype=tf.int32)
        self.n_atoms = Feature(shape=(), dtype=tf.int32)
        dag_layer1 = DAGLayer(n_graph_feat=self.n_graph_feat,
                              n_atom_feat=self.n_atom_feat,
                              max_atoms=self.max_atoms,
                              batch_size=self.batch_size,
                              in_layers=[
                                  self.atom_features, self.parents,
                                  self.calculation_orders,
                                  self.calculation_masks, self.n_atoms
                              ])
        dag_gather = DAGGather(n_graph_feat=self.n_graph_feat,
                               n_outputs=self.n_outputs,
                               max_atoms=self.max_atoms,
                               in_layers=[dag_layer1, self.membership])

        costs = []
        self.labels_fd = []
        for task in range(self.n_tasks):
            if self.mode == "classification":
                classification = Dense(out_channels=2,
                                       activation_fn=None,
                                       in_layers=[dag_gather])
                softmax = SoftMax(in_layers=[classification])
                self.add_output(softmax)

                label = Label(shape=(None, 2))
                self.labels_fd.append(label)
                cost = SoftMaxCrossEntropy(in_layers=[label, classification])
                costs.append(cost)
            if self.mode == "regression":
                regression = Dense(out_channels=1,
                                   activation_fn=None,
                                   in_layers=[dag_gather])
                self.add_output(regression)

                label = Label(shape=(None, 1))
                self.labels_fd.append(label)
                cost = L2Loss(in_layers=[label, regression])
                costs.append(cost)
        if self.mode == "classification":
            all_cost = Concat(in_layers=costs, axis=1)
        elif self.mode == "regression":
            all_cost = Stack(in_layers=costs, axis=1)
        self.weights = Weights(shape=(None, self.n_tasks))
        loss = WeightedError(in_layers=[all_cost, self.weights])
        self.set_loss(loss)
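
The parents, calculation_orders, and calculation_masks placeholders consumed here are produced by DeepChem's DAGTransformer applied to ConvMol-featurized data. A hedged preprocessing sketch (API names from memory; check your DeepChem version):

import deepchem as dc

# Load a dataset with graph-conv (ConvMol) features, then convert each
# molecule into the per-atom DAG calculation orders this model expects.
tasks, datasets, transformers = dc.molnet.load_delaney(featurizer='GraphConv')
train, valid, test = datasets
max_atoms = max(mol.get_num_atoms() for mol in train.X)
train = dc.trans.DAGTransformer(max_atoms=max_atoms).transform(train)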
Code Example #9
File: text_cnn.py Project: zhshLee/deepchem
  def build_graph(self):
    self.smiles_seqs = Feature(shape=(None, self.seq_length), dtype=tf.int32)
    # Character embedding
    self.Embedding = DTNNEmbedding(
        n_embedding=self.n_embedding,
        periodic_table_length=len(self.char_dict.keys()) + 1,
        in_layers=[self.smiles_seqs])
    self.pooled_outputs = []
    self.conv_layers = []
    for filter_size, num_filter in zip(self.kernel_sizes, self.num_filters):
      # Multiple convolutional layers with different filter widths
      self.conv_layers.append(
          Conv1D(
              kernel_size=filter_size,
              filters=num_filter,
              padding='valid',
              in_layers=[self.Embedding]))
      # Max-over-time pooling
      self.pooled_outputs.append(
          ReduceMax(axis=1, in_layers=[self.conv_layers[-1]]))
    # Concat features from all filters (one feature per filter)
    concat_outputs = Concat(axis=1, in_layers=self.pooled_outputs)
    dropout = Dropout(dropout_prob=self.dropout, in_layers=[concat_outputs])
    dense = Dense(
        out_channels=200, activation_fn=tf.nn.relu, in_layers=[dropout])
    # Highway layer from https://arxiv.org/pdf/1505.00387.pdf
    self.gather = Highway(in_layers=[dense])

    costs = []
    self.labels_fd = []
    for task in range(self.n_tasks):
      if self.mode == "classification":
        classification = Dense(
            out_channels=2, activation_fn=None, in_layers=[self.gather])
        softmax = SoftMax(in_layers=[classification])
        self.add_output(softmax)

        label = Label(shape=(None, 2))
        self.labels_fd.append(label)
        cost = SoftMaxCrossEntropy(in_layers=[label, classification])
        costs.append(cost)
      if self.mode == "regression":
        regression = Dense(
            out_channels=1, activation_fn=None, in_layers=[self.gather])
        self.add_output(regression)

        label = Label(shape=(None, 1))
        self.labels_fd.append(label)
        cost = L2Loss(in_layers=[label, regression])
        costs.append(cost)
    # Both modes stack the per-task costs into a single (batch, n_tasks) tensor.
    all_cost = Stack(in_layers=costs, axis=1)
    self.weights = Weights(shape=(None, self.n_tasks))
    loss = WeightedError(in_layers=[all_cost, self.weights])
    self.set_loss(loss)
Code Example #10
File: graphModels.py Project: zbjzbj001/lsc
    def build_graph(self):
        self.atom_features = Feature(shape=(None, 75))
        self.degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
        self.membership = Feature(shape=(None, ), dtype=tf.int32)

        self.deg_adjs = []
        for i in range(0, 10 + 1):
            deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
            self.deg_adjs.append(deg_adj)
        in_layer = self.atom_features
        for layer_size in self.graph_conv_layers:
            gc1_in = [in_layer, self.degree_slice, self.membership
                      ] + self.deg_adjs
            gc1 = GraphConv(layer_size,
                            activation_fn=tf.nn.relu,
                            in_layers=gc1_in)
            batch_norm1 = MyBatchNorm(in_layers=[gc1])
            gp_in = [batch_norm1, self.degree_slice, self.membership
                     ] + self.deg_adjs
            in_layer = GraphPool(in_layers=gp_in)
        dense = Dense(out_channels=self.dense_layer_size[0],
                      activation_fn=tf.nn.relu,
                      in_layers=[in_layer])
        batch_norm3 = MyBatchNorm(in_layers=[dense])
        batch_norm3 = Dropout(self.dropout, in_layers=[batch_norm3])
        readout = GraphGather(
            batch_size=self.batch_size,
            activation_fn=tf.nn.tanh,
            in_layers=[batch_norm3, self.degree_slice, self.membership] +
            self.deg_adjs)

        curLayer = readout
        for myind in range(1, len(self.dense_layer_size) - 1):
            curLayer = Dense(out_channels=self.dense_layer_size[myind],
                             activation_fn=tf.nn.relu,
                             in_layers=[curLayer])
            curLayer = Dropout(self.dropout, in_layers=[curLayer])

        classification = Dense(out_channels=self.n_tasks,
                               activation_fn=None,
                               in_layers=[curLayer])
        sigmoid = MySigmoid(in_layers=[classification])
        self.add_output(sigmoid)

        self.label = Label(shape=(None, self.n_tasks))
        all_cost = MySigmoidCrossEntropy(
            in_layers=[self.label, classification])
        self.weights = Weights(shape=(None, self.n_tasks))
        loss = WeightedError(in_layers=[all_cost, self.weights])
        self.set_loss(loss)

        self.mydense = dense
        self.myreadout = readout
        self.myclassification = classification
        self.mysigmoid = sigmoid
        self.myall_cost = all_cost
        self.myloss = loss
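
The placeholders above mirror DeepChem's ConvMol batch layout. A sketch of how a batch is typically assembled into a feed dict, following the default_generator pattern in DeepChem's graph conv models (the attribute names on the left are the ones defined above):

from deepchem.feat.mol_graphs import ConvMol

def feed_for_batch(self, mols_batch):
    # Merge a list of ConvMol objects into one disjoint graph, then map each
    # component array onto the corresponding Feature placeholder.
    multi = ConvMol.agglomerate_mols(mols_batch)
    feed = {
        self.atom_features: multi.get_atom_features(),
        self.degree_slice: multi.deg_slice,
        self.membership: multi.membership,
    }
    deg_adj_lists = multi.get_deg_adjacency_lists()
    for i in range(1, len(deg_adj_lists)):
        feed[self.deg_adjs[i - 1]] = deg_adj_lists[i]
    return feed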
Code Example #11
File: petroskisuch.py Project: ming013r/dcpathtest
  def build_graph(self):
    self.vertex_features = Feature(shape=(None, self.max_atoms, 75))
    self.adj_matrix = Feature(shape=(None, self.max_atoms, 1, self.max_atoms))
    self.mask = Feature(shape=(None, self.max_atoms, 1))

    gcnn1 = BatchNorm(
        GraphCNN(
            num_filters=64,
            in_layers=[self.vertex_features, self.adj_matrix, self.mask]))
    gcnn1 = Dropout(self.dropout, in_layers=gcnn1)
    gcnn2 = BatchNorm(
        GraphCNN(num_filters=64, in_layers=[gcnn1, self.adj_matrix, self.mask]))
    gcnn2 = Dropout(self.dropout, in_layers=gcnn2)
    gc_pool, adj_matrix = GraphCNNPool(
        num_vertices=32, in_layers=[gcnn2, self.adj_matrix, self.mask])
    gc_pool = BatchNorm(gc_pool)
    gc_pool = Dropout(self.dropout, in_layers=gc_pool)
    gcnn3 = BatchNorm(GraphCNN(num_filters=32, in_layers=[gc_pool, adj_matrix]))
    gcnn3 = Dropout(self.dropout, in_layers=gcnn3)
    gc_pool2, adj_matrix2 = GraphCNNPool(
        num_vertices=8, in_layers=[gcnn3, adj_matrix])
    gc_pool2 = BatchNorm(gc_pool2)
    gc_pool2 = Dropout(self.dropout, in_layers=gc_pool2)
    flattened = Flatten(in_layers=gc_pool2)
    readout = Dense(
        out_channels=256, activation_fn=tf.nn.relu, in_layers=flattened)
    costs = []
    self.my_labels = []
    for task in range(self.n_tasks):
      if self.mode == 'classification':
        classification = Dense(
            out_channels=2, activation_fn=None, in_layers=[readout])

        softmax = SoftMax(in_layers=[classification])
        self.add_output(softmax)

        label = Label(shape=(None, 2))
        self.my_labels.append(label)
        cost = SoftMaxCrossEntropy(in_layers=[label, classification])
        costs.append(cost)
      if self.mode == 'regression':
        regression = Dense(
            out_channels=1, activation_fn=None, in_layers=[readout])
        self.add_output(regression)

        label = Label(shape=(None, 1))
        self.my_labels.append(label)
        cost = L2Loss(in_layers=[label, regression])
        costs.append(cost)
    if self.mode == "classification":
      entropy = Stack(in_layers=costs, axis=-1)
    elif self.mode == "regression":
      entropy = Stack(in_layers=costs, axis=1)
    self.my_task_weights = Weights(shape=(None, self.n_tasks))
    loss = WeightedError(in_layers=[entropy, self.my_task_weights])
    self.set_loss(loss)
Code Example #12
File: test_ppo.py Project: zwtian666/deepchem
            def create_layers(self, state, **kwargs):

                dense1 = Dense(6, activation_fn=tf.nn.relu, in_layers=state)
                dense2 = Dense(6, activation_fn=tf.nn.relu, in_layers=dense1)
                output = Dense(4,
                               activation_fn=tf.nn.softmax,
                               biases_initializer=None,
                               in_layers=dense2)
                value = Dense(1, in_layers=dense2)
                return {'action_prob': output, 'value': value}
Code Example #13
    def _build(self):
        self.A_tilda_k = list()
        for k in range(1, self.k_max + 1):
            self.A_tilda_k.append(
                Feature(name="graph_adjacency_{}".format(k),
                        dtype=tf.float32,
                        shape=[None, self.max_nodes, self.max_nodes]))
        self.X = Feature(name='atom_features',
                         dtype=tf.float32,
                         shape=[None, self.max_nodes, self.num_node_features])

        graph_layers = list()
        adaptive_filters = list()

        for index, k in enumerate(range(1, self.k_max + 1)):

            in_layers = [self.A_tilda_k[index], self.X]

            adaptive_filters.append(
                AdaptiveFilter(batch_size=self.batch_size,
                               in_layers=in_layers,
                               num_nodes=self.max_nodes,
                               num_node_features=self.num_node_features,
                               combine_method=self.combine_method))

            graph_layers.append(
                KOrderGraphConv(batch_size=self.batch_size,
                                in_layers=in_layers +
                                [adaptive_filters[index]],
                                num_nodes=self.max_nodes,
                                num_node_features=self.num_node_features,
                                init='glorot_uniform'))

        graph_features = Concat(in_layers=graph_layers, axis=2)
        graph_features = ReLU(in_layers=[graph_features])
        flattened = Flatten(in_layers=[graph_features])

        dense1 = Dense(in_layers=[flattened],
                       out_channels=64,
                       activation_fn=tf.nn.relu)
        dense2 = Dense(in_layers=[dense1],
                       out_channels=16,
                       activation_fn=tf.nn.relu)
        dense3 = Dense(in_layers=[dense2],
                       out_channels=1 * self.n_tasks,
                       activation_fn=None)
        output = Reshape(in_layers=[dense3], shape=(-1, self.n_tasks, 1))
        self.add_output(output)

        label = Label(shape=(None, self.n_tasks, 1))
        weights = Weights(shape=(None, self.n_tasks))
        loss = ReduceSum(L2Loss(in_layers=[label, output]))

        weighted_loss = WeightedError(in_layers=[loss, weights])
        self.set_loss(weighted_loss)
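
The graph_adjacency_k Features presumably carry successive powers of a normalized adjacency matrix, one per receptive-field size k. A NumPy sketch of preparing those inputs, assuming symmetric degree normalization with self-loops (what AdaptiveFilter actually expects is an assumption to verify):

import numpy as np

def adjacency_powers(adj, k_max):
    # adj: (max_nodes, max_nodes) 0/1 adjacency matrix, zero-padded.
    # Returns [A_tilda^1, ..., A_tilda^k_max] with A_tilda = D^-1/2 (A+I) D^-1/2.
    a = adj + np.eye(adj.shape[0], dtype=adj.dtype)
    deg = np.maximum(a.sum(axis=1), 1.0)
    d_inv_sqrt = np.diag(1.0 / np.sqrt(deg))
    a_tilda = d_inv_sqrt @ a @ d_inv_sqrt
    powers = [a_tilda]
    for _ in range(k_max - 1):
        powers.append(powers[-1] @ a_tilda)
    return powers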
Code Example #14
 def create_layers(self, state, **kwargs):
     action_mean = Dense(1,
                         in_layers=state,
                         weights_initializer=tf.zeros_initializer)
     action_std = Constant([10.0])
     value = Dense(1, in_layers=state)
     return {
         'action_mean': action_mean,
         'action_std': action_std,
         'value': value
     }
Code Example #15
File: text_cnn.py Project: zhoutf/deepchem
    def _build_graph(self):
        self.smiles_seqs = Feature(shape=(None, self.seq_length),
                                   dtype=tf.int32)
        # Character embedding
        Embedding = DTNNEmbedding(
            n_embedding=self.n_embedding,
            periodic_table_length=len(self.char_dict.keys()) + 1,
            in_layers=[self.smiles_seqs])
        pooled_outputs = []
        conv_layers = []
        for filter_size, num_filter in zip(self.kernel_sizes,
                                           self.num_filters):
            # Multiple convolutional layers with different filter widths
            conv_layers.append(
                Conv1D(kernel_size=filter_size,
                       filters=num_filter,
                       padding='valid',
                       in_layers=[Embedding]))
            # Max-over-time pooling
            pooled_outputs.append(
                ReduceMax(axis=1, in_layers=[conv_layers[-1]]))
            # Concat features from all filters (one feature per filter)
        concat_outputs = Concat(axis=1, in_layers=pooled_outputs)
        dropout = Dropout(dropout_prob=self.dropout,
                          in_layers=[concat_outputs])
        dense = Dense(out_channels=200,
                      activation_fn=tf.nn.relu,
                      in_layers=[dropout])
        # Highway layer from https://arxiv.org/pdf/1505.00387.pdf
        gather = Highway(in_layers=[dense])

        if self.mode == "classification":
            logits = Dense(out_channels=self.n_tasks * 2,
                           activation_fn=None,
                           in_layers=[gather])
            logits = Reshape(shape=(-1, self.n_tasks, 2), in_layers=[logits])
            output = SoftMax(in_layers=[logits])
            self.add_output(output)
            labels = Label(shape=(None, self.n_tasks, 2))
            loss = SoftMaxCrossEntropy(in_layers=[labels, logits])

        else:
            vals = Dense(out_channels=self.n_tasks * 1,
                         activation_fn=None,
                         in_layers=[gather])
            vals = Reshape(shape=(-1, self.n_tasks, 1), in_layers=[vals])
            self.add_output(vals)
            labels = Label(shape=(None, self.n_tasks, 1))
            loss = ReduceSum(L2Loss(in_layers=[labels, vals]))

        weights = Weights(shape=(None, self.n_tasks))
        weighted_loss = WeightedError(in_layers=[loss, weights])
        self.set_loss(weighted_loss)
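
The char_dict and seq_length used above are normally derived from the training SMILES. DeepChem's TextCNN models ship a build_char_dict helper that scans a dataset for the character vocabulary and the longest sequence; a usage sketch (the Raw featurizer and the class name, TextCNNModel here versus TextCNNTensorGraph in the TensorGraph era, are from memory):

import deepchem as dc

# TextCNN reads raw SMILES strings (kept in dataset.ids), so the dataset
# is loaded with the Raw featurizer rather than precomputed fingerprints.
tasks, datasets, transformers = dc.molnet.load_delaney(featurizer='Raw')
train, valid, test = datasets
char_dict, seq_length = dc.models.TextCNNModel.build_char_dict(train)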
Code Example #16
    def build_graph(self):
        d1 = Dense(out_channels=256, activation_fn=tf.nn.relu, in_layers=[self.feature])
        d2 = Dense(out_channels=64, activation_fn=tf.nn.relu, in_layers=[d1])
        d3 = Dense(out_channels=16, activation_fn=None, in_layers=[d2])
        d4 = Dense(out_channels=2, activation_fn=None, in_layers=[d3])
        softmax = SoftMax(in_layers=[d4])
        self.tg.add_output(softmax)

        self.label = Label(shape=(None, 2))
        cost = SoftMaxCrossEntropy(in_layers=[self.label, d4])
        loss = ReduceMean(in_layers=[cost])
        self.tg.set_loss(loss)
Code Example #17
    def add_adapter(self, all_layers, task, layer_num):
        """Add an adapter connection for given task/layer combo"""
        i = layer_num
        prev_layers = []
        trainable_layers = []
        # Handle output layer
        if i < len(self.layer_sizes):
            layer_sizes = self.layer_sizes
            alpha_init_stddev = self.alpha_init_stddevs[i]
            weight_init_stddev = self.weight_init_stddevs[i]
            bias_init_const = self.bias_init_consts[i]
        elif i == len(self.layer_sizes):
            layer_sizes = self.layer_sizes + [1]
            alpha_init_stddev = self.alpha_init_stddevs[-1]
            weight_init_stddev = self.weight_init_stddevs[-1]
            bias_init_const = self.bias_init_consts[-1]
        else:
            raise ValueError("layer_num too large for add_adapter.")
        # Iterate over all previous tasks.
        for prev_task in range(task):
            prev_layers.append(all_layers[(i - 1, prev_task)])
        # prev_layers is a list with elements of size
        # (batch_size, layer_sizes[i-1])
        prev_layer = Concat(axis=1, in_layers=prev_layers)
        with self._get_tf("Graph").as_default():
            alpha = TensorWrapper(
                tf.Variable(tf.truncated_normal((1, ),
                                                stddev=alpha_init_stddev),
                            name="alpha_layer_%d_task%d" % (i, task)))
            trainable_layers.append(alpha)

        prev_layer = prev_layer * alpha
        dense1 = Dense(in_layers=[prev_layer],
                       out_channels=layer_sizes[i - 1],
                       activation_fn=None,
                       weights_initializer=TFWrapper(
                           tf.truncated_normal_initializer,
                           stddev=weight_init_stddev),
                       biases_initializer=TFWrapper(tf.constant_initializer,
                                                    value=bias_init_const))
        trainable_layers.append(dense1)

        dense2 = Dense(in_layers=[dense1],
                       out_channels=layer_sizes[i],
                       activation_fn=None,
                       weights_initializer=TFWrapper(
                           tf.truncated_normal_initializer,
                           stddev=weight_init_stddev),
                       biases_initializer=None)
        trainable_layers.append(dense2)

        return dense2, trainable_layers
Code Example #18
    def test_compute_model_performance_multitask_regressor(self):
        random_seed = 42
        n_data_points = 20
        n_features = 2
        n_tasks = 2
        np.random.seed(seed=random_seed)

        X = np.random.rand(n_data_points, n_features)
        y1 = np.array([0.5 for x in range(n_data_points)])
        y2 = np.array([-0.5 for x in range(n_data_points)])
        y = np.stack([y1, y2], axis=1)
        dataset = NumpyDataset(X, y)

        features = Feature(shape=(None, n_features))
        label = Label(shape=(None, n_tasks))
        dense = Dense(out_channels=n_tasks, in_layers=[features])
        loss = ReduceSquareDifference(in_layers=[dense, label])

        tg = dc.models.TensorGraph(random_seed=random_seed, learning_rate=0.1)
        tg.add_output(dense)
        tg.set_loss(loss)

        tg.fit(dataset, nb_epoch=1000)
        metric = [
            dc.metrics.Metric(dc.metrics.mean_absolute_error,
                              np.mean,
                              mode="regression"),
        ]
        scores = tg.evaluate_generator(tg.default_generator(dataset),
                                       metric,
                                       labels=[label],
                                       per_task_metrics=True)
        scores = list(scores[1].values())
        assert_true(np.all(np.isclose(scores, [0.0, 0.0], atol=1.0)))
Code Example #19
    def test_compute_model_performance_singletask_classifier(self):
        n_data_points = 20
        n_features = 10

        X = np.ones(shape=(int(n_data_points / 2), n_features)) * -1
        X1 = np.ones(shape=(int(n_data_points / 2), n_features))
        X = np.concatenate((X, X1))
        class_1 = np.array([[0.0, 1.0] for x in range(int(n_data_points / 2))])
        class_0 = np.array([[1.0, 0.0] for x in range(int(n_data_points / 2))])
        y = np.concatenate((class_0, class_1))
        dataset = NumpyDataset(X, y)

        features = Feature(shape=(None, n_features))
        label = Label(shape=(None, 2))
        dense = Dense(out_channels=2, in_layers=[features])
        output = SoftMax(in_layers=[dense])
        smce = SoftMaxCrossEntropy(in_layers=[label, dense])
        total_loss = ReduceMean(in_layers=smce)

        tg = dc.models.TensorGraph(learning_rate=0.1)
        tg.add_output(output)
        tg.set_loss(total_loss)

        tg.fit(dataset, nb_epoch=1000)
        metric = dc.metrics.Metric(dc.metrics.roc_auc_score,
                                   np.mean,
                                   mode="classification")

        scores = tg.evaluate_generator(tg.default_generator(dataset), [metric],
                                       labels=[label],
                                       per_task_metrics=True)
        scores = list(scores[1].values())
        assert_true(np.all(np.isclose(scores, [1.0], atol=0.05)))
Code Example #20
  def test_neighbor_list_vina(self):
    """Test under conditions closer to Vina usage."""
    N_atoms = 5
    M_nbrs = 2
    ndim = 3
    start = 0
    stop = 4
    nbr_cutoff = 1

    X = NumpyDataset(start + np.random.rand(N_atoms, ndim) * (stop - start))

    coords = Feature(shape=(N_atoms, ndim))

    # Now an (N, M) shape
    nbr_list = NeighborList(
        N_atoms, M_nbrs, ndim, nbr_cutoff, start, stop, in_layers=[coords])

    nbr_list = ToFloat(in_layers=[nbr_list])
    flattened = Flatten(in_layers=[nbr_list])
    dense = Dense(out_channels=1, in_layers=[flattened])
    output = ReduceSum(in_layers=[dense])

    tg = dc.models.TensorGraph(learning_rate=0.1, use_queue=False)
    tg.set_loss(output)

    databag = Databag({coords: X})
    tg.fit_generator(databag.iterbatches(epochs=1))
Code Example #21
    def test_saliency_mapping(self):
        """Test computing a saliency map."""
        n_tasks = 3
        n_features = 5
        features = Feature(shape=(None, n_features))
        dense = Dense(out_channels=n_tasks,
                      in_layers=[features],
                      activation_fn=tf.tanh)
        label = Label(shape=(None, n_tasks))
        loss = ReduceSquareDifference(in_layers=[dense, label])
        model = dc.models.TensorGraph()
        model.add_output(dense)
        model.set_loss(loss)
        x = np.random.random(n_features)
        s = model.compute_saliency(x)
        assert s.shape[0] == n_tasks
        assert s.shape[1] == n_features

        # Take a tiny step in the direction of s and see if the output changes by
        # the expected amount.
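        # First-order expansion: with step = 0.5 * delta / norm, we expect
        # f(x + s*step) - f(x - s*step) ≈ 2 * step * ||s||^2 = delta * norm.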

        delta = 0.01
        for task in range(n_tasks):
            norm = np.sqrt(np.sum(s[task]**2))
            step = 0.5 * delta / norm
            pred1 = model.predict_on_batch((x + s[task] * step).reshape(
                (1, n_features))).flatten()
            pred2 = model.predict_on_batch((x - s[task] * step).reshape(
                (1, n_features))).flatten()
            self.assertAlmostEqual(pred1[task], (pred2 + norm * delta)[task],
                                   places=4)
Code Example #22
 def test_tensorboard(self):
     n_data_points = 20
     n_features = 2
     X = np.random.rand(n_data_points, n_features)
     y = [[0, 1] for x in range(n_data_points)]
     dataset = NumpyDataset(X, y)
     features = Feature(shape=(None, n_features))
     dense = Dense(out_channels=2, in_layers=[features])
     output = SoftMax(in_layers=[dense])
     label = Label(shape=(None, 2))
     smce = SoftMaxCrossEntropy(in_layers=[label, dense])
     loss = ReduceMean(in_layers=[smce])
     tg = dc.models.TensorGraph(tensorboard=True,
                                tensorboard_log_frequency=1,
                                learning_rate=0.01,
                                model_dir='/tmp/tensorgraph')
     tg.add_output(output)
     tg.set_loss(loss)
     tg.fit(dataset, nb_epoch=1000)
     files_in_dir = os.listdir(tg.model_dir)
     event_file = list(
         filter(lambda x: x.startswith("events"), files_in_dir))
     assert_true(len(event_file) > 0)
     event_file = os.path.join(tg.model_dir, event_file[0])
     file_size = os.stat(event_file).st_size
     assert_true(file_size > 0)
Code Example #23
    def test_graph_save(self):
        n_samples = 10
        n_features = 11
        n_tasks = 1
        batch_size = 10
        X = np.random.rand(batch_size, n_samples, n_features)
        y = np.ones(shape=(n_samples, n_tasks))
        ids = np.arange(n_samples)

        dataset = dc.data.NumpyDataset(X, y, None, ids)
        g = TensorGraph(model_dir='/tmp/tmpss5_ki5_')

        inLayer = Input(shape=(None, n_samples, n_features))
        g.add_feature(inLayer)

        flatten = Flatten()
        g.add_layer(flatten, parents=[inLayer])

        dense = Dense(out_channels=1)
        g.add_layer(dense, parents=[flatten])
        g.add_output(dense)

        label_out = Input(shape=(None, 1))
        g.add_label(label_out)

        loss = LossLayer()
        g.add_layer(loss, parents=[dense, label_out])
        g.set_loss(loss)

        g.fit(dataset, nb_epoch=100)
        g.save()
        g1 = TensorGraph.load_from_dir('/tmp/tmpss5_ki5_')
        print(g1)
        print(g1.predict_on_batch(X))
Code Example #24
File: test_tensor_graph.py Project: zhshLee/deepchem
 def test_copy_layers_shared(self):
     """Test copying layers with shared variables."""
     tg = dc.models.TensorGraph()
     features = Feature(shape=(None, 10))
     dense = Dense(10,
                   in_layers=features,
                   biases_initializer=tf.random_normal_initializer)
     constant = Constant(10.0)
     output = dense + constant
     tg.add_output(output)
     tg.set_loss(output)
     replacements = {features: features, constant: Constant(20.0)}
     copy = output.copy(replacements, shared=True)
     tg.add_output(copy)
     assert isinstance(copy, Add)
     assert isinstance(copy.in_layers[0], Dense)
     assert isinstance(copy.in_layers[0].in_layers[0], Feature)
     assert copy.in_layers[1] == replacements[constant]
     variables1 = tg.get_layer_variables(dense)
     variables2 = tg.get_layer_variables(copy.in_layers[0])
     for v1, v2, in zip(variables1, variables2):
         assert v1 == v2
     feed_dict = {features: np.random.random((5, 10))}
     v1, v2 = tg.predict_on_generator([feed_dict], outputs=[output, copy])
     assert_true(np.all(np.isclose(v1 + 10, v2)))
Code Example #25
    def test_set_optimizer(self):
        n_data_points = 20
        n_features = 2
        X = np.random.rand(n_data_points, n_features)
        y = [[0, 1] for x in range(n_data_points)]
        dataset = NumpyDataset(X, y)
        features = Feature(shape=(None, n_features))
        dense = Dense(out_channels=2, in_layers=[features])
        output = SoftMax(in_layers=[dense])
        label = Label(shape=(None, 2))
        smce = SoftMaxCrossEntropy(in_layers=[label, dense])
        loss = ReduceMean(in_layers=[smce])
        tg = dc.models.TensorGraph(learning_rate=0.01, use_queue=False)
        tg.add_output(output)
        tg.set_loss(loss)
        global_step = tg.get_global_step()
        learning_rate = ExponentialDecay(initial_rate=0.1,
                                         decay_rate=0.96,
                                         decay_steps=100000)
        tg.set_optimizer(GradientDescent(learning_rate=learning_rate))
        tg.fit(dataset, nb_epoch=1000)
        prediction = np.squeeze(tg.predict_on_batch(X))
        tg.save()

        tg1 = TensorGraph.load_from_dir(tg.model_dir)
        prediction2 = np.squeeze(tg1.predict_on_batch(X))
        assert_true(np.all(np.isclose(prediction, prediction2, atol=0.01)))
Code Example #26
File: test_tensor_graph.py Project: zhshLee/deepchem
 def test_copy_layers(self):
     """Test copying layers."""
     tg = dc.models.TensorGraph()
     features = Feature(shape=(None, 10))
     dense = Dense(10,
                   in_layers=features,
                   biases_initializer=tf.random_normal_initializer)
     constant = Constant(10.0)
     output = dense + constant
     tg.add_output(output)
     tg.set_loss(output)
     tg.fit_generator([])
     replacements = {constant: Constant(20.0)}
     copy = output.copy(replacements, tg)
     assert isinstance(copy, Add)
     assert isinstance(copy.in_layers[0], Dense)
     assert isinstance(copy.in_layers[0].in_layers[0], Feature)
     assert copy.in_layers[1] == replacements[constant]
     variables = tg.get_layer_variables(dense)
     with tg._get_tf("Graph").as_default():
         if tfe.in_eager_mode():
             values = [v.numpy() for v in variables]
         else:
             values = tg.session.run(variables)
     for v1, v2 in zip(values, copy.in_layers[0].variable_values):
         assert np.array_equal(v1, v2)
Code Example #27
File: test_tensor_graph.py Project: zhshLee/deepchem
    def test_save_load(self):
        n_data_points = 20
        n_features = 2
        X = np.random.rand(n_data_points, n_features)
        y = [[0, 1] for x in range(n_data_points)]
        dataset = NumpyDataset(X, y)
        features = Feature(shape=(None, n_features))
        dense = Dense(out_channels=2, in_layers=[features])
        output = SoftMax(in_layers=[dense])
        label = Label(shape=(None, 2))
        smce = SoftMaxCrossEntropy(in_layers=[label, dense])
        loss = ReduceMean(in_layers=[smce])
        tg = dc.models.TensorGraph(learning_rate=0.01)
        tg.add_output(output)
        tg.set_loss(loss)
        submodel_loss = ReduceSum(in_layers=smce)
        submodel_opt = Adam(learning_rate=0.002)
        submodel = tg.create_submodel(layers=[dense],
                                      loss=submodel_loss,
                                      optimizer=submodel_opt)
        tg.fit(dataset, nb_epoch=1)
        prediction = np.squeeze(tg.predict_on_batch(X))
        tg.save()

        dirpath = tempfile.mkdtemp()
        shutil.rmtree(dirpath)
        shutil.move(tg.model_dir, dirpath)

        tg1 = TensorGraph.load_from_dir(dirpath)
        prediction2 = np.squeeze(tg1.predict_on_batch(X))
        assert_true(np.all(np.isclose(prediction, prediction2, atol=0.01)))
Code Example #28
    def test_mnist(self):
        from tensorflow.examples.tutorials.mnist import input_data
        mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
        train = dc.data.NumpyDataset(mnist.train.images, mnist.train.labels)
        valid = dc.data.NumpyDataset(mnist.validation.images,
                                     mnist.validation.labels)

        # Images are square 28x28 (batch, height, width, channel)
        feature = Feature(shape=(None, 784), name="Feature")
        make_image = Reshape(shape=(-1, 28, 28, 1), in_layers=[feature])

        conv2d_1 = Conv2D(num_outputs=32,
                          normalizer_fn=tf.contrib.layers.batch_norm,
                          in_layers=[make_image])
        maxpool_1 = MaxPool(in_layers=[conv2d_1])

        conv2d_2 = Conv2D(num_outputs=64,
                          normalizer_fn=tf.contrib.layers.batch_norm,
                          in_layers=[maxpool_1])
        maxpool_2 = MaxPool(in_layers=[conv2d_2])
        flatten = Flatten(in_layers=[maxpool_2])

        dense1 = Dense(out_channels=1024,
                       activation_fn=tf.nn.relu,
                       in_layers=[flatten])
        dense2 = Dense(out_channels=10, in_layers=[dense1])
        label = Label(shape=(None, 10), name="Label")
        smce = SoftMaxCrossEntropy(in_layers=[label, dense2])
        loss = ReduceMean(in_layers=[smce])
        output = SoftMax(in_layers=[dense2])

        tg = dc.models.TensorGraph(model_dir='/tmp/mnist',
                                   batch_size=1000,
                                   use_queue=True)
        tg.add_output(output)
        tg.set_loss(loss)
        tg.fit(train, nb_epoch=2)

        prediction = np.squeeze(tg.predict_proba_on_batch(valid.X))

        fpr = dict()
        tpr = dict()
        roc_auc = dict()
        for i in range(10):
            fpr[i], tpr[i], thresh = roc_curve(valid.y[:, i], prediction[:, i])
            roc_auc[i] = auc(fpr[i], tpr[i])
            assert_true(roc_auc[i] > 0.99)
Code Example #29
 def __init__(self):
     self.batch_size = 10
     self.tg = dc.models.TensorGraph(use_queue=False)
     self.features = Feature(shape=(None, 1))
     self.labels = Label(shape=(None, 1))
     hidden1 = Dense(in_layers=self.features,
                     out_channels=40,
                     activation_fn=tf.nn.relu)
     hidden2 = Dense(in_layers=hidden1,
                     out_channels=40,
                     activation_fn=tf.nn.relu)
     output = Dense(in_layers=hidden2, out_channels=1)
     loss = L2Loss(in_layers=[output, self.labels])
     self.tg.add_output(output)
     self.tg.set_loss(loss)
     with self.tg._get_tf("Graph").as_default():
         self.tg.build()
Code Example #30
def test_Dense_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 1))
  dense = Dense(out_channels=1, in_layers=feature)
  tg.add_output(dense)
  tg.set_loss(dense)
  tg.build()
  tg.save()