Code example #1
    def build_graph(self):
        """Constructs the graph architecture of IRV as described in:

       https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2750043/
    """
        self.mol_features = Feature(shape=(None, self.n_features))
        self._labels = Label(shape=(None, self.n_tasks))
        self._weights = Weights(shape=(None, self.n_tasks))
        predictions = IRVLayer(self.n_tasks,
                               self.K,
                               in_layers=[self.mol_features])
        costs = []
        outputs = []
        for task in range(self.n_tasks):
            task_output = Slice(task, 1, in_layers=[predictions])
            sigmoid = Sigmoid(in_layers=[task_output])
            outputs.append(sigmoid)

            label = Slice(task, axis=1, in_layers=[self._labels])
            cost = SigmoidCrossEntropy(in_layers=[label, task_output])
            costs.append(cost)
        all_cost = Concat(in_layers=costs, axis=1)
        loss = WeightedError(in_layers=[all_cost, self._weights]) + \
            IRVRegularize(predictions, self.penalty, in_layers=[predictions])
        self.set_loss(loss)
        outputs = Stack(axis=1, in_layers=outputs)
        outputs = Concat(axis=2, in_layers=[1 - outputs, outputs])
        self.add_output(outputs)
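The last two lines above turn the per-task sigmoid outputs into two-class probabilities. Below is a minimal NumPy sketch of that shape arithmetic, assuming Stack behaves like np.stack and Concat like np.concatenate; the batch and task sizes are illustrative, not taken from the model.

import numpy as np

batch, n_tasks = 4, 3
# Hypothetical per-task sigmoid outputs, one (batch, 1) column per task.
task_sigmoids = [np.random.rand(batch, 1) for _ in range(n_tasks)]

stacked = np.stack(task_sigmoids, axis=1)                # (batch, n_tasks, 1)
probs = np.concatenate([1 - stacked, stacked], axis=2)   # (batch, n_tasks, 2)
assert probs.shape == (batch, n_tasks, 2)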
Code example #2
File: test_layers.py Project: ktaneishi/deepchem
 def test_concat(self):
   """Test that Concat can be invoked."""
   batch_size = 10
   n_features = 5
   in_tensor_1 = np.random.rand(batch_size, n_features)
   in_tensor_2 = np.random.rand(batch_size, n_features)
   with self.session() as sess:
     in_tensor_1 = tf.convert_to_tensor(in_tensor_1, dtype=tf.float32)
     in_tensor_2 = tf.convert_to_tensor(in_tensor_2, dtype=tf.float32)
     out_tensor = Concat(axis=1)(in_tensor_1, in_tensor_2)
     out_tensor = out_tensor.eval()
     assert out_tensor.shape == (batch_size, 2 * n_features)
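For reference, the shape asserted above matches plain NumPy concatenation along axis 1; a standalone sketch (not part of the test file):

import numpy as np

batch_size, n_features = 10, 5
a = np.random.rand(batch_size, n_features)
b = np.random.rand(batch_size, n_features)

# Joining two (batch, features) blocks along axis=1 doubles the feature width.
out = np.concatenate([a, b], axis=1)
assert out.shape == (batch_size, 2 * n_features)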
Code example #3
    def build_graph(self):
        self.atom_flags = Feature(shape=(None, self.max_atoms, self.max_atoms))
        self.atom_feats = Feature(shape=(None, self.max_atoms, self.n_feat))
        previous_layer = self.atom_feats

        Hiddens = []
        for n_hidden in self.layer_structures:
            Hidden = Dense(out_channels=n_hidden,
                           activation_fn=tf.nn.tanh,
                           in_layers=[previous_layer])
            Hiddens.append(Hidden)
            previous_layer = Hiddens[-1]

        costs = []
        self.labels_fd = []
        for task in range(self.n_tasks):
            regression = Dense(out_channels=1,
                               activation_fn=None,
                               in_layers=[Hiddens[-1]])
            output = BPGather(self.max_atoms,
                              in_layers=[regression, self.atom_flags])
            self.add_output(output)

            label = Label(shape=(None, 1))
            self.labels_fd.append(label)
            cost = L2Loss(in_layers=[label, output])
            costs.append(cost)

        all_cost = Concat(in_layers=costs, axis=0)
        self.weights = Weights(shape=(None, self.n_tasks))
        loss = WeightedError(in_layers=[all_cost, self.weights])
        self.set_loss(loss)
Code example #4
    def build_graph(self):
        # Build placeholders
        self.atom_features = Feature(shape=(None, self.n_atom_feat))
        self.pair_features = Feature(shape=(None, self.n_pair_feat))
        self.atom_split = Feature(shape=(None, ), dtype=tf.int32)
        self.atom_to_pair = Feature(shape=(None, 2), dtype=tf.int32)

        message_passing = MessagePassing(self.T,
                                         message_fn='enn',
                                         update_fn='gru',
                                         n_hidden=self.n_hidden,
                                         in_layers=[
                                             self.atom_features,
                                             self.pair_features,
                                             self.atom_to_pair
                                         ])

        atom_embeddings = Dense(self.n_hidden, in_layers=[message_passing])

        mol_embeddings = SetGather(
            self.M,
            self.batch_size,
            n_hidden=self.n_hidden,
            in_layers=[atom_embeddings, self.atom_split])

        dense1 = Dense(out_channels=2 * self.n_hidden,
                       activation_fn=tf.nn.relu,
                       in_layers=[mol_embeddings])
        costs = []
        self.labels_fd = []
        for task in range(self.n_tasks):
            if self.mode == "classification":
                classification = Dense(out_channels=2,
                                       activation_fn=None,
                                       in_layers=[dense1])
                softmax = SoftMax(in_layers=[classification])
                self.add_output(softmax)

                label = Label(shape=(None, 2))
                self.labels_fd.append(label)
                cost = SoftMaxCrossEntropy(in_layers=[label, classification])
                costs.append(cost)
            if self.mode == "regression":
                regression = Dense(out_channels=1,
                                   activation_fn=None,
                                   in_layers=[dense1])
                self.add_output(regression)

                label = Label(shape=(None, 1))
                self.labels_fd.append(label)
                cost = L2Loss(in_layers=[label, regression])
                costs.append(cost)
        if self.mode == "classification":
            all_cost = Concat(in_layers=costs, axis=1)
        elif self.mode == "regression":
            all_cost = Stack(in_layers=costs, axis=1)
        self.weights = Weights(shape=(None, self.n_tasks))
        loss = WeightedError(in_layers=[all_cost, self.weights])
        self.set_loss(loss)
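The choice between Concat and Stack for the per-task costs comes down to the rank of each cost tensor. A NumPy sketch of the two cases, under the assumption that the classification costs carry a trailing axis of size 1 while the regression costs are flat per-sample vectors (both shapes are assumptions for illustration):

import numpy as np

batch, n_tasks = 8, 3

# Assumed classification costs: one (batch, 1) column per task.
cls_costs = [np.random.rand(batch, 1) for _ in range(n_tasks)]
all_cls = np.concatenate(cls_costs, axis=1)   # (batch, n_tasks)

# Assumed regression costs: one flat (batch,) vector per task.
reg_costs = [np.random.rand(batch) for _ in range(n_tasks)]
all_reg = np.stack(reg_costs, axis=1)         # (batch, n_tasks)

assert all_cls.shape == all_reg.shape == (batch, n_tasks)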
Code example #5
def test_Concat_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 1))
  layer = Concat(in_layers=[feature, feature])
  tg.add_output(layer)
  tg.set_loss(layer)
  tg.build()
  tg.save()
Code example #6
    def build_graph(self):
        """Building graph structures:
        Features => DAGLayer => DAGGather => Classification or Regression
        """
        self.atom_features = Feature(shape=(None, self.n_atom_feat))
        self.parents = Feature(shape=(None, self.max_atoms, self.max_atoms),
                               dtype=tf.int32)
        self.calculation_orders = Feature(shape=(None, self.max_atoms),
                                          dtype=tf.int32)
        self.calculation_masks = Feature(shape=(None, self.max_atoms),
                                         dtype=tf.bool)
        self.membership = Feature(shape=(None, ), dtype=tf.int32)
        self.n_atoms = Feature(shape=(), dtype=tf.int32)
        dag_layer1 = DAGLayer(n_graph_feat=self.n_graph_feat,
                              n_atom_feat=self.n_atom_feat,
                              max_atoms=self.max_atoms,
                              batch_size=self.batch_size,
                              in_layers=[
                                  self.atom_features, self.parents,
                                  self.calculation_orders,
                                  self.calculation_masks, self.n_atoms
                              ])
        dag_gather = DAGGather(n_graph_feat=self.n_graph_feat,
                               n_outputs=self.n_outputs,
                               max_atoms=self.max_atoms,
                               in_layers=[dag_layer1, self.membership])

        costs = []
        self.labels_fd = []
        for task in range(self.n_tasks):
            if self.mode == "classification":
                classification = Dense(out_channels=2,
                                       activation_fn=None,
                                       in_layers=[dag_gather])
                softmax = SoftMax(in_layers=[classification])
                self.add_output(softmax)

                label = Label(shape=(None, 2))
                self.labels_fd.append(label)
                cost = SoftMaxCrossEntropy(in_layers=[label, classification])
                costs.append(cost)
            if self.mode == "regression":
                regression = Dense(out_channels=1,
                                   activation_fn=None,
                                   in_layers=[dag_gather])
                self.add_output(regression)

                label = Label(shape=(None, 1))
                self.labels_fd.append(label)
                cost = L2Loss(in_layers=[label, regression])
                costs.append(cost)
        if self.mode == "classification":
            all_cost = Concat(in_layers=costs, axis=1)
        elif self.mode == "regression":
            all_cost = Stack(in_layers=costs, axis=1)
        self.weights = Weights(shape=(None, self.n_tasks))
        loss = WeightedError(in_layers=[all_cost, self.weights])
        self.set_loss(loss)
Code example #7
File: text_cnn.py Project: zhshLee/deepchem
  def build_graph(self):
    self.smiles_seqs = Feature(shape=(None, self.seq_length), dtype=tf.int32)
    # Character embedding
    self.Embedding = DTNNEmbedding(
        n_embedding=self.n_embedding,
        periodic_table_length=len(self.char_dict.keys()) + 1,
        in_layers=[self.smiles_seqs])
    self.pooled_outputs = []
    self.conv_layers = []
    for filter_size, num_filter in zip(self.kernel_sizes, self.num_filters):
      # Multiple convolutional layers with different filter widths
      self.conv_layers.append(
          Conv1D(
              kernel_size=filter_size,
              filters=num_filter,
              padding='valid',
              in_layers=[self.Embedding]))
      # Max-over-time pooling
      self.pooled_outputs.append(
          ReduceMax(axis=1, in_layers=[self.conv_layers[-1]]))
    # Concat features from all filters (one feature per filter)
    concat_outputs = Concat(axis=1, in_layers=self.pooled_outputs)
    dropout = Dropout(dropout_prob=self.dropout, in_layers=[concat_outputs])
    dense = Dense(
        out_channels=200, activation_fn=tf.nn.relu, in_layers=[dropout])
    # Highway layer from https://arxiv.org/pdf/1505.00387.pdf
    self.gather = Highway(in_layers=[dense])

    costs = []
    self.labels_fd = []
    for task in range(self.n_tasks):
      if self.mode == "classification":
        classification = Dense(
            out_channels=2, activation_fn=None, in_layers=[self.gather])
        softmax = SoftMax(in_layers=[classification])
        self.add_output(softmax)

        label = Label(shape=(None, 2))
        self.labels_fd.append(label)
        cost = SoftMaxCrossEntropy(in_layers=[label, classification])
        costs.append(cost)
      if self.mode == "regression":
        regression = Dense(
            out_channels=1, activation_fn=None, in_layers=[self.gather])
        self.add_output(regression)

        label = Label(shape=(None, 1))
        self.labels_fd.append(label)
        cost = L2Loss(in_layers=[label, regression])
        costs.append(cost)
    if self.mode == "classification":
      all_cost = Stack(in_layers=costs, axis=1)
    elif self.mode == "regression":
      all_cost = Stack(in_layers=costs, axis=1)
    self.weights = Weights(shape=(None, self.n_tasks))
    loss = WeightedError(in_layers=[all_cost, self.weights])
    self.set_loss(loss)
Code example #8
    def _build(self):
        self.A_tilda_k = list()
        for k in range(1, self.k_max + 1):
            self.A_tilda_k.append(
                Feature(name="graph_adjacency_{}".format(k),
                        dtype=tf.float32,
                        shape=[None, self.max_nodes, self.max_nodes]))
        self.X = Feature(name='atom_features',
                         dtype=tf.float32,
                         shape=[None, self.max_nodes, self.num_node_features])

        graph_layers = list()
        adaptive_filters = list()

        for index, k in enumerate(range(1, self.k_max + 1)):

            in_layers = [self.A_tilda_k[index], self.X]

            adaptive_filters.append(
                AdaptiveFilter(batch_size=self.batch_size,
                               in_layers=in_layers,
                               num_nodes=self.max_nodes,
                               num_node_features=self.num_node_features,
                               combine_method=self.combine_method))

            graph_layers.append(
                KOrderGraphConv(batch_size=self.batch_size,
                                in_layers=in_layers +
                                [adaptive_filters[index]],
                                num_nodes=self.max_nodes,
                                num_node_features=self.num_node_features,
                                init='glorot_uniform'))

        graph_features = Concat(in_layers=graph_layers, axis=2)
        graph_features = ReLU(in_layers=[graph_features])
        flattened = Flatten(in_layers=[graph_features])

        dense1 = Dense(in_layers=[flattened],
                       out_channels=64,
                       activation_fn=tf.nn.relu)
        dense2 = Dense(in_layers=[dense1],
                       out_channels=16,
                       activation_fn=tf.nn.relu)
        dense3 = Dense(in_layers=[dense2],
                       out_channels=1 * self.n_tasks,
                       activation_fn=None)
        output = Reshape(in_layers=[dense3], shape=(-1, self.n_tasks, 1))
        self.add_output(output)

        label = Label(shape=(None, self.n_tasks, 1))
        weights = Weights(shape=(None, self.n_tasks))
        loss = ReduceSum(L2Loss(in_layers=[label, output]))

        weighted_loss = WeightedError(in_layers=[loss, weights])
        self.set_loss(weighted_loss)
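The Concat(axis=2) above joins the per-hop node representations along the feature axis before the ReLU and Flatten. A NumPy sketch of that shape arithmetic; the sizes are illustrative assumptions:

import numpy as np

batch, max_nodes, n_feat, k_max = 2, 16, 8, 3
# Hypothetical per-hop node features, one (batch, max_nodes, n_feat) block per k.
per_hop = [np.random.rand(batch, max_nodes, n_feat) for _ in range(k_max)]

graph_features = np.concatenate(per_hop, axis=2)
assert graph_features.shape == (batch, max_nodes, n_feat * k_max)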
Code example #9
File: text_cnn.py Project: zhoutf/deepchem
    def _build_graph(self):
        self.smiles_seqs = Feature(shape=(None, self.seq_length),
                                   dtype=tf.int32)
        # Character embedding
        Embedding = DTNNEmbedding(
            n_embedding=self.n_embedding,
            periodic_table_length=len(self.char_dict.keys()) + 1,
            in_layers=[self.smiles_seqs])
        pooled_outputs = []
        conv_layers = []
        for filter_size, num_filter in zip(self.kernel_sizes,
                                           self.num_filters):
            # Multiple convolutional layers with different filter widths
            conv_layers.append(
                Conv1D(kernel_size=filter_size,
                       filters=num_filter,
                       padding='valid',
                       in_layers=[Embedding]))
            # Max-over-time pooling
            pooled_outputs.append(
                ReduceMax(axis=1, in_layers=[conv_layers[-1]]))
        # Concat features from all filters (one feature per filter)
        concat_outputs = Concat(axis=1, in_layers=pooled_outputs)
        dropout = Dropout(dropout_prob=self.dropout,
                          in_layers=[concat_outputs])
        dense = Dense(out_channels=200,
                      activation_fn=tf.nn.relu,
                      in_layers=[dropout])
        # Highway layer from https://arxiv.org/pdf/1505.00387.pdf
        gather = Highway(in_layers=[dense])

        if self.mode == "classification":
            logits = Dense(out_channels=self.n_tasks * 2,
                           activation_fn=None,
                           in_layers=[gather])
            logits = Reshape(shape=(-1, self.n_tasks, 2), in_layers=[logits])
            output = SoftMax(in_layers=[logits])
            self.add_output(output)
            labels = Label(shape=(None, self.n_tasks, 2))
            loss = SoftMaxCrossEntropy(in_layers=[labels, logits])

        else:
            vals = Dense(out_channels=self.n_tasks * 1,
                         activation_fn=None,
                         in_layers=[gather])
            vals = Reshape(shape=(-1, self.n_tasks, 1), in_layers=[vals])
            self.add_output(vals)
            labels = Label(shape=(None, self.n_tasks, 1))
            loss = ReduceSum(L2Loss(in_layers=[labels, vals]))

        weights = Weights(shape=(None, self.n_tasks))
        weighted_loss = WeightedError(in_layers=[loss, weights])
        self.set_loss(weighted_loss)
Code example #10
    def add_adapter(self, all_layers, task, layer_num):
        """Add an adapter connection for given task/layer combo"""
        i = layer_num
        prev_layers = []
        trainable_layers = []
        # Handle output layer
        if i < len(self.layer_sizes):
            layer_sizes = self.layer_sizes
            alpha_init_stddev = self.alpha_init_stddevs[i]
            weight_init_stddev = self.weight_init_stddevs[i]
            bias_init_const = self.bias_init_consts[i]
        elif i == len(self.layer_sizes):
            layer_sizes = self.layer_sizes + [1]
            alpha_init_stddev = self.alpha_init_stddevs[-1]
            weight_init_stddev = self.weight_init_stddevs[-1]
            bias_init_const = self.bias_init_consts[-1]
        else:
            raise ValueError("layer_num too large for add_adapter.")
        # Iterate over all previous tasks.
        for prev_task in range(task):
            prev_layers.append(all_layers[(i - 1, prev_task)])
        # prev_layers is a list with elements of size
        # (batch_size, layer_sizes[i-1])
        prev_layer = Concat(axis=1, in_layers=prev_layers)
        with self._get_tf("Graph").as_default():
            alpha = TensorWrapper(
                tf.Variable(tf.truncated_normal((1, ),
                                                stddev=alpha_init_stddev),
                            name="alpha_layer_%d_task%d" % (i, task)))
            trainable_layers.append(alpha)

        prev_layer = prev_layer * alpha
        dense1 = Dense(in_layers=[prev_layer],
                       out_channels=layer_sizes[i - 1],
                       activation_fn=None,
                       weights_initializer=TFWrapper(
                           tf.truncated_normal_initializer,
                           stddev=weight_init_stddev),
                       biases_initializer=TFWrapper(tf.constant_initializer,
                                                    value=bias_init_const))
        trainable_layers.append(dense1)

        dense2 = Dense(in_layers=[dense1],
                       out_channels=layer_sizes[i],
                       activation_fn=None,
                       weights_initializer=TFWrapper(
                           tf.truncated_normal_initializer,
                           stddev=weight_init_stddev),
                       biases_initializer=None)
        trainable_layers.append(dense2)

        return dense2, trainable_layers
Code example #11
  def build_graph(self):
    """Building graph structures:
        Features => DTNNEmbedding => DTNNStep => DTNNStep => DTNNGather => Regression
        """
    self.atom_number = Feature(shape=(None,), dtype=tf.int32)
    self.distance = Feature(shape=(None, self.n_distance))
    self.atom_membership = Feature(shape=(None,), dtype=tf.int32)
    self.distance_membership_i = Feature(shape=(None,), dtype=tf.int32)
    self.distance_membership_j = Feature(shape=(None,), dtype=tf.int32)

    dtnn_embedding = DTNNEmbedding(
        n_embedding=self.n_embedding, in_layers=[self.atom_number])
    dtnn_layer1 = DTNNStep(
        n_embedding=self.n_embedding,
        n_distance=self.n_distance,
        in_layers=[
            dtnn_embedding, self.distance, self.distance_membership_i,
            self.distance_membership_j
        ])
    dtnn_layer2 = DTNNStep(
        n_embedding=self.n_embedding,
        n_distance=self.n_distance,
        in_layers=[
            dtnn_layer1, self.distance, self.distance_membership_i,
            self.distance_membership_j
        ])
    dtnn_gather = DTNNGather(
        n_embedding=self.n_embedding,
        n_outputs=self.n_hidden,
        in_layers=[dtnn_layer2, self.atom_membership])

    costs = []
    self.labels_fd = []
    for task in range(self.n_tasks):
      regression = Dense(
          out_channels=1, activation_fn=None, in_layers=[dtnn_gather])
      self.add_output(regression)

      label = Label(shape=(None, 1))
      self.labels_fd.append(label)
      cost = L2Loss(in_layers=[label, regression])
      costs.append(cost)

    all_cost = Concat(in_layers=costs)
    self.weights = Weights(shape=(None, self.n_tasks))
    loss = WeightedError(in_layers=[all_cost, self.weights])
    self.set_loss(loss)
Code example #12
    def create_layers(self, state, **kwargs):
        i = Reshape(in_layers=[state[0]], shape=(-1, 1))
        i = AddConstant(-1, in_layers=[i])
        i = InsertBatchIndex(in_layers=[i])
        # shape(i) = (batch_size, 1)

        q = Reshape(in_layers=[state[1]], shape=(-1, self.n_queue_obs))
        # shape(q) = (batch_size, n_queue_obs)
        #q = Dense(16, in_layers=[q], activation_fn=tensorflow.nn.relu)
        ## shape(q) = (batch_size, 16)

        x = q
        if not self.single_layer:
            for j in range(1):
                x1 = Dense(8, in_layers=[x], activation_fn=tensorflow.nn.relu)
                x = Concat(in_layers=[q, x1])
        # 1) shape(x) = (batch_size, n_queue_obs)
        # 2) shape(x) = (batch_size, n_queue_obs + 8)

        ps = []
        for j in range(self.n_products):
            p = Dense(self.n_actions, in_layers=[x])
            ps.append(p)
        p = Stack(in_layers=ps, axis=1)
        # shape(p) = (batch_size, n_products, n_actions)
        p = Gather(in_layers=[p, i])
        # shape(p) = (batch_size, n_actions)
        p = SoftMax(in_layers=[p])

        vs = []
        for j in range(self.n_products):
            v = Dense(1, in_layers=[x])
            vs.append(v)
        v = Stack(in_layers=vs, axis=1)
        # shape(v) = (batch_size, n_products, 1)
        v = Gather(in_layers=[v, i])
        # shape(v) = (batch_size, 1)

        return {'action_prob': p, 'value': v}
Code example #13
  def build_graph(self):
    """Building graph structures:
        Features => WeaveLayer => WeaveLayer => Dense => WeaveGather => Classification or Regression
        """
    self.atom_features = Feature(shape=(None, self.n_atom_feat))
    self.pair_features = Feature(shape=(None, self.n_pair_feat))
    combined = Combine_AP(in_layers=[self.atom_features, self.pair_features])
    self.pair_split = Feature(shape=(None,), dtype=tf.int32)
    self.atom_split = Feature(shape=(None,), dtype=tf.int32)
    self.atom_to_pair = Feature(shape=(None, 2), dtype=tf.int32)
    weave_layer1 = WeaveLayer(
        n_atom_input_feat=self.n_atom_feat,
        n_pair_input_feat=self.n_pair_feat,
        n_atom_output_feat=self.n_hidden,
        n_pair_output_feat=self.n_hidden,
        in_layers=[combined, self.pair_split, self.atom_to_pair])
    weave_layer2 = WeaveLayer(
        n_atom_input_feat=self.n_hidden,
        n_pair_input_feat=self.n_hidden,
        n_atom_output_feat=self.n_hidden,
        n_pair_output_feat=self.n_hidden,
        update_pair=False,
        in_layers=[weave_layer1, self.pair_split, self.atom_to_pair])
    separated = Separate_AP(in_layers=[weave_layer2])
    dense1 = Dense(
        out_channels=self.n_graph_feat,
        activation_fn=tf.nn.tanh,
        in_layers=[separated])
    batch_norm1 = BatchNormalization(epsilon=1e-5, mode=1, in_layers=[dense1])
    weave_gather = WeaveGather(
        self.batch_size,
        n_input=self.n_graph_feat,
        gaussian_expand=True,
        in_layers=[batch_norm1, self.atom_split])

    costs = []
    self.labels_fd = []
    for task in range(self.n_tasks):
      if self.mode == "classification":
        classification = Dense(
            out_channels=2, activation_fn=None, in_layers=[weave_gather])
        softmax = SoftMax(in_layers=[classification])
        self.add_output(softmax)

        label = Label(shape=(None, 2))
        self.labels_fd.append(label)
        cost = SoftMaxCrossEntropy(in_layers=[label, classification])
        costs.append(cost)
      if self.mode == "regression":
        regression = Dense(
            out_channels=1, activation_fn=None, in_layers=[weave_gather])
        self.add_output(regression)

        label = Label(shape=(None, 1))
        self.labels_fd.append(label)
        cost = L2Loss(in_layers=[label, regression])
        costs.append(cost)
    if self.mode == "classification":
      all_cost = Concat(in_layers=costs, axis=1)
    elif self.mode == "regression":
      all_cost = Stack(in_layers=costs, axis=1)
    self.weights = Weights(shape=(None, self.n_tasks))
    loss = WeightedError(in_layers=[all_cost, self.weights])
    self.set_loss(loss)
Code example #14
  def build_graph(self):
    """
    Building graph structures:
    """
    self.atom_features = Feature(shape=(None, 75))
    self.degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
    self.membership = Feature(shape=(None,), dtype=tf.int32)

    self.deg_adjs = []
    for i in range(0, 10 + 1):
      deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
      self.deg_adjs.append(deg_adj)
    gc1 = GraphConv(
        64,
        activation_fn=tf.nn.relu,
        in_layers=[self.atom_features, self.degree_slice, self.membership] +
        self.deg_adjs)
    batch_norm1 = BatchNorm(in_layers=[gc1])
    gp1 = GraphPool(in_layers=[batch_norm1, self.degree_slice, self.membership]
                    + self.deg_adjs)
    gc2 = GraphConv(
        64,
        activation_fn=tf.nn.relu,
        in_layers=[gp1, self.degree_slice, self.membership] + self.deg_adjs)
    batch_norm2 = BatchNorm(in_layers=[gc2])
    gp2 = GraphPool(in_layers=[batch_norm2, self.degree_slice, self.membership]
                    + self.deg_adjs)
    dense = Dense(out_channels=128, activation_fn=tf.nn.relu, in_layers=[gp2])
    batch_norm3 = BatchNorm(in_layers=[dense])
    readout = GraphGather(
        batch_size=self.batch_size,
        activation_fn=tf.nn.tanh,
        in_layers=[batch_norm3, self.degree_slice, self.membership] +
        self.deg_adjs)

    if self.error_bars == True:
      readout = Dropout(in_layers=[readout], dropout_prob=0.2)

    costs = []
    self.my_labels = []
    for task in range(self.n_tasks):
      if self.mode == 'classification':
        classification = Dense(
            out_channels=2, activation_fn=None, in_layers=[readout])

        softmax = SoftMax(in_layers=[classification])
        self.add_output(softmax)

        label = Label(shape=(None, 2))
        self.my_labels.append(label)
        cost = SoftMaxCrossEntropy(in_layers=[label, classification])
        costs.append(cost)
      if self.mode == 'regression':
        regression = Dense(
            out_channels=1, activation_fn=None, in_layers=[readout])
        self.add_output(regression)

        label = Label(shape=(None, 1))
        self.my_labels.append(label)
        cost = L2Loss(in_layers=[label, regression])
        costs.append(cost)
    if self.mode == "classification":
      entropy = Concat(in_layers=costs, axis=-1)
    elif self.mode == "regression":
      entropy = Stack(in_layers=costs, axis=1)
    self.my_task_weights = Weights(shape=(None, self.n_tasks))
    loss = WeightedError(in_layers=[entropy, self.my_task_weights])
    self.set_loss(loss)
Code example #15
File: model_utils.py Project: truongbuu/Invivo
def graph_conv_net(batch_size, prior, num_task):
    """
    Build a tensorgraph for multilabel classification task

    Return: features and labels layers
    """
    tg = TensorGraph(use_queue=False)
    if prior == True:
        add_on = num_task
    else:
        add_on = 0
    atom_features = Feature(shape=(None, 75 + 2 * add_on))
    circular_features = Feature(shape=(batch_size, 256), dtype=tf.float32)

    degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
    membership = Feature(shape=(None, ), dtype=tf.int32)
    deg_adjs = []
    for i in range(0, 10 + 1):
        deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
        deg_adjs.append(deg_adj)

    gc1 = GraphConv(64 + add_on,
                    activation_fn=tf.nn.elu,
                    in_layers=[atom_features, degree_slice, membership] +
                    deg_adjs)
    batch_norm1 = BatchNorm(in_layers=[gc1])
    gp1 = GraphPool(in_layers=[batch_norm1, degree_slice, membership] +
                    deg_adjs)

    gc2 = GraphConv(64 + add_on,
                    activation_fn=tf.nn.elu,
                    in_layers=[gc1, degree_slice, membership] + deg_adjs)
    batch_norm2 = BatchNorm(in_layers=[gc2])
    gp2 = GraphPool(in_layers=[batch_norm2, degree_slice, membership] +
                    deg_adjs)

    add = Concat(in_layers=[gp1, gp2])
    add = Dropout(0.5, in_layers=[add])
    dense = Dense(out_channels=128, activation_fn=tf.nn.elu, in_layers=[add])
    batch_norm3 = BatchNorm(in_layers=[dense])
    readout = GraphGather(batch_size=batch_size,
                          activation_fn=tf.nn.tanh,
                          in_layers=[batch_norm3, degree_slice, membership] +
                          deg_adjs)
    batch_norm4 = BatchNorm(in_layers=[readout])

    dense1 = Dense(out_channels=128,
                   activation_fn=tf.nn.elu,
                   in_layers=[circular_features])
    dense1 = BatchNorm(in_layers=[dense1])
    dense1 = Dropout(0.5, in_layers=[dense1])
    dense1 = Dense(out_channels=128,
                   activation_fn=tf.nn.elu,
                   in_layers=[circular_features])
    dense1 = BatchNorm(in_layers=[dense1])
    dense1 = Dropout(0.5, in_layers=[dense1])
    merge_feat = Concat(in_layers=[dense1, batch_norm4])
    merge = Dense(out_channels=256,
                  activation_fn=tf.nn.elu,
                  in_layers=[merge_feat])
    costs = []
    labels = []
    for task in range(num_task):
        classification = Dense(out_channels=2,
                               activation_fn=None,
                               in_layers=[merge])
        softmax = SoftMax(in_layers=[classification])
        tg.add_output(softmax)
        label = Label(shape=(None, 2))
        labels.append(label)
        cost = SoftMaxCrossEntropy(in_layers=[label, classification])
        costs.append(cost)
    all_cost = Stack(in_layers=costs, axis=1)
    weights = Weights(shape=(None, num_task))
    loss = WeightedError(in_layers=[all_cost, weights])
    tg.set_loss(loss)
    #if prior == True:
    #    return tg, atom_features,circular_features, degree_slice, membership, deg_adjs, labels, weights#, prior_layer
    return tg, atom_features, circular_features, degree_slice, membership, deg_adjs, labels, weights
Code example #16
def graph_conv_model(batch_size, tasks):
    model = TensorGraph(model_dir=model_dir,
                        batch_size=batch_size,
                        use_queue=False)
    atom_features = Feature(shape=(None, 75))
    degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
    membership = Feature(shape=(None, ), dtype=tf.int32)

    deg_adjs = []
    for i in range(0, 10 + 1):
        deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
        deg_adjs.append(deg_adj)
    gc1 = GraphConv(64,
                    activation_fn=tf.nn.relu,
                    in_layers=[atom_features, degree_slice, membership] +
                    deg_adjs)
    batch_norm1 = BatchNorm(in_layers=[gc1])
    gp1 = GraphPool(in_layers=[batch_norm1, degree_slice, membership] +
                    deg_adjs)
    gc2 = GraphConv(64,
                    activation_fn=tf.nn.relu,
                    in_layers=[gp1, degree_slice, membership] + deg_adjs)
    batch_norm2 = BatchNorm(in_layers=[gc2])
    gp2 = GraphPool(in_layers=[batch_norm2, degree_slice, membership] +
                    deg_adjs)
    dense = Dense(out_channels=128, activation_fn=None, in_layers=[gp2])
    batch_norm3 = BatchNorm(in_layers=[dense])
    gg1 = GraphGather(batch_size=batch_size,
                      activation_fn=tf.nn.tanh,
                      in_layers=[batch_norm3, degree_slice, membership] +
                      deg_adjs)

    costs = []
    labels = []
    for task in tasks:
        classification = Dense(out_channels=2,
                               activation_fn=None,
                               in_layers=[gg1])

        softmax = SoftMax(in_layers=[classification])
        model.add_output(softmax)

        label = Label(shape=(None, 2))
        labels.append(label)
        cost = SoftMaxCrossEntropy(in_layers=[label, classification])
        costs.append(cost)

    entropy = Concat(in_layers=costs)
    task_weights = Weights(shape=(None, len(tasks)))
    loss = WeightedError(in_layers=[entropy, task_weights])
    model.set_loss(loss)

    def feed_dict_generator(dataset, batch_size, epochs=1):
        for epoch in range(epochs):
            for ind, (X_b, y_b, w_b, ids_b) in enumerate(
                    dataset.iterbatches(batch_size, pad_batches=True)):
                d = {}
                for index, label in enumerate(labels):
                    d[label] = to_one_hot(y_b[:, index])
                d[task_weights] = w_b
                multiConvMol = ConvMol.agglomerate_mols(X_b)
                d[atom_features] = multiConvMol.get_atom_features()
                d[degree_slice] = multiConvMol.deg_slice
                d[membership] = multiConvMol.membership
                for i in range(1, len(multiConvMol.get_deg_adjacency_lists())):
                    d[deg_adjs[i -
                               1]] = multiConvMol.get_deg_adjacency_lists()[i]
                yield d

    return model, feed_dict_generator, labels, task_weights
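A possible way to drive this helper, sketched under the assumption that the returned TensorGraph can be trained with fit_generator on the feed-dict generator; tasks, train_dataset, and the epoch count are hypothetical placeholders, not taken from the source above.

# Hypothetical usage sketch; `tasks` and `train_dataset` are assumed to exist.
batch_size = 50
model, generator, labels, task_weights = graph_conv_model(batch_size, tasks)
model.fit_generator(generator(train_dataset, batch_size, epochs=10))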
Code example #17
    def __init__(self,
                 n_tasks,
                 n_features,
                 layer_sizes=[1000],
                 weight_init_stddevs=0.02,
                 bias_init_consts=1.0,
                 weight_decay_penalty=0.0,
                 weight_decay_penalty_type="l2",
                 dropouts=0.5,
                 activation_fns=tf.nn.relu,
                 bypass_layer_sizes=[100],
                 bypass_weight_init_stddevs=[.02],
                 bypass_bias_init_consts=[1.],
                 bypass_dropouts=[.5],
                 **kwargs):
        """ Create a RobustMultitaskRegressor.

    Parameters
    ----------
    n_tasks: int
      number of tasks
    n_features: int
      number of features
    layer_sizes: list
      the size of each dense layer in the network.  The length of this list determines the number of layers.
    weight_init_stddevs: list or float
      the standard deviation of the distribution to use for weight initialization of each layer.  The length
      of this list should equal len(layer_sizes).  Alternatively this may be a single value instead of a list,
      in which case the same value is used for every layer.
    bias_init_consts: list or float
      the value to initialize the biases in each layer to.  The length of this list should equal len(layer_sizes).
      Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
    weight_decay_penalty: float
      the magnitude of the weight decay penalty to use
    weight_decay_penalty_type: str
      the type of penalty to use for weight decay, either 'l1' or 'l2'
    dropouts: list or float
      the dropout probability to use for each layer.  The length of this list should equal len(layer_sizes).
      Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
    activation_fns: list or object
      the Tensorflow activation function to apply to each layer.  The length of this list should equal
      len(layer_sizes).  Alternatively this may be a single value instead of a list, in which case the
      same value is used for every layer.
    bypass_layer_sizes: list
      the size of each dense layer in the bypass network. The length of this list determines the number of bypass layers.
    bypass_weight_init_stddevs: list or float
      the standard deviation of the distribution to use for weight initialization of bypass layers.
      same requirements as weight_init_stddevs
    bypass_bias_init_consts: list or float
      the value to initialize the biases in bypass layers
      same requirements as bias_init_consts
    bypass_dropouts: list or float
      the dropout probability to use for bypass layers.
      same requirements as dropouts
    """
        super(RobustMultitaskRegressor, self).__init__(**kwargs)
        self.n_tasks = n_tasks
        self.n_features = n_features
        n_layers = len(layer_sizes)
        if not isinstance(weight_init_stddevs, collections.Sequence):
            weight_init_stddevs = [weight_init_stddevs] * n_layers
        if not isinstance(bias_init_consts, collections.Sequence):
            bias_init_consts = [bias_init_consts] * n_layers
        if not isinstance(dropouts, collections.Sequence):
            dropouts = [dropouts] * n_layers
        if not isinstance(activation_fns, collections.Sequence):
            activation_fns = [activation_fns] * n_layers

        n_bypass_layers = len(bypass_layer_sizes)
        if not isinstance(bypass_weight_init_stddevs, collections.Sequence):
            bypass_weight_init_stddevs = [bypass_weight_init_stddevs
                                          ] * n_bypass_layers
        if not isinstance(bypass_bias_init_consts, collections.Sequence):
            bypass_bias_init_consts = [bypass_bias_init_consts
                                       ] * n_bypass_layers
        if not isinstance(bypass_dropouts, collections.Sequence):
            bypass_dropouts = [bypass_dropouts] * n_bypass_layers
        bypass_activation_fns = [activation_fns[0]] * n_bypass_layers

        # Add the input features.
        mol_features = Feature(shape=(None, n_features))
        prev_layer = mol_features

        # Add the shared dense layers
        for size, weight_stddev, bias_const, dropout, activation_fn in zip(
                layer_sizes, weight_init_stddevs, bias_init_consts, dropouts,
                activation_fns):
            layer = Dense(in_layers=[prev_layer],
                          out_channels=size,
                          activation_fn=activation_fn,
                          weights_initializer=TFWrapper(
                              tf.truncated_normal_initializer,
                              stddev=weight_stddev),
                          biases_initializer=TFWrapper(tf.constant_initializer,
                                                       value=bias_const))
            if dropout > 0.0:
                layer = Dropout(dropout, in_layers=[layer])
            prev_layer = layer
        top_multitask_layer = prev_layer

        task_outputs = []
        for i in range(self.n_tasks):
            prev_layer = mol_features
            # Add task-specific bypass layers
            for size, weight_stddev, bias_const, dropout, activation_fn in zip(
                    bypass_layer_sizes, bypass_weight_init_stddevs,
                    bypass_bias_init_consts, bypass_dropouts,
                    bypass_activation_fns):
                layer = Dense(in_layers=[prev_layer],
                              out_channels=size,
                              activation_fn=activation_fn,
                              weights_initializer=TFWrapper(
                                  tf.truncated_normal_initializer,
                                  stddev=weight_stddev),
                              biases_initializer=TFWrapper(
                                  tf.constant_initializer, value=bias_const))
                if dropout > 0.0:
                    layer = Dropout(dropout, in_layers=[layer])
                prev_layer = layer
            top_bypass_layer = prev_layer

            if n_bypass_layers > 0:
                task_layer = Concat(
                    axis=1, in_layers=[top_multitask_layer, top_bypass_layer])
            else:
                task_layer = top_multitask_layer

            task_out = Dense(in_layers=[task_layer], out_channels=1)
            task_outputs.append(task_out)

        output = Concat(axis=1, in_layers=task_outputs)

        self.add_output(output)
        labels = Label(shape=(None, n_tasks))
        weights = Weights(shape=(None, n_tasks))
        weighted_loss = ReduceSum(L2Loss(in_layers=[labels, output, weights]))
        if weight_decay_penalty != 0.0:
            weighted_loss = WeightDecay(weight_decay_penalty,
                                        weight_decay_penalty_type,
                                        in_layers=[weighted_loss])
        self.set_loss(weighted_loss)
Code example #18
File: graph_models.py Project: kicchi/from_physalis
    def build_graph(self):
        self.vertex_features = Feature(shape=(None, self.max_atoms, 75))
        self.adj_matrix = Feature(shape=(None, self.max_atoms, 1,
                                         self.max_atoms))
        self.mask = Feature(shape=(None, self.max_atoms, 1))

        gcnn1 = BatchNorm(
            GraphCNN(
                num_filters=64,
                in_layers=[self.vertex_features, self.adj_matrix, self.mask]))
        gcnn1 = Dropout(self.dropout, in_layers=gcnn1)
        gcnn2 = BatchNorm(
            GraphCNN(num_filters=64,
                     in_layers=[gcnn1, self.adj_matrix, self.mask]))
        gcnn2 = Dropout(self.dropout, in_layers=gcnn2)
        gc_pool, adj_matrix = GraphCNNPool(
            num_vertices=32, in_layers=[gcnn2, self.adj_matrix, self.mask])
        gc_pool = BatchNorm(gc_pool)
        gc_pool = Dropout(self.dropout, in_layers=gc_pool)
        gcnn3 = BatchNorm(
            GraphCNN(num_filters=32, in_layers=[gc_pool, adj_matrix]))
        gcnn3 = Dropout(self.dropout, in_layers=gcnn3)
        gc_pool2, adj_matrix2 = GraphCNNPool(num_vertices=8,
                                             in_layers=[gcnn3, adj_matrix])
        gc_pool2 = BatchNorm(gc_pool2)
        gc_pool2 = Dropout(self.dropout, in_layers=gc_pool2)
        flattened = Flatten(in_layers=gc_pool2)
        readout = Dense(out_channels=256,
                        activation_fn=tf.nn.relu,
                        in_layers=flattened)
        costs = []
        self.my_labels = []
        for task in range(self.n_tasks):
            if self.mode == 'classification':
                classification = Dense(out_channels=2,
                                       activation_fn=None,
                                       in_layers=[readout])

                softmax = SoftMax(in_layers=[classification])
                self.add_output(softmax)

                label = Label(shape=(None, 2))
                self.my_labels.append(label)
                cost = SoftMaxCrossEntropy(in_layers=[label, classification])
                costs.append(cost)
            if self.mode == 'regression':
                regression = Dense(out_channels=1,
                                   activation_fn=None,
                                   in_layers=[readout])
                self.add_output(regression)

                label = Label(shape=(None, 1))
                self.my_labels.append(label)
                cost = L2Loss(in_layers=[label, regression])
                costs.append(cost)
        if self.mode == "classification":
            entropy = Concat(in_layers=costs, axis=-1)
        elif self.mode == "regression":
            entropy = Stack(in_layers=costs, axis=1)
        self.my_task_weights = Weights(shape=(None, self.n_tasks))
        loss = WeightedError(in_layers=[entropy, self.my_task_weights])
        self.set_loss(loss)
Code example #19
File: unet.py Project: veena-v-g/deepchem
    def __init__(self,
                 img_rows=512,
                 img_cols=512,
                 filters=[64, 128, 256, 512, 1024],
                 model=dc.models.TensorGraph(),
                 **kwargs):
        super(UNet, self).__init__(use_queue=False, **kwargs)
        self.img_cols = img_cols
        self.img_rows = img_rows
        self.filters = filters
        self.model = dc.models.TensorGraph()

        input = Feature(shape=(None, self.img_rows, self.img_cols))

        conv1 = Conv2D(num_outputs=self.filters[0],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[input])
        conv1 = Conv2D(num_outputs=self.filters[0],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[conv1])
        pool1 = MaxPool2D(ksize=2, in_layers=[conv1])

        conv2 = Conv2D(num_outputs=self.filters[1],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[pool1])
        conv2 = Conv2D(num_outputs=self.filters[1],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[conv2])
        pool2 = MaxPool2D(ksize=2, in_layers=[conv2])

        conv3 = Conv2D(num_outputs=self.filters[2],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[pool2])
        conv3 = Conv2D(num_outputs=self.filters[2],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[conv3])
        pool3 = MaxPool2D(ksize=2, in_layers=[conv3])

        conv4 = Conv2D(num_outputs=self.filters[3],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[pool3])
        conv4 = Conv2D(num_outputs=self.filters[3],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[conv4])
        pool4 = MaxPool2D(ksize=2, in_layers=[conv4])

        conv5 = Conv2D(num_outputs=self.filters[4],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[pool4])
        conv5 = Conv2D(num_outputs=self.filters[4],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[conv5])

        up6 = Conv2DTranspose(num_outputs=self.filters[3],
                              kernel_size=2,
                              in_layers=[conv5])
        concat6 = Concat(in_layers=[conv4, up6], axis=1)
        conv6 = Conv2D(num_outputs=self.filters[3],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[concat6])
        conv6 = Conv2D(num_outputs=self.filters[3],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[conv6])

        up7 = Conv2DTranspose(num_outputs=self.filters[2],
                              kernel_size=2,
                              in_layers=[conv6])
        concat7 = Concat(in_layers=[conv3, up7], axis=1)
        conv7 = Conv2D(num_outputs=self.filters[2],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[concat7])
        conv7 = Conv2D(num_outputs=self.filters[2],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[conv7])

        up8 = Conv2DTranspose(num_outputs=self.filters[1],
                              kernel_size=2,
                              in_layers=[conv7])
        concat8 = Concat(in_layers=[conv2, up8], axis=1)
        conv8 = Conv2D(num_outputs=self.filters[1],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[concat8])
        conv8 = Conv2D(num_outputs=self.filters[1],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[conv8])

        up9 = Conv2DTranspose(num_outputs=self.filters[0],
                              kernel_size=2,
                              in_layers=[conv8])
        concat9 = Concat(in_layers=[conv1, up9], axis=1)
        conv9 = Conv2D(num_outputs=self.filters[0],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[concat9])
        conv9 = Conv2D(num_outputs=self.filters[0],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[conv9])

        conv10 = Conv2D(num_outputs=1,
                        kernel_size=1,
                        activation='sigmoid',
                        in_layers=[conv9])

        self.add_output(conv10)
Code example #20
def sluice_model(batch_size, tasks):
    model = TensorGraph(model_dir=model_dir,
                        batch_size=batch_size,
                        use_queue=False,
                        tensorboard=True)
    atom_features = Feature(shape=(None, 75))
    degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
    membership = Feature(shape=(None, ), dtype=tf.int32)

    sluice_loss = []
    deg_adjs = []
    for i in range(0, 10 + 1):
        deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
        deg_adjs.append(deg_adj)

    gc1 = GraphConv(64,
                    activation_fn=tf.nn.relu,
                    in_layers=[atom_features, degree_slice, membership] +
                    deg_adjs)

    as1 = AlphaShare(in_layers=[gc1, gc1])
    sluice_loss.append(gc1)

    batch_norm1a = BatchNorm(in_layers=[as1[0]])
    batch_norm1b = BatchNorm(in_layers=[as1[1]])

    gp1a = GraphPool(in_layers=[batch_norm1a, degree_slice, membership] +
                     deg_adjs)
    gp1b = GraphPool(in_layers=[batch_norm1b, degree_slice, membership] +
                     deg_adjs)

    gc2a = GraphConv(64,
                     activation_fn=tf.nn.relu,
                     in_layers=[gp1a, degree_slice, membership] + deg_adjs)
    gc2b = GraphConv(64,
                     activation_fn=tf.nn.relu,
                     in_layers=[gp1b, degree_slice, membership] + deg_adjs)

    as2 = AlphaShare(in_layers=[gc2a, gc2b])
    sluice_loss.append(gc2a)
    sluice_loss.append(gc2b)

    batch_norm2a = BatchNorm(in_layers=[as2[0]])
    batch_norm2b = BatchNorm(in_layers=[as2[1]])

    gp2a = GraphPool(in_layers=[batch_norm2a, degree_slice, membership] +
                     deg_adjs)
    gp2b = GraphPool(in_layers=[batch_norm2b, degree_slice, membership] +
                     deg_adjs)

    densea = Dense(out_channels=128, activation_fn=None, in_layers=[gp2a])
    denseb = Dense(out_channels=128, activation_fn=None, in_layers=[gp2b])

    batch_norm3a = BatchNorm(in_layers=[densea])
    batch_norm3b = BatchNorm(in_layers=[denseb])

    as3 = AlphaShare(in_layers=[batch_norm3a, batch_norm3b])
    sluice_loss.append(batch_norm3a)
    sluice_loss.append(batch_norm3b)

    gg1a = GraphGather(batch_size=batch_size,
                       activation_fn=tf.nn.tanh,
                       in_layers=[as3[0], degree_slice, membership] + deg_adjs)
    gg1b = GraphGather(batch_size=batch_size,
                       activation_fn=tf.nn.tanh,
                       in_layers=[as3[1], degree_slice, membership] + deg_adjs)

    costs = []
    labels = []
    count = 0
    for task in tasks:
        if count < len(tasks) / 2:
            classification = Dense(out_channels=2,
                                   activation_fn=None,
                                   in_layers=[gg1a])
            print("first half:")
            print(task)
        else:
            classification = Dense(out_channels=2,
                                   activation_fn=None,
                                   in_layers=[gg1b])
            print('second half')
            print(task)
        count += 1

        softmax = SoftMax(in_layers=[classification])
        model.add_output(softmax)

        label = Label(shape=(None, 2))
        labels.append(label)
        cost = SoftMaxCrossEntropy(in_layers=[label, classification])
        costs.append(cost)

    entropy = Concat(in_layers=costs)
    task_weights = Weights(shape=(None, len(tasks)))
    task_loss = WeightedError(in_layers=[entropy, task_weights])

    s_cost = SluiceLoss(in_layers=sluice_loss)

    total_loss = Add(in_layers=[task_loss, s_cost])
    model.set_loss(total_loss)

    def feed_dict_generator(dataset, batch_size, epochs=1):
        for epoch in range(epochs):
            for ind, (X_b, y_b, w_b, ids_b) in enumerate(
                    dataset.iterbatches(batch_size, pad_batches=True)):
                d = {}
                for index, label in enumerate(labels):
                    d[label] = to_one_hot(y_b[:, index])
                d[task_weights] = w_b
                multiConvMol = ConvMol.agglomerate_mols(X_b)
                d[atom_features] = multiConvMol.get_atom_features()
                d[degree_slice] = multiConvMol.deg_slice
                d[membership] = multiConvMol.membership
                for i in range(1, len(multiConvMol.get_deg_adjacency_lists())):
                    d[deg_adjs[i -
                               1]] = multiConvMol.get_deg_adjacency_lists()[i]
                yield d

    return model, feed_dict_generator, labels, task_weights
Code example #21
    def __init__(self,
                 img_rows=512,
                 img_cols=512,
                 filters=[64, 128, 256, 512, 1024],
                 **kwargs):
        super(UNet, self).__init__(use_queue=False, **kwargs)
        self.img_cols = img_cols
        self.img_rows = img_rows
        self.filters = filters

        input = Feature(shape=(None, self.img_rows, self.img_cols, 3))
        labels = Label(shape=(None, self.img_rows * self.img_cols))

        conv1 = Conv2D(num_outputs=self.filters[0],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[input])
        conv1 = Conv2D(num_outputs=self.filters[0],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[conv1])
        pool1 = MaxPool2D(ksize=[1, 2, 2, 1], in_layers=[conv1])

        conv2 = Conv2D(num_outputs=self.filters[1],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[pool1])
        conv2 = Conv2D(num_outputs=self.filters[1],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[conv2])
        pool2 = MaxPool2D(ksize=[1, 2, 2, 1], in_layers=[conv2])

        conv3 = Conv2D(num_outputs=self.filters[2],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[pool2])
        conv3 = Conv2D(num_outputs=self.filters[2],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[conv3])
        pool3 = MaxPool2D(ksize=[1, 2, 2, 1], in_layers=[conv3])

        conv4 = Conv2D(num_outputs=self.filters[3],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[pool3])
        conv4 = Conv2D(num_outputs=self.filters[3],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[conv4])
        pool4 = MaxPool2D(ksize=[1, 2, 2, 1], in_layers=[conv4])

        conv5 = Conv2D(num_outputs=self.filters[4],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[pool4])
        conv5 = Conv2D(num_outputs=self.filters[4],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[conv5])

        up6 = Conv2DTranspose(num_outputs=self.filters[3],
                              kernel_size=2,
                              stride=2,
                              in_layers=[conv5])
        concat6 = Concat(in_layers=[conv4, up6], axis=3)
        conv6 = Conv2D(num_outputs=self.filters[3],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[concat6])

        conv6 = Conv2D(num_outputs=self.filters[3],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[conv6])

        up7 = Conv2DTranspose(num_outputs=self.filters[2],
                              kernel_size=2,
                              stride=2,
                              in_layers=[conv6])
        concat7 = Concat(in_layers=[conv3, up7], axis=3)
        conv7 = Conv2D(num_outputs=self.filters[2],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[concat7])
        conv7 = Conv2D(num_outputs=self.filters[2],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[conv7])

        up8 = Conv2DTranspose(num_outputs=self.filters[1],
                              kernel_size=2,
                              stride=2,
                              in_layers=[conv7])
        concat8 = Concat(in_layers=[conv2, up8], axis=3)
        conv8 = Conv2D(num_outputs=self.filters[1],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[concat8])
        conv8 = Conv2D(num_outputs=self.filters[1],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[conv8])

        up9 = Conv2DTranspose(num_outputs=self.filters[0],
                              kernel_size=2,
                              stride=2,
                              in_layers=[conv8])
        concat9 = Concat(in_layers=[conv1, up9], axis=3)
        conv9 = Conv2D(num_outputs=self.filters[0],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[concat9])
        conv9 = Conv2D(num_outputs=self.filters[0],
                       kernel_size=3,
                       activation='relu',
                       padding='same',
                       in_layers=[conv9])

        conv10 = Conv2D(num_outputs=1,
                        kernel_size=1,
                        activation='sigmoid',
                        in_layers=[conv9])

        loss = SoftMaxCrossEntropy(in_layers=[labels, conv10])
        loss = ReduceMean(in_layers=[loss])
        self.set_loss(loss)
        self.add_output(conv10)
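In this NHWC variant the skip connections concatenate encoder and decoder feature maps along the channel axis (axis=3), unlike the axis=1 concatenation in the earlier UNet example. A NumPy sketch of the resulting channel count; the spatial size and filter counts are illustrative assumptions:

import numpy as np

# Hypothetical NHWC feature maps at the same spatial resolution.
encoder = np.zeros((1, 64, 64, 512))   # e.g. a conv4-like block
decoder = np.zeros((1, 64, 64, 512))   # e.g. an up6-like upsampled block

skip = np.concatenate([encoder, decoder], axis=3)  # channel counts add
assert skip.shape == (1, 64, 64, 1024)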