Example 1
  def build_graph(self):
    self.smiles_seqs = Feature(shape=(None, self.seq_length), dtype=tf.int32)
    # Character embedding
    self.Embedding = DTNNEmbedding(
        n_embedding=self.n_embedding,
        periodic_table_length=len(self.char_dict.keys()) + 1,
        in_layers=[self.smiles_seqs])
    self.pooled_outputs = []
    self.conv_layers = []
    for filter_size, num_filter in zip(self.kernel_sizes, self.num_filters):
      # Multiple convolutional layers with different filter widths
      self.conv_layers.append(
          Conv1D(
              kernel_size=filter_size,
              filters=num_filter,
              padding='valid',
              in_layers=[self.Embedding]))
      # Max-over-time pooling
      self.pooled_outputs.append(
          ReduceMax(axis=1, in_layers=[self.conv_layers[-1]]))
    # Concatenate features from all filters (one feature per filter)
    concat_outputs = Concat(axis=1, in_layers=self.pooled_outputs)
    dropout = Dropout(dropout_prob=self.dropout, in_layers=[concat_outputs])
    dense = Dense(
        out_channels=200, activation_fn=tf.nn.relu, in_layers=[dropout])
    # Highway layer from https://arxiv.org/pdf/1505.00387.pdf
    self.gather = Highway(in_layers=[dense])

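    # One output head and one loss term are built per task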
    costs = []
    self.labels_fd = []
    for task in range(self.n_tasks):
      if self.mode == "classification":
        classification = Dense(
            out_channels=2, activation_fn=None, in_layers=[self.gather])
        softmax = SoftMax(in_layers=[classification])
        self.add_output(softmax)

        label = Label(shape=(None, 2))
        self.labels_fd.append(label)
        cost = SoftMaxCrossEntropy(in_layers=[label, classification])
        costs.append(cost)
      elif self.mode == "regression":
        regression = Dense(
            out_channels=1, activation_fn=None, in_layers=[self.gather])
        self.add_output(regression)

        label = Label(shape=(None, 1))
        self.labels_fd.append(label)
        cost = L2Loss(in_layers=[label, regression])
        costs.append(cost)
    all_cost = Stack(in_layers=costs, axis=1)
    self.weights = Weights(shape=(None, self.n_tasks))
    loss = WeightedError(in_layers=[all_cost, self.weights])
    self.set_loss(loss)
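
A minimal usage sketch for the model whose graph is built above, assuming the DeepChem 2.x API; the dataset loader, featurizer choice and epoch count are illustrative assumptions, not part of the original snippet:

import deepchem as dc

# Load SMILES data and derive the character dictionary the model expects
tasks, (train, valid, test), transformers = dc.molnet.load_tox21(
    featurizer='Raw')
char_dict, seq_length = dc.models.TextCNNModel.build_char_dict(train)
model = dc.models.TextCNNModel(
    n_tasks=len(tasks),
    char_dict=char_dict,
    seq_length=seq_length,
    mode='classification')
model.fit(train, nb_epoch=10)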
Example 2
    def _build_graph(self):
        self.smiles_seqs = Feature(shape=(None, self.seq_length),
                                   dtype=tf.int32)
        # Character embedding
        Embedding = DTNNEmbedding(
            n_embedding=self.n_embedding,
            periodic_table_length=len(self.char_dict.keys()) + 1,
            in_layers=[self.smiles_seqs])
        pooled_outputs = []
        conv_layers = []
        for filter_size, num_filter in zip(self.kernel_sizes,
                                           self.num_filters):
            # Multiple convolutional layers with different filter widths
            conv_layers.append(
                Conv1D(kernel_size=filter_size,
                       filters=num_filter,
                       padding='valid',
                       in_layers=[Embedding]))
            # Max-over-time pooling
            pooled_outputs.append(
                ReduceMax(axis=1, in_layers=[conv_layers[-1]]))
        # Concatenate features from all filters (one feature per filter)
        concat_outputs = Concat(axis=1, in_layers=pooled_outputs)
        dropout = Dropout(dropout_prob=self.dropout,
                          in_layers=[concat_outputs])
        dense = Dense(out_channels=200,
                      activation_fn=tf.nn.relu,
                      in_layers=[dropout])
        # Highway layer from https://arxiv.org/pdf/1505.00387.pdf
        gather = Highway(in_layers=[dense])

        if self.mode == "classification":
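            # a single dense layer yields logits for all tasks at once,
            # reshaped to (batch, n_tasks, 2) before the per-task softmax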
            logits = Dense(out_channels=self.n_tasks * 2,
                           activation_fn=None,
                           in_layers=[gather])
            logits = Reshape(shape=(-1, self.n_tasks, 2), in_layers=[logits])
            output = SoftMax(in_layers=[logits])
            self.add_output(output)
            labels = Label(shape=(None, self.n_tasks, 2))
            loss = SoftMaxCrossEntropy(in_layers=[labels, logits])

        else:
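            # regression: one real-valued output per task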
            vals = Dense(out_channels=self.n_tasks * 1,
                         activation_fn=None,
                         in_layers=[gather])
            vals = Reshape(shape=(-1, self.n_tasks, 1), in_layers=[vals])
            self.add_output(vals)
            labels = Label(shape=(None, self.n_tasks, 1))
            loss = ReduceSum(L2Loss(in_layers=[labels, vals]))

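        # per-example, per-task weights for the weighted loss
        # (commonly used to mask missing labels)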
        weights = Weights(shape=(None, self.n_tasks))
        weighted_loss = WeightedError(in_layers=[loss, weights])
        self.set_loss(weighted_loss)
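
Where Example 1 builds a separate dense head and loss term per task in a Python loop, this version produces the logits for every task from a single dense layer and a Reshape. A standalone sketch of that multitask head in plain TensorFlow 2 (batch size, n_tasks and the feature width are all assumed values):

import tensorflow as tf

n_tasks, n_feat, batch = 12, 200, 8
features = tf.random.normal((batch, n_feat))   # stands in for the Highway output
logits = tf.keras.layers.Dense(n_tasks * 2)(features)
logits = tf.reshape(logits, (-1, n_tasks, 2))  # (batch, n_tasks, 2)
probs = tf.nn.softmax(logits, axis=-1)         # per-task class probabilities
labels = tf.one_hot(tf.zeros((batch, n_tasks), tf.int32), depth=2)
loss = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
print(loss.shape)                              # (8, 12): one term per sample and task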
Example 3
    def build_graph(self):
        # inputs placeholder
        self.inputs = Feature(shape=(None, self.image_size, self.image_size,
                                     3),
                              dtype=tf.float32)
        # data preprocessing and augmentation
        in_layer = DRAugment(self.augment,
                             self.batch_size,
                             size=(self.image_size, self.image_size),
                             in_layers=[self.inputs])
        # first conv layer
        in_layer = Conv2D(int(self.n_init_kernel),
                          kernel_size=7,
                          activation_fn=None,
                          in_layers=[in_layer])
        in_layer = BatchNorm(in_layers=[in_layer])
        in_layer = ReLU(in_layers=[in_layer])

        # downsample by max pooling
        res_in = MaxPool2D(ksize=[1, 3, 3, 1],
                           strides=[1, 2, 2, 1],
                           in_layers=[in_layer])

        for ct_module in range(self.n_downsample - 1):
            # each module is a residual convolutional block
            # followed by a convolutional downsample layer
            in_layer = Conv2D(int(self.n_init_kernel * 2**(ct_module - 1)),
                              kernel_size=1,
                              activation_fn=None,
                              in_layers=[res_in])
            in_layer = BatchNorm(in_layers=[in_layer])
            in_layer = ReLU(in_layers=[in_layer])
            in_layer = Conv2D(int(self.n_init_kernel * 2**(ct_module - 1)),
                              kernel_size=3,
                              activation_fn=None,
                              in_layers=[in_layer])
            in_layer = BatchNorm(in_layers=[in_layer])
            in_layer = ReLU(in_layers=[in_layer])
            in_layer = Conv2D(int(self.n_init_kernel * 2**ct_module),
                              kernel_size=1,
                              activation_fn=None,
                              in_layers=[in_layer])
            res_a = BatchNorm(in_layers=[in_layer])

            res_out = res_in + res_a
            res_in = Conv2D(int(self.n_init_kernel * 2**(ct_module + 1)),
                            kernel_size=3,
                            stride=2,
                            in_layers=[res_out])
            res_in = BatchNorm(in_layers=[res_in])

        # max pooling over the final outcome
        in_layer = ReduceMax(axis=(1, 2), in_layers=[res_in])

        for layer_size in self.n_fully_connected:
            # fully connected layers
            in_layer = Dense(layer_size,
                             activation_fn=tf.nn.relu,
                             in_layers=[in_layer])
            # dropout for dense layers
            #in_layer = Dropout(0.25, in_layers=[in_layer])

        logit_pred = Dense(self.n_tasks * self.n_classes,
                           activation_fn=None,
                           in_layers=[in_layer])
        logit_pred = Reshape(shape=(None, self.n_tasks, self.n_classes),
                             in_layers=[logit_pred])

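        # per-task example weights and integer class labels
        # for the sparse softmax loss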
        weights = Weights(shape=(None, self.n_tasks))
        labels = Label(shape=(None, self.n_tasks), dtype=tf.int32)

        output = SoftMax(in_layers=[logit_pred])
        self.add_output(output)
        loss = SparseSoftMaxCrossEntropy(in_layers=[labels, logit_pred])
        weighted_loss = WeightedError(in_layers=[loss, weights])

        # weight decay regularizer
        # weighted_loss = WeightDecay(0.1, 'l2', in_layers=[weighted_loss])
        self.set_loss(weighted_loss)
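
The loop above is the core of the network: a bottleneck residual block (1x1, 3x3, 1x1 convolutions, each followed by batch normalization) whose output is added back to the block input, followed by a stride-2 convolution that halves the spatial resolution. Below is a standalone tf.keras sketch of one such module; the function name, default values and 'same' padding are assumptions, and res_in must already carry n_init_kernel * 2**ct_module channels for the skip addition to be valid:

import tensorflow as tf

def residual_downsample(res_in, n_init_kernel=16, ct_module=0):
    # bottleneck: 1x1 -> 3x3 -> 1x1 convolutions, batch norm after each
    x = tf.keras.layers.Conv2D(
        int(n_init_kernel * 2**(ct_module - 1)), 1)(res_in)
    x = tf.keras.layers.BatchNormalization()(x)
    x = tf.keras.layers.ReLU()(x)
    x = tf.keras.layers.Conv2D(
        int(n_init_kernel * 2**(ct_module - 1)), 3, padding='same')(x)
    x = tf.keras.layers.BatchNormalization()(x)
    x = tf.keras.layers.ReLU()(x)
    x = tf.keras.layers.Conv2D(int(n_init_kernel * 2**ct_module), 1)(x)
    res_a = tf.keras.layers.BatchNormalization()(x)
    # skip connection: channel counts of res_in and res_a must match
    res_out = tf.keras.layers.Add()([res_in, res_a])
    # stride-2 convolution downsamples and doubles the channel count
    out = tf.keras.layers.Conv2D(
        int(n_init_kernel * 2**(ct_module + 1)), 3,
        strides=2, padding='same')(res_out)
    return tf.keras.layers.BatchNormalization()(out)

# usage: a 64x64 feature map with n_init_kernel channels (ct_module = 0)
inputs = tf.keras.Input(shape=(64, 64, 16))
outputs = residual_downsample(inputs, n_init_kernel=16, ct_module=0)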