def _build_graph(self):
        # Flattened inputs: a (max_atoms x max_atoms) atom mask and per-atom
        # symmetry-function features (n_feat values per atom).
        self.atom_flags = Feature(shape=(None,
                                         self.max_atoms * self.max_atoms))
        self.atom_feats = Feature(shape=(None, self.max_atoms * self.n_feat))

        # Restore the (batch, max_atoms, ...) layout from the flattened inputs.
        reshaped_atom_feats = Reshape(in_layers=[self.atom_feats],
                                      shape=(-1, self.max_atoms, self.n_feat))
        reshaped_atom_flags = Reshape(in_layers=[self.atom_flags],
                                      shape=(-1, self.max_atoms,
                                             self.max_atoms))

        previous_layer = reshaped_atom_feats

        # Stack of tanh hidden layers acting on the per-atom feature axis.
        Hiddens = []
        for n_hidden in self.layer_structures:
            Hidden = Dense(out_channels=n_hidden,
                           activation_fn=tf.nn.tanh,
                           in_layers=[previous_layer])
            Hiddens.append(Hidden)
            previous_layer = Hiddens[-1]

        # Per-atom linear head (one channel per task); BPGather sums the
        # atomic contributions into per-molecule predictions.
        regression = Dense(out_channels=1 * self.n_tasks,
                           activation_fn=None,
                           in_layers=[Hiddens[-1]])
        output = BPGather(self.max_atoms,
                          in_layers=[regression, reshaped_atom_flags])
        self.add_output(output)

        # One Label tensor covering all tasks; squared-error loss combined
        # with per-task example weights.
        label = Label(shape=(None, self.n_tasks, 1))
        loss = ReduceSum(L2Loss(in_layers=[label, output]))
        weights = Weights(shape=(None, self.n_tasks))

        weighted_loss = WeightedError(in_layers=[loss, weights])
        self.set_loss(weighted_loss)
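For context, here is a minimal sketch of how a method like `_build_graph` above is typically wired into a legacy DeepChem `TensorGraph` subclass. The class name `BPRegressionSketch`, its constructor arguments, and the exact import paths are assumptions based on the legacy `deepchem.models.tensorgraph` API, not code taken from the snippet.

# Hedged sketch: wiring _build_graph into a TensorGraph subclass. Import
# paths and the constructor follow the legacy DeepChem TensorGraph API and
# may differ between DeepChem versions; the class itself is hypothetical.
import tensorflow as tf
from deepchem.models.tensorgraph.tensor_graph import TensorGraph
from deepchem.models.tensorgraph.layers import (Feature, Label, Weights,
                                                Dense, Reshape, L2Loss,
                                                ReduceSum, WeightedError)
# BPGather is assumed to live in DeepChem's symmetry-function layer module;
# adjust this import to your DeepChem version.
from deepchem.models.tensorgraph.symmetry_functions import BPGather


class BPRegressionSketch(TensorGraph):  # hypothetical class name

    def __init__(self, n_tasks, max_atoms, n_feat,
                 layer_structures=(64, 64), **kwargs):
        self.n_tasks = n_tasks
        self.max_atoms = max_atoms
        self.n_feat = n_feat
        self.layer_structures = list(layer_structures)
        super(BPRegressionSketch, self).__init__(**kwargs)
        # _build_graph is the method shown above, assumed to be pasted into
        # this class body.
        self._build_graph()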
def build_graph(self):
    # Variant of the graph above: the inputs already have the
    # (batch, max_atoms, ...) layout and every task gets its own output
    # and label.
    self.atom_flags = Feature(shape=(None, self.max_atoms, self.max_atoms))
    self.atom_feats = Feature(shape=(None, self.max_atoms, self.n_feat))
    previous_layer = self.atom_feats

    Hiddens = []
    for n_hidden in self.layer_structures:
      Hidden = Dense(
          out_channels=n_hidden,
          activation_fn=tf.nn.tanh,
          in_layers=[previous_layer])
      Hiddens.append(Hidden)
      previous_layer = Hiddens[-1]

    # One scalar head per task; BPGather sums the atomic contributions, and
    # each task gets its own Label placeholder and L2 cost.
    costs = []
    self.labels_fd = []
    for task in range(self.n_tasks):
      regression = Dense(
          out_channels=1, activation_fn=None, in_layers=[Hiddens[-1]])
      output = BPGather(self.max_atoms, in_layers=[regression, self.atom_flags])
      self.add_output(output)

      label = Label(shape=(None, 1))
      self.labels_fd.append(label)
      cost = L2Loss(in_layers=[label, output])
      costs.append(cost)

    # Stack the per-task costs and weight them per example/task.
    all_cost = Stack(in_layers=costs, axis=1)
    self.weights = Weights(shape=(None, self.n_tasks))
    loss = WeightedError(in_layers=[all_cost, self.weights])
    self.set_loss(loss)
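Because this version creates one Label per task (collected in self.labels_fd) plus a Weights input, the feed generator has to supply each of those placeholders separately. Below is a hedged sketch of such a generator, assuming the legacy TensorGraph.fit_generator() convention of yielding dicts that map Layer objects to numpy arrays; the argument names and shapes are illustrative assumptions.

# Hedged sketch of a feed generator for the per-task graph above, assuming
# the legacy TensorGraph.fit_generator() convention (a generator of dicts
# mapping Layer objects to numpy arrays). Names/shapes are illustrative.
import numpy as np


def feed_generator(model, feats, flags, y, w, batch_size=32):
    # feats: (N, max_atoms, n_feat), flags: (N, max_atoms, max_atoms),
    # y and w: (N, n_tasks)
    for start in range(0, len(feats), batch_size):
        sl = slice(start, start + batch_size)
        feed = {model.atom_feats: feats[sl], model.atom_flags: flags[sl]}
        for task, label in enumerate(model.labels_fd):
            # Each per-task Label expects shape (batch, 1).
            feed[label] = y[sl, task:task + 1]
        feed[model.weights] = w[sl]
        yield feed

# model.fit_generator(feed_generator(model, feats, flags, y, w))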
def build_graph(self):
        # ANI-style variant: ANIFeat turns four raw values per atom into
        # symmetry functions inside the graph, and the integer atomic numbers
        # select per-element weights in the hidden layers.
        self.atom_numbers = Feature(shape=(None, self.max_atoms),
                                    dtype=tf.int32)
        self.atom_flags = Feature(shape=(None,
                                         self.max_atoms * self.max_atoms))
        self.atom_feats = Feature(shape=(None, self.max_atoms * 4))

        reshaped_atom_flags = Reshape(in_layers=[self.atom_flags],
                                      shape=(-1, self.max_atoms,
                                             self.max_atoms))
        reshaped_atom_feats = Reshape(in_layers=[self.atom_feats],
                                      shape=(-1, self.max_atoms, 4))

        # In-graph symmetry-function featurization.
        previous_layer = ANIFeat(in_layers=reshaped_atom_feats,
                                 max_atoms=self.max_atoms)

        self.featurized = previous_layer

        # Hidden layers whose dense weights are selected by atomic number,
        # so each element in atom_number_cases gets its own parameters.
        Hiddens = []
        for n_hidden in self.layer_structures:
            Hidden = AtomicDifferentiatedDense(
                self.max_atoms,
                n_hidden,
                self.atom_number_cases,
                activation=self.activation_fn,
                in_layers=[previous_layer, self.atom_numbers])
            Hiddens.append(Hidden)
            previous_layer = Hiddens[-1]

        # Per-atom linear head summed into molecular predictions by BPGather.
        regression = Dense(out_channels=1 * self.n_tasks,
                           activation_fn=None,
                           in_layers=[Hiddens[-1]])
        output = BPGather(self.max_atoms,
                          in_layers=[regression, reshaped_atom_flags])
        self.add_output(output)

        label = Label(shape=(None, self.n_tasks, 1))
        loss = ReduceSum(L2Loss(in_layers=[label, output]))
        weights = Weights(shape=(None, self.n_tasks))

        weighted_loss = WeightedError(in_layers=[loss, weights])
        if self.exp_loss:
            # Optionally exponentiate the weighted loss to penalize large
            # errors more heavily.
            weighted_loss = Exp(in_layers=[weighted_loss])
        self.set_loss(weighted_loss)
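The ANI variants feed ANIFeat four raw values per atom. To the best of my reading of legacy DeepChem, these are the atomic number followed by the Cartesian coordinates, with padding atoms zeroed out; treat that layout, and the pairwise flag mask, as assumptions to verify against your DeepChem version. A small sketch of building the flattened inputs this graph expects:

# Hedged sketch of the per-atom input layout assumed above: each atom row is
# [atomic_number, x, y, z], zero-padded to max_atoms and then flattened to
# match the Feature shapes. The column ordering and the pairwise flag mask
# consumed by BPGather are assumptions based on legacy DeepChem behavior.
import numpy as np


def conformer_inputs(atomic_numbers, coords, max_atoms):
    # atomic_numbers: (n_atoms,), coords: (n_atoms, 3)
    n = len(atomic_numbers)
    feats = np.zeros((max_atoms, 4), dtype=np.float32)
    feats[:n, 0] = atomic_numbers
    feats[:n, 1:] = coords
    flags = np.zeros((max_atoms, max_atoms), dtype=np.float32)
    flags[:n, :n] = 1.0  # 1 for pairs of real atoms, 0 for padding
    numbers = np.zeros(max_atoms, dtype=np.int32)
    numbers[:n] = atomic_numbers
    return feats.reshape(-1), flags.reshape(-1), numbers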
Example #4
def build_graph(self):
        # ANI-style variant with unflattened inputs and one output and label
        # per task.
        self.atom_numbers = Feature(shape=(None, self.max_atoms),
                                    dtype=tf.int32)
        self.atom_flags = Feature(shape=(None, self.max_atoms, self.max_atoms))
        self.atom_feats = Feature(shape=(None, self.max_atoms, 4))

        # ANIFeat computes the symmetry-function featurization in-graph from
        # the raw per-atom inputs.
        previous_layer = ANIFeat(in_layers=self.atom_feats,
                                 max_atoms=self.max_atoms)

        self.featurized = previous_layer

        # Per-element hidden layers switched on the atomic numbers.
        Hiddens = []
        for n_hidden in self.layer_structures:
            Hidden = AtomicDifferentiatedDense(
                self.max_atoms,
                n_hidden,
                self.atom_number_cases,
                activation=self.activation_fn,
                in_layers=[previous_layer, self.atom_numbers])
            Hiddens.append(Hidden)
            previous_layer = Hiddens[-1]

        # One scalar head, Label placeholder, and L2 cost per task.
        costs = []
        self.labels_fd = []
        for task in range(self.n_tasks):
            regression = Dense(out_channels=1,
                               activation_fn=None,
                               in_layers=[Hiddens[-1]])
            output = BPGather(self.max_atoms,
                              in_layers=[regression, self.atom_flags])
            self.add_output(output)

            label = Label(shape=(None, 1))
            self.labels_fd.append(label)
            cost = L2Loss(in_layers=[label, output])
            costs.append(cost)

        # Stack the per-task costs and weight them per example/task.
        all_cost = Stack(in_layers=costs, axis=1)
        self.weights = Weights(shape=(None, self.n_tasks))
        loss = WeightedError(in_layers=[all_cost, self.weights])
        if self.exp_loss:
            # Optionally exponentiate the total to penalize large errors more
            # heavily.
            loss = Exp(in_layers=[loss])
        self.set_loss(loss)
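One practical note on the per-task Weights input used here and in the earlier per-task variant: a zero weight removes that (sample, task) term from the WeightedError reduction, which is how missing labels are commonly handled in multitask training. The values below are purely illustrative.

# Illustrative only: masking a missing task label with a zero weight.
import numpy as np

y = np.array([[1.2, 0.0],   # task 1 label missing for sample 0
              [0.7, 3.4]], dtype=np.float32)
w = np.array([[1.0, 0.0],   # zero weight drops the missing (sample, task) term
              [1.0, 1.0]], dtype=np.float32)
# Each column of y feeds one per-task Label (self.labels_fd[task]), reshaped
# to (batch, 1); w feeds self.weights, which has shape (batch, n_tasks).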