Example 1
  def test_reshape(self):
    """Test that Reshape can be invoked."""
    in_dim_1 = 2
    in_dim_2 = 2
    out_dim = 4
    batch_size = 10
    in_tensor = np.random.rand(batch_size, in_dim_1, in_dim_2)
    with self.session():
      in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
      out_tensor = Reshape((batch_size, out_dim))(in_tensor)
      out_tensor = out_tensor.eval()
      assert out_tensor.shape == (batch_size, out_dim)
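Reshape here wraps an ordinary tensor reshape; a plain-NumPy sketch of the same shape check, with no DeepChem layer involved (values hypothetical):

import numpy as np

x = np.random.rand(10, 2, 2)  # (batch_size, in_dim_1, in_dim_2)
y = x.reshape(10, 4)          # the same reshape the layer performs
assert y.shape == (10, 4)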
Example 2
    def build_graph(self):
        """Build graph structures:

        Features => DAGLayer => DAGGather => Classification or Regression
        """
        self.atom_features = Feature(shape=(None, self.n_atom_feat))
        self.parents = Feature(shape=(None, self.max_atoms, self.max_atoms),
                               dtype=tf.int32)
        self.calculation_orders = Feature(shape=(None, self.max_atoms),
                                          dtype=tf.int32)
        self.calculation_masks = Feature(shape=(None, self.max_atoms),
                                         dtype=tf.bool)
        self.membership = Feature(shape=(None, ), dtype=tf.int32)
        self.n_atoms = Feature(shape=(), dtype=tf.int32)
        dag_layer1 = DAGLayer(n_graph_feat=self.n_graph_feat,
                              n_atom_feat=self.n_atom_feat,
                              max_atoms=self.max_atoms,
                              layer_sizes=self.layer_sizes,
                              dropout=self.dropout,
                              batch_size=self.batch_size,
                              in_layers=[
                                  self.atom_features, self.parents,
                                  self.calculation_orders,
                                  self.calculation_masks, self.n_atoms
                              ])
        dag_gather = DAGGather(n_graph_feat=self.n_graph_feat,
                               n_outputs=self.n_outputs,
                               max_atoms=self.max_atoms,
                               layer_sizes=self.layer_sizes_gather,
                               dropout=self.dropout,
                               in_layers=[dag_layer1, self.membership])

        n_tasks = self.n_tasks
        weights = Weights(shape=(None, n_tasks))
        if self.mode == 'classification':
            n_classes = self.n_classes
            labels = Label(shape=(None, n_tasks, n_classes))
            logits = Reshape(shape=(None, n_tasks, n_classes),
                             in_layers=[
                                 Dense(in_layers=dag_gather,
                                       out_channels=n_tasks * n_classes)
                             ])
            output = SoftMax(logits)
            self.add_output(output)
            loss = SoftMaxCrossEntropy(in_layers=[labels, logits])
            weighted_loss = WeightedError(in_layers=[loss, weights])
            self.set_loss(weighted_loss)
        else:
            labels = Label(shape=(None, n_tasks))
            output = Reshape(
                shape=(None, n_tasks),
                in_layers=[Dense(in_layers=dag_gather, out_channels=n_tasks)])
            self.add_output(output)
            if self.uncertainty:
                log_var = Reshape(shape=(None, n_tasks),
                                  in_layers=[
                                      Dense(in_layers=dag_gather,
                                            out_channels=n_tasks)
                                  ])
                var = Exp(log_var)
                self.add_variance(var)
                diff = labels - output
                weighted_loss = weights * (diff * diff / var + log_var)
                weighted_loss = ReduceSum(ReduceMean(weighted_loss, axis=[1]))
            else:
                weighted_loss = ReduceSum(
                    L2Loss(in_layers=[labels, output, weights]))
            self.set_loss(weighted_loss)
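The uncertainty branch above is the standard heteroscedastic Gaussian negative log-likelihood (up to constants). A minimal NumPy sketch of that loss, assuming labels, preds, log_var, and weights all have shape (batch, n_tasks):

import numpy as np

def heteroscedastic_loss(labels, preds, log_var, weights):
    # weights * ((y - mu)^2 / var + log(var)), averaged over tasks and
    # summed over the batch -- mirrors ReduceSum(ReduceMean(..., axis=[1]))
    var = np.exp(log_var)
    diff = labels - preds
    per_sample = (weights * (diff * diff / var + log_var)).mean(axis=1)
    return per_sample.sum()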
Example 3
    def build_graph(self):
        """Build graph structures:

        Features => GraphConv/GraphPool blocks => Dense => GraphGather => task heads
        """
        self.atom_features = Feature(shape=(None, self.number_atom_features))
        self.degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
        self.membership = Feature(shape=(None, ), dtype=tf.int32)

        # One adjacency-list placeholder per atom degree handled by GraphConv.
        self.deg_adjs = []
        for i in range(0, 10 + 1):
            deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
            self.deg_adjs.append(deg_adj)
        in_layer = self.atom_features
        for layer_size, dropout in zip(self.graph_conv_layers, self.dropout):
            gc1_in = [in_layer, self.degree_slice, self.membership
                      ] + self.deg_adjs
            gc1 = GraphConv(layer_size,
                            activation_fn=tf.nn.relu,
                            in_layers=gc1_in)
            batch_norm1 = BatchNorm(in_layers=[gc1])
            if dropout > 0.0:
                batch_norm1 = Dropout(dropout, in_layers=batch_norm1)
            gp_in = [batch_norm1, self.degree_slice, self.membership
                     ] + self.deg_adjs
            in_layer = GraphPool(in_layers=gp_in)
        dense = Dense(out_channels=self.dense_layer_size,
                      activation_fn=tf.nn.relu,
                      in_layers=[in_layer])
        batch_norm3 = BatchNorm(in_layers=[dense])
        if self.dropout[-1] > 0.0:
            batch_norm3 = Dropout(self.dropout[-1], in_layers=batch_norm3)
        self.neural_fingerprint = GraphGather(
            batch_size=self.batch_size,
            activation_fn=tf.nn.tanh,
            in_layers=[batch_norm3, self.degree_slice, self.membership] +
            self.deg_adjs)

        n_tasks = self.n_tasks
        weights = Weights(shape=(None, n_tasks))
        if self.mode == 'classification':
            n_classes = self.n_classes
            labels = Label(shape=(None, n_tasks, n_classes))
            logits = Reshape(shape=(None, n_tasks, n_classes),
                             in_layers=[
                                 Dense(in_layers=self.neural_fingerprint,
                                       out_channels=n_tasks * n_classes)
                             ])
            logits = TrimGraphOutput([logits, weights])
            output = SoftMax(logits)
            self.add_output(output)
            loss = SoftMaxCrossEntropy(in_layers=[labels, logits])
            weighted_loss = WeightedError(in_layers=[loss, weights])
            self.set_loss(weighted_loss)
        else:
            labels = Label(shape=(None, n_tasks))
            output = Reshape(shape=(None, n_tasks),
                             in_layers=[
                                 Dense(in_layers=self.neural_fingerprint,
                                       out_channels=n_tasks)
                             ])
            output = TrimGraphOutput([output, weights])
            self.add_output(output)
            if self.uncertainty:
                log_var = Reshape(shape=(None, n_tasks),
                                  in_layers=[
                                      Dense(in_layers=self.neural_fingerprint,
                                            out_channels=n_tasks)
                                  ])
                log_var = TrimGraphOutput([log_var, weights])
                var = Exp(log_var)
                self.add_variance(var)
                diff = labels - output
                weighted_loss = weights * (diff * diff / var + log_var)
                weighted_loss = ReduceSum(ReduceMean(weighted_loss, axis=[1]))
            else:
                weighted_loss = ReduceSum(
                    L2Loss(in_layers=[labels, output, weights]))
            self.set_loss(weighted_loss)
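This graph differs from Example 2 mainly in the TrimGraphOutput steps, which are needed because GraphGather pads its output up to batch_size. A hypothetical plain-TF1 equivalent of the trim (trim_graph_output is an illustrative name, not the library function):

import tensorflow as tf

def trim_graph_output(output, weights):
    # Keep only as many rows as there are real samples in the batch,
    # inferred from the first dimension of the weights tensor.
    n_samples = tf.shape(weights)[0]
    return output[:n_samples]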
Example 4
  def __init__(self,
               n_tasks,
               n_features,
               layer_sizes=[1000],
               weight_init_stddevs=0.02,
               bias_init_consts=1.0,
               weight_decay_penalty=0.0,
               weight_decay_penalty_type="l2",
               dropouts=0.5,
               activation_fns=tf.nn.relu,
               n_classes=2,
               **kwargs):
    """Create a MultiTaskClassifier.

    In addition to the following arguments, this class also accepts
    all the keyword arguments from TensorGraph.

    Parameters
    ----------
    n_tasks: int
      number of tasks
    n_features: int
      number of features
    layer_sizes: list
      the size of each dense layer in the network.  The length of
      this list determines the number of layers.
    weight_init_stddevs: list or float
      the standard deviation of the distribution to use for weight
      initialization of each layer.  The length of this list should
      equal len(layer_sizes).  Alternatively this may be a single
      value instead of a list, in which case the same value is used
      for every layer.
    bias_init_consts: list or float
      the value to initialize the biases in each layer to.  The
      length of this list should equal len(layer_sizes).
      Alternatively this may be a single value instead of a list, in
      which case the same value is used for every layer.
    weight_decay_penalty: float
      the magnitude of the weight decay penalty to use
    weight_decay_penalty_type: str
      the type of penalty to use for weight decay, either 'l1' or 'l2'
    dropouts: list or float
      the dropout probability to use for each layer.  The length of this list should equal len(layer_sizes).
      Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
    activation_fns: list or object
      the TensorFlow activation function to apply to each layer.  The length of this list should equal
      len(layer_sizes).  Alternatively this may be a single value instead of a list, in which case the
      same value is used for every layer.
    n_classes: int
      the number of classes
    """
    super(MultiTaskClassifier, self).__init__(**kwargs)
    self.n_tasks = n_tasks
    self.n_features = n_features
    self.n_classes = n_classes
    n_layers = len(layer_sizes)
    if not isinstance(weight_init_stddevs, collections.Sequence):
      weight_init_stddevs = [weight_init_stddevs] * n_layers
    if not isinstance(bias_init_consts, collections.Sequence):
      bias_init_consts = [bias_init_consts] * n_layers
    if not isinstance(dropouts, collections.Sequence):
      dropouts = [dropouts] * n_layers
    if not isinstance(activation_fns, collections.Sequence):
      activation_fns = [activation_fns] * n_layers

    # Add the input features.

    mol_features = Feature(shape=(None, n_features))
    prev_layer = mol_features

    # Add the dense layers

    for size, weight_stddev, bias_const, dropout, activation_fn in zip(
        layer_sizes, weight_init_stddevs, bias_init_consts, dropouts,
        activation_fns):
      layer = Dense(
          in_layers=[prev_layer],
          out_channels=size,
          activation_fn=activation_fn,
          weights_initializer=TFWrapper(
              tf.truncated_normal_initializer, stddev=weight_stddev),
          biases_initializer=TFWrapper(
              tf.constant_initializer, value=bias_const))
      if dropout > 0.0:
        layer = Dropout(dropout, in_layers=[layer])
      prev_layer = layer

    # Compute the loss function for each label.

    output = Reshape(
        shape=(-1, n_tasks, n_classes),
        in_layers=[
            Dense(in_layers=[prev_layer], out_channels=n_tasks * n_classes)
        ])
    self.add_output(output)
    labels = Label(shape=(None, n_tasks, n_classes))
    weights = Weights(shape=(None, n_tasks))
    loss = SoftMaxCrossEntropy(in_layers=[labels, output])
    weighted_loss = WeightedError(in_layers=[loss, weights])
    if weight_decay_penalty != 0.0:
      weighted_loss = WeightDecay(
          weight_decay_penalty,
          weight_decay_penalty_type,
          in_layers=[weighted_loss])
    self.set_loss(weighted_loss)
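A hedged usage sketch for this constructor (TensorGraph-era deepchem; dataset is a hypothetical dc.data.Dataset, so the fit call stays commented):

import deepchem as dc

model = dc.models.MultiTaskClassifier(n_tasks=12,
                                      n_features=1024,
                                      layer_sizes=[1000, 500],
                                      dropouts=0.25,
                                      n_classes=2)
# model.fit(dataset, nb_epoch=10)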
Example 5
  def create_layers(self, state, **kwargs):
    # `env` is captured from the enclosing scope; this policy ignores the
    # state and always emits a uniform action distribution.
    action = Variable(np.ones(env.n_actions))
    output = SoftMax(
        in_layers=[Reshape(in_layers=[action], shape=(-1, env.n_actions))])
    value = Variable([0.0])
    return {'action_prob': output, 'value': value}
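This policy ignores the state entirely: a softmax over a vector of ones is the uniform distribution. A quick NumPy check (n_actions hypothetical):

import numpy as np

n_actions = 4
logits = np.ones(n_actions)
probs = np.exp(logits) / np.exp(logits).sum()
assert np.allclose(probs, 1.0 / n_actions)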
Example 6
    def __init__(self,
                 n_tasks,
                 n_features,
                 layer_sizes=[1000],
                 weight_init_stddevs=[0.02, 0.02],
                 bias_init_consts=[1.0, 1.0],
                 weight_decay_penalty=0.0,
                 weight_decay_penalty_type="l2",
                 dropouts=[0.5],
                 **kwargs):
        """Create a TensorGraphMultiTaskRegressor.

    In addition to the following arguments, this class also accepts all the keyword arguments
    from TensorGraph.

    Parameters
    ----------
    n_tasks: int
      number of tasks
    n_features: int
      number of features
    layer_sizes: list
      the size of each dense layer in the network.  The length of this list determines the number of layers.
    weight_init_stddevs: list
      the standard deviation of the distribution to use for weight initialization of each layer.  The length
      of this list should equal len(layer_sizes)+1.  The final element corresponds to the output layer.
    bias_init_consts: list
      the value to initialize the biases in each layer to.  The length of this list should equal len(layer_sizes)+1.
      The final element corresponds to the output layer.
    weight_decay_penalty: float
      the magnitude of the weight decay penalty to use
    weight_decay_penalty_type: str
      the type of penalty to use for weight decay, either 'l1' or 'l2'
    dropouts: list
      the dropout probability to use for each layer.  The length of this list should equal len(layer_sizes).
    """
        super().__init__(mode='regression', **kwargs)
        self.n_tasks = n_tasks
        self.n_features = n_features

        # Add the input features.

        mol_features = Feature(shape=(None, n_features))
        prev_layer = mol_features

        # Add the dense layers

        for size, weight_stddev, bias_const, dropout in zip(
                layer_sizes, weight_init_stddevs, bias_init_consts, dropouts):
            layer = Dense(in_layers=[prev_layer],
                          out_channels=size,
                          activation_fn=tf.nn.relu,
                          weights_initializer=TFWrapper(
                              tf.truncated_normal_initializer,
                              stddev=weight_stddev),
                          biases_initializer=TFWrapper(tf.constant_initializer,
                                                       value=bias_const))
            if dropout > 0.0:
                layer = Dropout(dropout, in_layers=[layer])
            prev_layer = layer

        # Compute the loss function for each label.

        output = Reshape(shape=(-1, n_tasks, 1),
                         in_layers=[
                             Dense(in_layers=[prev_layer],
                                   out_channels=n_tasks,
                                   weights_initializer=TFWrapper(
                                       tf.truncated_normal_initializer,
                                       stddev=weight_init_stddevs[-1]),
                                   biases_initializer=TFWrapper(
                                       tf.constant_initializer,
                                       value=bias_init_consts[-1]))
                         ])
        self.add_output(output)
        labels = Label(shape=(None, n_tasks, 1))
        weights = Weights(shape=(None, n_tasks))
        loss = L2Loss(in_layers=[labels, output])
        weighted_loss = WeightedError(in_layers=[loss, weights])
        if weight_decay_penalty != 0.0:
            weighted_loss = WeightDecay(weight_decay_penalty,
                                        weight_decay_penalty_type,
                                        in_layers=[weighted_loss])
        self.set_loss(weighted_loss)
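One subtlety in the loop above: zip stops at the shortest argument, so with the defaults (one hidden layer but two stddevs and two bias constants) only the first element of each initializer list is consumed inside the loop; the final element is reserved for the output layer built afterwards:

# defaults: layer_sizes=[1000], weight_init_stddevs=[0.02, 0.02],
#           bias_init_consts=[1.0, 1.0], dropouts=[0.5]
list(zip([1000], [0.02, 0.02], [1.0, 1.0], [0.5]))
# -> [(1000, 0.02, 1.0, 0.5)]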
Example 7
    def __init__(self,
                 n_tasks,
                 n_features,
                 layer_sizes=[1000],
                 weight_init_stddevs=0.02,
                 bias_init_consts=1.0,
                 weight_decay_penalty=0.0,
                 weight_decay_penalty_type="l2",
                 dropouts=0.5,
                 activation_fns=tf.nn.relu,
                 uncertainty=False,
                 **kwargs):
        """Create a MultiTaskRegressor.

    In addition to the following arguments, this class also accepts all the keyword arguments
    from TensorGraph.

    Parameters
    ----------
    n_tasks: int
      number of tasks
    n_features: int
      number of features
    layer_sizes: list
      the size of each dense layer in the network.  The length of this list determines the number of layers.
    weight_init_stddevs: list or float
      the standard deviation of the distribution to use for weight initialization of each layer.  The length
      of this list should equal len(layer_sizes)+1.  The final element corresponds to the output layer.
      Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
    bias_init_consts: list or float
      the value to initialize the biases in each layer to.  The length of this list should equal len(layer_sizes)+1.
      The final element corresponds to the output layer.  Alternatively this may be a single value instead of a list,
      in which case the same value is used for every layer.
    weight_decay_penalty: float
      the magnitude of the weight decay penalty to use
    weight_decay_penalty_type: str
      the type of penalty to use for weight decay, either 'l1' or 'l2'
    dropouts: list or float
      the dropout probability to use for each layer.  The length of this list should equal len(layer_sizes).
      Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
    activation_fns: list or object
      the TensorFlow activation function to apply to each layer.  The length of this list should equal
      len(layer_sizes).  Alternatively this may be a single value instead of a list, in which case the
      same value is used for every layer.
    uncertainty: bool
      if True, include extra outputs and loss terms to enable the uncertainty
      in outputs to be predicted
    """
        super(MultiTaskRegressor, self).__init__(**kwargs)
        self.n_tasks = n_tasks
        self.n_features = n_features
        n_layers = len(layer_sizes)
        if not isinstance(weight_init_stddevs, collections.Sequence):
            weight_init_stddevs = [weight_init_stddevs] * (n_layers + 1)
        if not isinstance(bias_init_consts, collections.Sequence):
            bias_init_consts = [bias_init_consts] * (n_layers + 1)
        if not isinstance(dropouts, collections.Sequence):
            dropouts = [dropouts] * n_layers
        if not isinstance(activation_fns, collections.Sequence):
            activation_fns = [activation_fns] * n_layers
        if uncertainty:
            if any(d == 0.0 for d in dropouts):
                raise ValueError(
                    'Dropout must be included in every layer to predict uncertainty'
                )

        # Add the input features.

        mol_features = Feature(shape=(None, n_features))
        prev_layer = mol_features

        # Add the dense layers

        for size, weight_stddev, bias_const, dropout, activation_fn in zip(
                layer_sizes, weight_init_stddevs, bias_init_consts, dropouts,
                activation_fns):
            layer = Dense(in_layers=[prev_layer],
                          out_channels=size,
                          activation_fn=activation_fn,
                          weights_initializer=TFWrapper(
                              tf.truncated_normal_initializer,
                              stddev=weight_stddev),
                          biases_initializer=TFWrapper(tf.constant_initializer,
                                                       value=bias_const))
            if dropout > 0.0:
                layer = Dropout(dropout, in_layers=[layer])
            prev_layer = layer

        # Compute the loss function for each label.

        output = Reshape(shape=(-1, n_tasks, 1),
                         in_layers=[
                             Dense(in_layers=[prev_layer],
                                   out_channels=n_tasks,
                                   weights_initializer=TFWrapper(
                                       tf.truncated_normal_initializer,
                                       stddev=weight_init_stddevs[-1]),
                                   biases_initializer=TFWrapper(
                                       tf.constant_initializer,
                                       value=bias_init_consts[-1]))
                         ])
        self.add_output(output)
        labels = Label(shape=(None, n_tasks, 1))
        weights = Weights(shape=(None, n_tasks, 1))
        if uncertainty:
            log_var = Reshape(
                shape=(-1, n_tasks, 1),
                in_layers=[
                    Dense(in_layers=[prev_layer],
                          out_channels=n_tasks,
                          weights_initializer=TFWrapper(
                              tf.truncated_normal_initializer,
                              stddev=weight_init_stddevs[-1]),
                          biases_initializer=TFWrapper(tf.constant_initializer,
                                                       value=0.0))
                ])
            var = Exp(log_var)
            self.add_variance(var)
            diff = labels - output
            weighted_loss = weights * (diff * diff / var + log_var)
            weighted_loss = ReduceSum(ReduceMean(weighted_loss, axis=[1, 2]))
        else:
            weighted_loss = ReduceSum(
                L2Loss(in_layers=[labels, output, weights]))
        if weight_decay_penalty != 0.0:
            weighted_loss = WeightDecay(weight_decay_penalty,
                                        weight_decay_penalty_type,
                                        in_layers=[weighted_loss])
        self.set_loss(weighted_loss)
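A hedged usage sketch with uncertainty enabled (TensorGraph-era deepchem; dataset is hypothetical, so the calls that need it stay commented; note the constructor requires nonzero dropout in every layer when uncertainty=True):

import deepchem as dc

model = dc.models.MultiTaskRegressor(n_tasks=1,
                                     n_features=1024,
                                     layer_sizes=[500],
                                     dropouts=0.1,
                                     uncertainty=True)
# model.fit(dataset)
# y_pred, y_std = model.predict_uncertainty(dataset)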
Example 8
  def build_graph(self):
    # inputs placeholder
    self.inputs = Feature(
        shape=(None, self.image_size, self.image_size, 3), dtype=tf.uint8)
    # data preprocessing and augmentation
    in_layer = DRAugment(
        self.augment,
        self.batch_size,
        size=(self.image_size, self.image_size),
        in_layers=[self.inputs])
    # first conv layer
    in_layer = Conv2D(
        self.n_init_kernel,
        kernel_size=7,
        activation_fn=None,
        in_layers=[in_layer])
    in_layer = BatchNorm(in_layers=[in_layer])
    in_layer = ReLU(in_layers=[in_layer])

    # downsample by max pooling
    res_in = MaxPool2D(
        ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], in_layers=[in_layer])

    for ct_module in range(self.n_downsample - 1):
      # each module is a residual convolutional block
      # followed by a convolutional downsample layer
      in_layer = Conv2D(
          self.n_init_kernel * 2**(ct_module - 1),
          kernel_size=1,
          activation_fn=None,
          in_layers=[res_in])
      in_layer = BatchNorm(in_layers=[in_layer])
      in_layer = ReLU(in_layers=[in_layer])
      in_layer = Conv2D(
          self.n_init_kernel * 2**(ct_module - 1),
          kernel_size=3,
          activation_fn=None,
          in_layers=[in_layer])
      in_layer = BatchNorm(in_layers=[in_layer])
      in_layer = ReLU(in_layers=[in_layer])
      in_layer = Conv2D(
          self.n_init_kernel * 2**ct_module,
          kernel_size=1,
          activation_fn=None,
          in_layers=[in_layer])
      res_a = BatchNorm(in_layers=[in_layer])

      res_out = res_in + res_a
      res_in = Conv2D(
          self.n_init_kernel * 2**(ct_module + 1),
          kernel_size=3,
          stride=2,
          in_layers=[res_out])
      res_in = BatchNorm(in_layers=[res_in])

    # max pooling over the final outcome
    in_layer = ReduceMax(axis=(1, 2), in_layers=[res_in])

    for layer_size in self.n_fully_connected:
      # fully connected layers
      in_layer = Dense(
          layer_size, activation_fn=tf.nn.relu, in_layers=[in_layer])
      # dropout for dense layers
      #in_layer = Dropout(0.25, in_layers=[in_layer])

    logit_pred = Dense(
        self.n_tasks * self.n_classes, activation_fn=None, in_layers=[in_layer])
    logit_pred = Reshape(
        shape=(None, self.n_tasks, self.n_classes), in_layers=[logit_pred])

    weights = Weights(shape=(None, self.n_tasks))
    labels = Label(shape=(None, self.n_tasks), dtype=tf.int32)

    output = SoftMax(logit_pred)
    self.add_output(output)
    loss = SparseSoftMaxCrossEntropy(in_layers=[labels, logit_pred])
    weighted_loss = WeightedError(in_layers=[loss, weights])

    # weight decay regularizer
    # weighted_loss = WeightDecay(0.1, 'l2', in_layers=[weighted_loss])
    self.set_loss(weighted_loss)
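The head uses SparseSoftMaxCrossEntropy, so labels are integer class indices rather than one-hot vectors. A minimal NumPy sketch of that loss, assuming logits of shape (batch, n_tasks, n_classes) and integer labels of shape (batch, n_tasks):

import numpy as np

def sparse_softmax_xent(labels, logits):
    # Numerically stable log-softmax, then pick out the true class.
    shifted = logits - logits.max(axis=-1, keepdims=True)
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=-1, keepdims=True))
    return -np.take_along_axis(log_probs, labels[..., None], axis=-1)[..., 0]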