Code Example #1
File: test_layers.py  Project: ktaneishi/deepchem
 # Module-level imports assumed by this snippet (not shown in the excerpt):
 # numpy as np, tensorflow as tf, and Slice from
 # deepchem.models.tensorgraph.layers.
 def test_slice(self):
   """Test that Slice can be invoked."""
   batch_size = 10
   n_features = 5
   test_tensor_input = np.random.rand(batch_size, n_features)
   with self.session() as sess:
     test_tensor = tf.convert_to_tensor(test_tensor_input, dtype=tf.float32)
     out_tensor = Slice(1)(test_tensor)
     out_tensor = out_tensor.eval()
     assert np.allclose(out_tensor, test_tensor_input[:, 1:2])
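
The assertion above relies on Slice(1) keeping the sliced dimension rather than dropping it. As a minimal sketch of the same indexing semantics in plain NumPy (no deepchem required):

import numpy as np

x = np.random.rand(10, 5)
# Slicing with 1:2 keeps the axis, so the result has shape (10, 1) rather
# than (10,); this mirrors what the test asserts for Slice(1) on 2-D input.
assert x[:, 1:2].shape == (10, 1)
assert np.allclose(x[:, 1:2][:, 0], x[:, 1])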
Code Example #2
# Module-level imports assumed by this snippet: Feature and Slice from
# deepchem.models.tensorgraph.layers, and TensorGraph from
# deepchem.models.tensorgraph.tensor_graph.
def test_Slice_pickle():
    V = Feature(shape=(None, 10))
    out = Slice(5, 1, in_layers=[V])
    tg = TensorGraph()
    tg.add_output(out)
    tg.set_loss(out)
    tg.build()
    tg.save()
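
A possible round-trip check, sketched under the assumption that the deepchem 2.x TensorGraph API provides TensorGraph.load_from_dir (the restore call is an assumption, not part of the test above):

# Restore sketch; TensorGraph.load_from_dir is assumed from the deepchem 2.x
# TensorGraph API and is not exercised by test_Slice_pickle itself.
tg2 = TensorGraph.load_from_dir(tg.model_dir)
assert tg2 is not None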
Code Example #3
  # Assumed context: this is a method of ProgressiveMultitaskRegressor.
  # Module-level imports assumed: collections.abc, tensorflow as tf, and the
  # TensorGraph layers used below (Feature, Label, Weights, Dense, Dropout,
  # ReLU, Slice, Stack, TFWrapper) from deepchem.models.tensorgraph.layers.
  def __init__(self,
               n_tasks,
               n_features,
               alpha_init_stddevs=0.02,
               layer_sizes=[1000],
               weight_init_stddevs=0.02,
               bias_init_consts=1.0,
               weight_decay_penalty=0.0,
               weight_decay_penalty_type="l2",
               dropouts=0.5,
               activation_fns=tf.nn.relu,
               n_outputs=1,
               **kwargs):
    """Creates a progressive network.
  
    Only listing parameters specific to progressive networks here.

    Parameters
    ----------
    n_tasks: int
      Number of tasks
    n_features: int
      Number of input features
    alpha_init_stddevs: list or float
      the standard deviation of the distribution to use for initializing alpha in
      the adapter layers.  Alternatively this may be a single value instead of a
      list, in which case the same value is used for every layer.
    layer_sizes: list
      the size of each dense layer in the network.  The length of this list determines the number of layers.
    weight_init_stddevs: list or float
      the standard deviation of the distribution to use for weight initialization of each layer.  The length
      of this list should equal len(layer_sizes)+1.  The final element corresponds to the output layer.
      Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
    bias_init_consts: list or float
      the value to initialize the biases in each layer to.  The length of this list should equal len(layer_sizes)+1.
      The final element corresponds to the output layer.  Alternatively this may be a single value instead of a list,
      in which case the same value is used for every layer.
    weight_decay_penalty: float
      the magnitude of the weight decay penalty to use
    weight_decay_penalty_type: str
      the type of penalty to use for weight decay, either 'l1' or 'l2'
    dropouts: list or float
      the dropout probability to use for each layer.  The length of this list should equal len(layer_sizes).
      Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
    activation_fns: list or object
      the TensorFlow activation function to apply to each layer.  The length of this list should equal
      len(layer_sizes).  Alternatively this may be a single value instead of a list, in which case the
      same value is used for every layer.
    n_outputs: int
      the number of outputs to produce for each task
    """

    super(ProgressiveMultitaskRegressor, self).__init__(**kwargs)
    self.n_tasks = n_tasks
    self.n_features = n_features
    self.layer_sizes = layer_sizes
    self.alpha_init_stddevs = alpha_init_stddevs
    self.weight_init_stddevs = weight_init_stddevs
    self.bias_init_consts = bias_init_consts
    self.dropouts = dropouts
    self.activation_fns = activation_fns
    self.n_outputs = n_outputs

    n_layers = len(layer_sizes)
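    # Per-layer hyperparameters may be given either as a scalar or as a list;
    # scalars are broadcast below to one entry per hidden layer.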
    if not isinstance(weight_init_stddevs, collections.abc.Sequence):
      self.weight_init_stddevs = [weight_init_stddevs] * n_layers
    if not isinstance(alpha_init_stddevs, collections.abc.Sequence):
      self.alpha_init_stddevs = [alpha_init_stddevs] * n_layers
    if not isinstance(bias_init_consts, collections.abc.Sequence):
      self.bias_init_consts = [bias_init_consts] * n_layers
    if not isinstance(dropouts, collections.abc.Sequence):
      self.dropouts = [dropouts] * n_layers
    if not isinstance(activation_fns, collections.abc.Sequence):
      self.activation_fns = [activation_fns] * n_layers

    # Add the input features.
    self.mol_features = Feature(shape=(None, n_features))
    self._task_labels = Label(shape=(None, n_tasks))
    self._task_weights = Weights(shape=(None, n_tasks))

    all_layers = {}
    outputs = []
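    # Build one column of layers per task. Columns for task > 0 additionally
    # receive lateral (adapter) connections from earlier tasks' columns, which
    # is the defining feature of a progressive network.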
    for task in range(self.n_tasks):
      task_layers = []
      for i in range(n_layers):
        if i == 0:
          prev_layer = self.mol_features
        else:
          prev_layer = all_layers[(i - 1, task)]
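          # For later columns, add_adapter builds the lateral contribution
          # from earlier tasks' activations at this depth; its trainable
          # layers are collected into this task's submodel.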
          if task > 0:
            lateral_contrib, trainables = self.add_adapter(all_layers, task, i)
            task_layers.extend(trainables)

        layer = Dense(
            in_layers=[prev_layer],
            out_channels=layer_sizes[i],
            activation_fn=None,
            weights_initializer=TFWrapper(
                tf.truncated_normal_initializer,
                stddev=self.weight_init_stddevs[i]),
            biases_initializer=TFWrapper(
                tf.constant_initializer, value=self.bias_init_consts[i]))
        task_layers.append(layer)

        if i > 0 and task > 0:
          layer = layer + lateral_contrib
        assert self.activation_fns[i] is tf.nn.relu, "Only ReLU is supported"
        layer = ReLU(in_layers=[layer])
        if self.dropouts[i] > 0.0:
          layer = Dropout(self.dropouts[i], in_layers=[layer])
        all_layers[(i, task)] = layer

      prev_layer = all_layers[(n_layers - 1, task)]
      layer = Dense(
          in_layers=[prev_layer],
          out_channels=n_outputs,
          weights_initializer=TFWrapper(
              tf.truncated_normal_initializer,
              stddev=self.weight_init_stddevs[-1]),
          biases_initializer=TFWrapper(
              tf.constant_initializer, value=self.bias_init_consts[-1]))
      task_layers.append(layer)

      if task > 0:
        lateral_contrib, trainables = self.add_adapter(all_layers, task,
                                                       n_layers)
        task_layers.extend(trainables)
        layer = layer + lateral_contrib
      output_layer = self.create_output(layer)
      outputs.append(output_layer)

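      # Slice out this task's column of labels and weights so the loss, and
      # hence this submodel's training step, only sees the current task.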
      label = Slice(task, axis=1, in_layers=[self._task_labels])
      weight = Slice(task, axis=1, in_layers=[self._task_weights])
      task_loss = self.create_loss(layer, label, weight)
      self.create_submodel(layers=task_layers, loss=task_loss, optimizer=None)

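    # Stack the per-task outputs along a new task axis to form the final
    # model output.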
    outputs = Stack(axis=1, in_layers=outputs)
    self.add_output(outputs)

    # Weight decay not activated
    """