Example #1
File: mcts.py  Project: AhlamMD/deepchem
 def _build_graph(self, tf_graph, scope, model_dir):
   """Construct a TensorGraph containing the policy and loss calculations."""
   state_shape = self._env.state_shape
   state_dtype = self._env.state_dtype
   if not self._state_is_list:
     state_shape = [state_shape]
     state_dtype = [state_dtype]
   features = []
   for s, d in zip(state_shape, state_dtype):
     features.append(Feature(shape=[None] + list(s), dtype=tf.as_dtype(d)))
   policy_layers = self._policy.create_layers(features)
   action_prob = policy_layers['action_prob']
   value = policy_layers['value']
   search_prob = Label(shape=(None, self._env.n_actions))
   search_value = Label(shape=(None,))
   loss = MCTSLoss(
       self.value_weight,
       in_layers=[action_prob, value, search_prob, search_value])
   graph = TensorGraph(
       batch_size=self.max_search_depth,
       use_queue=False,
       graph=tf_graph,
       model_dir=model_dir)
   for f in features:
     graph._add_layer(f)
   graph.add_output(action_prob)
   graph.add_output(value)
   graph.set_loss(loss)
   graph.set_optimizer(self._optimizer)
   with graph._get_tf("Graph").as_default():
     with tf.variable_scope(scope):
       graph.build()
   if len(graph.rnn_initial_states) > 0:
     raise ValueError('MCTS does not support policies with recurrent layers')
   return graph, features, action_prob, value, search_prob, search_value
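The only piece not shown above is self._policy.create_layers(features), which in deepchem must return a dict mapping 'action_prob' and 'value' to layers. A minimal sketch of a compatible policy, assuming a single state feature (SimplePolicy is our name, not deepchem's):

import tensorflow as tf
from deepchem.models.tensorgraph.layers import Dense, Flatten, SoftMax

class SimplePolicy(object):
  """Hedged sketch of a policy usable with _build_graph above."""

  def __init__(self, n_actions):
    self.n_actions = n_actions

  def create_layers(self, features):
    # One shared hidden layer feeds both heads; assumes len(features) == 1.
    hidden = Dense(64, activation_fn=tf.nn.relu,
                   in_layers=[Flatten(in_layers=[features[0]])])
    # Probability head over discrete actions plus a scalar value head.
    action_prob = SoftMax(in_layers=[Dense(self.n_actions, in_layers=[hidden])])
    value = Dense(1, in_layers=[hidden])
    return {'action_prob': action_prob, 'value': value}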
Example #2
def test_Conv1D_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 1, 1))
  conv = Conv1D(2, 1, in_layers=feature)
  tg.add_output(conv)
  tg.set_loss(conv)
  tg.build()
  tg.save()
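Examples #2 through #39 below all repeat this pickle-test shape: create a TensorGraph, wire a Feature into the layer under test, register that layer as both output and loss, then build and save. Assuming the usual deepchem imports, the shared pattern can be sketched as a helper (check_pickles is our name, not deepchem's):

from deepchem.models.tensorgraph.tensor_graph import TensorGraph
from deepchem.models.tensorgraph.layers import Feature

def check_pickles(make_layer):
  # make_layer receives the graph and returns the layer under test.
  tg = TensorGraph()
  layer = make_layer(tg)
  tg.add_output(layer)
  tg.set_loss(layer)
  tg.build()
  tg.save()  # pickles the graph to tg.model_dir

# The Conv1D test above would become:
# check_pickles(lambda tg: Conv1D(2, 1,
#     in_layers=Feature(shape=(tg.batch_size, 1, 1))))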
Example #3
def test_LSTM_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 10, 10))
  layer = LSTM(n_hidden=10, batch_size=tg.batch_size, in_layers=feature)
  tg.add_output(layer)
  tg.set_loss(layer)
  tg.build()
  tg.save()
Example #4
def test_BatchNorm_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 10))
  layer = BatchNorm(in_layers=feature)
  tg.add_output(layer)
  tg.set_loss(layer)
  tg.build()
  tg.save()
Example #5
def test_CombineMeanStd_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 1))
  layer = CombineMeanStd(in_layers=[feature, feature])
  tg.add_output(layer)
  tg.set_loss(layer)
  tg.build()
  tg.save()
Example #6
def test_Dense_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 1))
  dense = Dense(out_channels=1, in_layers=feature)
  tg.add_output(dense)
  tg.set_loss(dense)
  tg.build()
  tg.save()
Example #7
def test_Exp_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 1))
  layer = Exp(feature)
  tg.add_output(layer)
  tg.set_loss(layer)
  tg.build()
  tg.save()
Example #8
def test_DTNNEmbedding_pickle():
  tg = TensorGraph()
  atom_numbers = Feature(shape=(None, 23), dtype=tf.int32)
  Embedding = DTNNEmbedding(in_layers=[atom_numbers])
  tg.add_output(Embedding)
  tg.set_loss(Embedding)
  tg.build()
  tg.save()
Example #9
def test_Squeeze_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 1))
  layer = Squeeze(in_layers=feature)
  tg.add_output(layer)
  tg.set_loss(layer)
  tg.build()
  tg.save()
Example #10
def test_Transpose_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 1))
  layer = Transpose(perm=(1, 0), in_layers=feature)
  tg.add_output(layer)
  tg.set_loss(layer)
  tg.build()
  tg.save()
Example #11
def test_Reshape_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 1))
  layer = Reshape(shape=(None, 2), in_layers=feature)
  tg.add_output(layer)
  tg.set_loss(layer)
  tg.build()
  tg.save()
Example #12
def test_StopGradient_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 1))
  output = StopGradient(feature)
  tg.add_output(output)
  tg.set_loss(output)
  tg.build()
  tg.save()
Example #13
def test_Repeat_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 1))
  layer = Repeat(n_times=10, in_layers=feature)
  tg.add_output(layer)
  tg.set_loss(layer)
  tg.build()
  tg.save()
Example #14
def test_WeightedError_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 10))
  layer = WeightedError(in_layers=[feature, feature])
  tg.add_output(layer)
  tg.set_loss(layer)
  tg.build()
  tg.save()
Example #15
def test_Conv3DTranspose_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 10, 10, 10, 1))
  layer = Conv3DTranspose(num_outputs=3, in_layers=feature)
  tg.add_output(layer)
  tg.set_loss(layer)
  tg.build()
  tg.save()
Example #16
def test_ReduceSquareDifference_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 1))
  layer = ReduceSquareDifference(in_layers=[feature, feature])
  tg.add_output(layer)
  tg.set_loss(layer)
  tg.build()
  tg.save()
Example #17
def test_ToFloat_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 1))
  layer = ToFloat(in_layers=[feature])
  tg.add_output(layer)
  tg.set_loss(layer)
  tg.build()
  tg.save()
Example #18
def test_SigmoidCrossEntropy_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 1))
  layer = SigmoidCrossEntropy(in_layers=[feature, feature])
  tg.add_output(layer)
  tg.set_loss(layer)
  tg.build()
  tg.save()
Example #19
def test_DTNNExtract_pickle():
  tg = TensorGraph()
  atom_features = Feature(shape=(None, 30))
  Ext = DTNNExtract(0, in_layers=[atom_features])
  tg.add_output(Ext)
  tg.set_loss(Ext)
  tg.build()
  tg.save()
Example #20
def test_hingeloss_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(1, None))
  layer = HingeLoss(in_layers=[feature, feature])
  tg.add_output(layer)
  tg.set_loss(layer)
  tg.build()
  tg.save()
Example #21
def test_Slice_pickle():
  V = Feature(shape=(None, 10))
  out = Slice(5, 1, in_layers=[V])
  tg = TensorGraph()
  tg.add_output(out)
  tg.set_loss(out)
  tg.build()
  tg.save()
Example #22
def test_Cast_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 1))
  layer = Cast(in_layers=feature, dtype=tf.int32)
  tg.add_output(layer)
  tg.set_loss(layer)
  tg.build()
  tg.save()
Example #23
def test_Gather_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 1))
  layer = Gather(indices=[[0], [2], [3]], in_layers=feature)
  tg.add_output(layer)
  tg.set_loss(layer)
  tg.build()
  tg.save()
Example #24
def test_DAGGather_pickle():
  tg = TensorGraph()
  atom_features = Feature(shape=(None, 30))
  membership = Feature(shape=(None,), dtype=tf.int32)
  Gather = DAGGather(in_layers=[atom_features, membership])
  tg.add_output(Gather)
  tg.set_loss(Gather)
  tg.build()
  tg.save()
Example #25
def testGraphCNNPoolLayer_pickle():
  V = Feature(shape=(None, 200, 50))
  A = Feature(shape=(None, 200, 1, 200))
  gcnnpool = GraphEmbedPoolLayer(32, in_layers=[V, A])
  tg = TensorGraph()
  tg.add_output(gcnnpool)
  tg.set_loss(gcnnpool)
  tg.build()
  tg.save()
Example #26
def test_SetGather_pickle():
  tg = TensorGraph()
  atom_feature = Feature(shape=(None, 100))
  atom_split = Feature(shape=(None,), dtype=tf.int32)
  Gather = SetGather(5, 16, in_layers=[atom_feature, atom_split])
  tg.add_output(Gather)
  tg.set_loss(Gather)
  tg.build()
  tg.save()
Example #27
def test_Constant_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 1))
  layer = Constant(np.array([15.0]))
  output = Add(in_layers=[feature, layer])
  tg.add_output(output)
  tg.set_loss(output)
  tg.build()
  tg.save()
Example #28
def test_Variable_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 1))
  layer = Variable(np.array([15.0]))
  output = Multiply(in_layers=[feature, layer])
  tg.add_output(output)
  tg.set_loss(output)
  tg.build()
  tg.save()
Example #29
def test_SparseSoftmaxCrossEntropy_pickle():
  tg = TensorGraph()
  logits = Feature(shape=(tg.batch_size, 5))
  labels = Feature(shape=(tg.batch_size,), dtype=tf.int32)
  layer = SparseSoftMaxCrossEntropy(in_layers=[labels, logits])
  tg.add_output(layer)
  tg.set_loss(layer)
  tg.build()
  tg.save()
Example #30
def testGraphCNN_pickle():
  V = Feature(shape=(None, 200, 50))
  A = Feature(shape=(None, 200, 1, 200))
  gcnn = GraphCNN(32, in_layers=[V, A])
  tg = TensorGraph()
  tg.add_output(gcnn)
  tg.set_loss(gcnn)
  tg.build()
  tg.save()
Example #31
def test_LSTMStep_pickle():
  """Tests that LSTMStep can be pickled."""
  n_feat = 10
  tg = TensorGraph()
  y = Feature(shape=(None, 2 * n_feat))
  state_zero = Feature(shape=(None, n_feat))
  state_one = Feature(shape=(None, n_feat))
  lstm = LSTMStep(n_feat, 2 * n_feat, in_layers=[y, state_zero, state_one])
  tg.add_output(lstm)
  tg.set_loss(lstm)
  tg.build()
  tg.save()
Example #32
def test_Weave_pickle():
  tg = TensorGraph()
  atom_feature = Feature(shape=(None, 75))
  pair_feature = Feature(shape=(None, 14))
  pair_split = Feature(shape=(None,), dtype=tf.int32)
  atom_to_pair = Feature(shape=(None, 2), dtype=tf.int32)
  weave = WeaveLayer(
      in_layers=[atom_feature, pair_feature, pair_split, atom_to_pair])
  tg.add_output(weave)
  tg.set_loss(weave)
  tg.build()
  tg.save()
Example #33
def test_IRVLayer_pickle():
    n_tasks = 10
    K = 10
    V = Feature(shape=(None, 200))
    irv_layer = IRVLayer(n_tasks, K, in_layers=[V])
    irv_reg = IRVRegularize(irv_layer, in_layers=[irv_layer])
    tg = TensorGraph()
    tg.add_output(irv_layer)
    tg.add_output(irv_reg)
    tg.set_loss(irv_reg)
    tg.build()
    tg.save()
Example #34
def test_AtomicDifferentialDense_pickle():
    max_atoms = 23
    atom_features = 100
    tg = TensorGraph()
    atom_feature = Feature(shape=(None, max_atoms, atom_features))
    atom_numbers = Feature(shape=(None, max_atoms))
    atomic_differential_dense = AtomicDifferentiatedDense(
        max_atoms=23, out_channels=5, in_layers=[atom_feature, atom_numbers])
    tg.add_output(atomic_differential_dense)
    tg.set_loss(atomic_differential_dense)
    tg.build()
    tg.save()
Example #35
def test_DTNNStep_pickle():
  tg = TensorGraph()
  atom_features = Feature(shape=(None, 30))
  distance = Feature(shape=(None, 100))
  distance_membership_i = Feature(shape=(None,), dtype=tf.int32)
  distance_membership_j = Feature(shape=(None,), dtype=tf.int32)
  DTNN = DTNNStep(in_layers=[
      atom_features, distance, distance_membership_i, distance_membership_j
  ])
  tg.add_output(DTNN)
  tg.set_loss(DTNN)
  tg.build()
  tg.save()
Example #36
def test_DAGLayer_pickle():
  tg = TensorGraph(use_queue=False)
  atom_features = Feature(shape=(None, 75))
  parents = Feature(shape=(None, 50, 50), dtype=tf.int32)
  calculation_orders = Feature(shape=(None, 50), dtype=tf.int32)
  calculation_masks = Feature(shape=(None, 50), dtype=tf.bool)
  n_atoms = Feature(shape=(), dtype=tf.int32)
  DAG = DAGLayer(in_layers=[
      atom_features, parents, calculation_orders, calculation_masks, n_atoms
  ])
  tg.add_output(DAG)
  tg.set_loss(DAG)
  tg.build()
  tg.save()
Example #37
def test_IterRefLSTM_pickle():
  """Tests that IterRefLSTM can be pickled."""
  n_feat = 10
  max_depth = 5
  n_test = 5
  n_support = 5
  tg = TensorGraph()
  test = Feature(shape=(None, n_feat))
  support = Feature(shape=(None, n_feat))
  lstm = IterRefLSTMEmbedding(
      n_test, n_support, n_feat, max_depth, in_layers=[test, support])
  tg.add_output(lstm)
  tg.set_loss(lstm)
  tg.build()
  tg.save()
Example #38
def test_AttnLSTM_pickle():
  """Tests that AttnLSTM can be pickled."""
  max_depth = 5
  n_test = 5
  n_support = 5
  n_feat = 10

  tg = TensorGraph(batch_size=n_test)
  test = Feature(shape=(None, n_feat))
  support = Feature(shape=(None, n_feat))
  out = AttnLSTMEmbedding(
      n_test, n_support, n_feat, max_depth, in_layers=[test, support])
  tg.add_output(out)
  tg.set_loss(out)
  tg.build()
  tg.save()
Example #39
def test_GraphConv_pickle():
  tg = TensorGraph()
  atom_features = Feature(shape=(None, 75))
  degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
  membership = Feature(shape=(None,), dtype=tf.int32)

  deg_adjs = []
  for i in range(0, 10 + 1):
    deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
    deg_adjs.append(deg_adj)
  layer = GraphConv(
      64,
      activation_fn=tf.nn.relu,
      in_layers=[atom_features, degree_slice, membership] + deg_adjs)
  tg.add_output(layer)
  tg.set_loss(layer)
  tg.build()
  tg.save()
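At feed time these placeholders line up with a batched ConvMol from deepchem's ConvMolFeaturizer. A hedged sketch of the mapping, where batch_of_convmols is an assumed list of ConvMol objects:

from deepchem.feat.mol_graphs import ConvMol

mols = ConvMol.agglomerate_mols(batch_of_convmols)
feed = {atom_features: mols.get_atom_features(),
        degree_slice: mols.deg_slice,
        membership: mols.membership}
# deg_adjs[i] holds the neighbor lists of degree-(i + 1) atoms; degree-0
# atoms have no neighbors, so the first adjacency list is skipped.
for i, deg_adj in enumerate(mols.get_deg_adjacency_lists()[1:]):
  feed[deg_adjs[i]] = deg_adj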
Example #40
 def _build_graph(self, tf_graph, scope, model_dir):
     """Construct a TensorGraph containing the policy and loss calculations."""
     state_shape = self._env.state_shape
     state_dtype = self._env.state_dtype
     if not self._state_is_list:
         state_shape = [state_shape]
         state_dtype = [state_dtype]
     features = []
     for s, d in zip(state_shape, state_dtype):
         features.append(
             Feature(shape=[None] + list(s), dtype=tf.as_dtype(d)))
     policy_layers = self._policy.create_layers(features)
     value = policy_layers['value']
     rewards = Weights(shape=(None, ))
     advantages = Weights(shape=(None, ))
     graph = TensorGraph(batch_size=self.max_rollout_length,
                         use_queue=False,
                         graph=tf_graph,
                         model_dir=model_dir)
     for f in features:
         graph._add_layer(f)
     if 'action_prob' in policy_layers:
         self.continuous = False
         action_prob = policy_layers['action_prob']
         actions = Label(shape=(None, self._env.n_actions))
         loss = A3CLossDiscrete(
             self.value_weight,
             self.entropy_weight,
             in_layers=[rewards, actions, action_prob, value, advantages])
         graph.add_output(action_prob)
     else:
         self.continuous = True
         action_mean = policy_layers['action_mean']
         action_std = policy_layers['action_std']
         actions = Label(shape=[None] + list(self._env.action_shape))
         loss = A3CLossContinuous(self.value_weight,
                                  self.entropy_weight,
                                  in_layers=[
                                      rewards, actions, action_mean,
                                      action_std, value, advantages
                                  ])
         graph.add_output(action_mean)
         graph.add_output(action_std)
     graph.add_output(value)
     graph.set_loss(loss)
     graph.set_optimizer(self._optimizer)
     with graph._get_tf("Graph").as_default():
         with tf.variable_scope(scope):
             graph.build()
     if self.continuous:
         return graph, features, rewards, actions, action_mean, action_std, value, advantages
     else:
         return graph, features, rewards, actions, action_prob, value, advantages
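The discrete branch above expects 'action_prob' from the policy, the continuous branch 'action_mean' and 'action_std'. A hedged sketch of a continuous policy compatible with that branch, assuming a single state feature and a one-dimensional action (GaussianPolicy is our name, not deepchem's):

import tensorflow as tf
from deepchem.models.tensorgraph.layers import Dense, Exp, Flatten

class GaussianPolicy(object):
  """Emits a diagonal Gaussian over a 1-D continuous action."""

  def create_layers(self, features):
    hidden = Dense(64, activation_fn=tf.nn.tanh,
                   in_layers=[Flatten(in_layers=[features[0]])])
    action_mean = Dense(1, in_layers=[hidden])
    # Exp keeps the predicted standard deviation positive.
    action_std = Exp(in_layers=[Dense(1, in_layers=[hidden])])
    value = Dense(1, in_layers=[hidden])
    return {'action_mean': action_mean, 'action_std': action_std,
            'value': value}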
Example #41
def testInteratomicL2Distances():
    """
    TODO(LESWING) what is ndim here?
    :return:
    """
    tg = TensorGraph()
    n_atoms = tg.batch_size
    M_nbrs = 4
    n_dim = 3
    feature = Feature(shape=(tg.batch_size, 3))
    neighbors = Feature(shape=(tg.batch_size, M_nbrs), dtype=tf.int32)
    layer = InteratomicL2Distances(N_atoms=n_atoms,
                                   M_nbrs=M_nbrs,
                                   ndim=n_dim,
                                   in_layers=[feature, neighbors])
    tg.add_output(layer)
    tg.set_loss(layer)
    tg.build()
    tg.save()
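As for the TODO above: ndim is the spatial dimensionality of the coordinates (3 here), and the layer pairs each atom's position with those of its M_nbrs neighbors. A hedged numpy sketch of the computation as we read it, returning squared L2 distances:

import numpy as np

def interatomic_l2(coords, nbrs):
  # coords: (N_atoms, ndim) positions; nbrs: (N_atoms, M_nbrs) indices
  nbr_coords = coords[nbrs]               # (N_atoms, M_nbrs, ndim)
  diff = nbr_coords - coords[:, None, :]  # broadcast each atom's position
  return np.sum(diff * diff, axis=-1)     # (N_atoms, M_nbrs)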
Example #42
 def _build_graph(self, tf_graph, scope, model_dir):
   """Construct a TensorGraph containing the policy and loss calculations."""
   state_shape = self._env.state_shape
   state_dtype = self._env.state_dtype
   if not self._state_is_list:
     state_shape = [state_shape]
     state_dtype = [state_dtype]
   features = []
   for s, d in zip(state_shape, state_dtype):
     features.append(Feature(shape=[None] + list(s), dtype=tf.as_dtype(d)))
   policy_layers = self._policy.create_layers(features)
   action_prob = policy_layers['action_prob']
   value = policy_layers['value']
   rewards = Weights(shape=(None,))
   advantages = Weights(shape=(None,))
   old_action_prob = Weights(shape=(None,))
   actions = Label(shape=(None, self._env.n_actions))
   loss = PPOLoss(
       self.value_weight,
       self.entropy_weight,
       self.clipping_width,
       in_layers=[
           rewards, actions, action_prob, value, advantages, old_action_prob
       ])
   graph = TensorGraph(
       batch_size=self.max_rollout_length,
       use_queue=False,
       graph=tf_graph,
       model_dir=model_dir)
   for f in features:
     graph._add_layer(f)
   graph.add_output(action_prob)
   graph.add_output(value)
   graph.set_loss(loss)
   graph.set_optimizer(self._optimizer)
   with graph._get_tf("Graph").as_default():
     with tf.variable_scope(scope):
       graph.build()
   assert len(loss.components) > 0
   return graph, features, rewards, actions, action_prob, value, advantages, old_action_prob, loss.components
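old_action_prob and clipping_width feed PPO's clipped surrogate objective. A hedged numpy sketch of the per-sample policy term that PPOLoss combines with its value and entropy terms:

import numpy as np

def clipped_surrogate(prob, old_prob, advantage, clipping_width):
  ratio = prob / old_prob
  clipped = np.clip(ratio, 1.0 - clipping_width, 1.0 + clipping_width)
  # Negated because the graph minimizes the loss.
  return -np.minimum(ratio * advantage, clipped * advantage)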
Example #43
def atomic_conv_model(frag1_num_atoms=70,
                      frag2_num_atoms=634,
                      complex_num_atoms=701,
                      max_num_neighbors=12,
                      batch_size=24,
                      at=[
                          6, 7., 8., 9., 11., 12., 15., 16., 17., 20., 25.,
                          30., 35., 53., -1.
                      ],
                      radial=[[
                          1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0,
                          6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0,
                          11.5, 12.0
                      ], [0.0, 4.0, 8.0], [0.4]],
                      layer_sizes=[32, 32, 16],
                      learning_rate=0.001):
    rp = [x for x in itertools.product(*radial)]
    frag1_X = Feature(shape=(batch_size, frag1_num_atoms, 3))
    frag1_nbrs = Feature(shape=(batch_size, frag1_num_atoms,
                                max_num_neighbors))
    frag1_nbrs_z = Feature(shape=(batch_size, frag1_num_atoms,
                                  max_num_neighbors))
    frag1_z = Feature(shape=(batch_size, frag1_num_atoms))

    frag2_X = Feature(shape=(batch_size, frag2_num_atoms, 3))
    frag2_nbrs = Feature(shape=(batch_size, frag2_num_atoms,
                                max_num_neighbors))
    frag2_nbrs_z = Feature(shape=(batch_size, frag2_num_atoms,
                                  max_num_neighbors))
    frag2_z = Feature(shape=(batch_size, frag2_num_atoms))

    complex_X = Feature(shape=(batch_size, complex_num_atoms, 3))
    complex_nbrs = Feature(shape=(batch_size, complex_num_atoms,
                                  max_num_neighbors))
    complex_nbrs_z = Feature(shape=(batch_size, complex_num_atoms,
                                    max_num_neighbors))
    complex_z = Feature(shape=(batch_size, complex_num_atoms))

    frag1_conv = AtomicConvolution(
        atom_types=at,
        radial_params=rp,
        boxsize=None,
        in_layers=[frag1_X, frag1_nbrs, frag1_nbrs_z])

    frag2_conv = AtomicConvolution(
        atom_types=at,
        radial_params=rp,
        boxsize=None,
        in_layers=[frag2_X, frag2_nbrs, frag2_nbrs_z])

    complex_conv = AtomicConvolution(
        atom_types=at,
        radial_params=rp,
        boxsize=None,
        in_layers=[complex_X, complex_nbrs, complex_nbrs_z])

    score = AtomicConvScore(at,
                            layer_sizes,
                            in_layers=[
                                frag1_conv, frag2_conv, complex_conv, frag1_z,
                                frag2_z, complex_z
                            ])

    label = Label(shape=(None, 1))
    loss = ReduceMean(in_layers=L2Loss(in_layers=[score, label]))

    def feed_dict_generator(dataset, batch_size, epochs=1, pad_batches=True):
        def replace_atom_types(z):
            def place_holder(i):
                if i in at:
                    return i
                return -1

            return np.array([place_holder(x) for x in z])

        for epoch in range(epochs):
            for ind, (F_b, y_b, w_b, ids_b) in enumerate(
                    dataset.iterbatches(batch_size,
                                        deterministic=True,
                                        pad_batches=pad_batches)):
                N = complex_num_atoms
                N_1 = frag1_num_atoms
                N_2 = frag2_num_atoms
                M = max_num_neighbors

                orig_dict = {}
                batch_size = F_b.shape[0]
                num_features = F_b[0][0].shape[1]
                frag1_X_b = np.zeros((batch_size, N_1, num_features))
                for i in range(batch_size):
                    frag1_X_b[i] = F_b[i][0]
                orig_dict[frag1_X] = frag1_X_b

                frag2_X_b = np.zeros((batch_size, N_2, num_features))
                for i in range(batch_size):
                    frag2_X_b[i] = F_b[i][3]
                orig_dict[frag2_X] = frag2_X_b

                complex_X_b = np.zeros((batch_size, N, num_features))
                for i in range(batch_size):
                    complex_X_b[i] = F_b[i][6]
                orig_dict[complex_X] = complex_X_b

                frag1_Nbrs = np.zeros((batch_size, N_1, M))
                frag1_Z_b = np.zeros((batch_size, N_1))
                for i in range(batch_size):
                    z = replace_atom_types(F_b[i][2])
                    frag1_Z_b[i] = z
                frag1_Nbrs_Z = np.zeros((batch_size, N_1, M))
                for atom in range(N_1):
                    for i in range(batch_size):
                        atom_nbrs = F_b[i][1].get(atom, "")
                        frag1_Nbrs[i,
                                   atom, :len(atom_nbrs)] = np.array(atom_nbrs)
                        for j, atom_j in enumerate(atom_nbrs):
                            frag1_Nbrs_Z[i, atom, j] = frag1_Z_b[i, atom_j]
                orig_dict[frag1_nbrs] = frag1_Nbrs
                orig_dict[frag1_nbrs_z] = frag1_Nbrs_Z
                orig_dict[frag1_z] = frag1_Z_b

                frag2_Nbrs = np.zeros((batch_size, N_2, M))
                frag2_Z_b = np.zeros((batch_size, N_2))
                for i in range(batch_size):
                    z = replace_atom_types(F_b[i][5])
                    frag2_Z_b[i] = z
                frag2_Nbrs_Z = np.zeros((batch_size, N_2, M))
                for atom in range(N_2):
                    for i in range(batch_size):
                        atom_nbrs = F_b[i][4].get(atom, "")
                        frag2_Nbrs[i,
                                   atom, :len(atom_nbrs)] = np.array(atom_nbrs)
                        for j, atom_j in enumerate(atom_nbrs):
                            frag2_Nbrs_Z[i, atom, j] = frag2_Z_b[i, atom_j]
                orig_dict[frag2_nbrs] = frag2_Nbrs
                orig_dict[frag2_nbrs_z] = frag2_Nbrs_Z
                orig_dict[frag2_z] = frag2_Z_b

                complex_Nbrs = np.zeros((batch_size, N, M))
                complex_Z_b = np.zeros((batch_size, N))
                for i in range(batch_size):
                    z = replace_atom_types(F_b[i][8])
                    complex_Z_b[i] = z
                complex_Nbrs_Z = np.zeros((batch_size, N, M))
                for atom in range(N):
                    for i in range(batch_size):
                        atom_nbrs = F_b[i][7].get(atom, "")
                        complex_Nbrs[i, atom, :len(atom_nbrs)] = np.array(
                            atom_nbrs)
                        for j, atom_j in enumerate(atom_nbrs):
                            complex_Nbrs_Z[i, atom, j] = complex_Z_b[i, atom_j]

                orig_dict[complex_nbrs] = complex_Nbrs
                orig_dict[complex_nbrs_z] = complex_Nbrs_Z
                orig_dict[complex_z] = complex_Z_b
                orig_dict[label] = np.reshape(y_b, newshape=(batch_size, 1))
                yield orig_dict

    tg = TensorGraph(batch_size=batch_size,
                     mode="regression",
                     model_dir="/tmp/atom_conv",
                     learning_rate=learning_rate)
    tg.add_output(score)
    tg.set_loss(loss)
    return tg, feed_dict_generator, label
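A hedged usage sketch: the returned generator plugs straight into TensorGraph.fit_generator, where train_dataset is an assumed deepchem Dataset:

tg, generator, label = atomic_conv_model()
tg.fit_generator(generator(train_dataset, batch_size=24, epochs=10))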