def test_to_float(self):
    """Smoke-test the ToFloat layer: it should run and preserve input shape."""
    n_samples, n_feat = 10, 5
    data = np.random.rand(n_samples, n_feat)
    with self.session():
        tensor_in = tf.convert_to_tensor(data, dtype=tf.float32)
        # Invoke the layer and pull the result back as a numpy array.
        result = ToFloat()(tensor_in).eval()
        assert result.shape == (n_samples, n_feat)
def test_neighbor_list_vina(self):
    """Exercise NeighborList under conditions close to Vina usage.

    Builds a tiny graph (neighbor list -> flatten -> dense -> sum) and fits
    it for one epoch to confirm the whole pipeline runs end to end.
    """
    n_atoms, m_nbrs, ndim = 5, 2, 3
    lo, hi, cutoff = 0, 4, 1
    # Random coordinates uniformly distributed in [lo, hi).
    dataset = NumpyDataset(lo + np.random.rand(n_atoms, ndim) * (hi - lo))
    coords = Feature(shape=(n_atoms, ndim))
    # NeighborList yields an (N, M)-shaped tensor of neighbor indices;
    # cast it to float so downstream layers can consume it.
    nbrs = ToFloat(in_layers=[
        NeighborList(n_atoms, m_nbrs, ndim, cutoff, lo, hi, in_layers=[coords])
    ])
    dense = Dense(out_channels=1, in_layers=[Flatten(in_layers=[nbrs])])
    total = ReduceSum(in_layers=[dense])
    tg = dc.models.TensorGraph(learning_rate=0.1, use_queue=False)
    tg.set_loss(total)
    tg.fit_generator(Databag({coords: dataset}).iterbatches(epochs=1))
def test_neighbor_list_simple(self):
    """Verify a NeighborList graph can be constructed and built."""
    n_atoms, ndim, max_nbrs = 10, 3, 6
    lo, hi, cutoff = 0, 12, 3
    coords = np.random.rand(n_atoms, ndim)
    targets = np.random.rand(n_atoms, 1)
    dataset = NumpyDataset(coords, targets)
    features = Feature(shape=(n_atoms, ndim))
    labels = Label(shape=(n_atoms, ))
    nbrs = ToFloat(in_layers=[
        NeighborList(n_atoms, max_nbrs, ndim, cutoff, lo, hi,
                     in_layers=[features])
    ])
    # The loss is not meaningful -- it only gives the graph something to build.
    loss = ReduceSum(in_layers=[nbrs])
    tg = dc.models.TensorGraph(use_queue=False)
    tg.add_output(nbrs)
    tg.set_loss(loss)
    tg.build()
def test_ToFloat_pickle():
    """Check that a graph containing a ToFloat layer builds and saves."""
    graph = TensorGraph()
    feat = Feature(shape=(graph.batch_size, 1))
    cast_layer = ToFloat(in_layers=[feat])
    graph.add_output(cast_layer)
    graph.set_loss(cast_layer)
    graph.build()
    graph.save()