def _build_graph(self, tf_graph, scope, model_dir):
  """Construct a TensorGraph containing the policy and loss calculations."""
  state_shape = self._env.state_shape
  state_dtype = self._env.state_dtype
  if not self._state_is_list:
    state_shape = [state_shape]
    state_dtype = [state_dtype]
  features = []
  for s, d in zip(state_shape, state_dtype):
    features.append(Feature(shape=[None] + list(s), dtype=tf.as_dtype(d)))
  policy_layers = self._policy.create_layers(features)
  action_prob = policy_layers['action_prob']
  value = policy_layers['value']
  rewards = Weights(shape=(None,))
  advantages = Weights(shape=(None,))
  actions = Label(shape=(None, self._env.n_actions))
  loss = A3CLoss(
      self.value_weight,
      self.entropy_weight,
      in_layers=[rewards, actions, action_prob, value, advantages])
  graph = TensorGraph(
      batch_size=self.max_rollout_length,
      use_queue=False,
      graph=tf_graph,
      model_dir=model_dir)
  for f in features:
    graph._add_layer(f)
  graph.add_output(action_prob)
  graph.add_output(value)
  graph.set_loss(loss)
  graph.set_optimizer(self._optimizer)
  with graph._get_tf("Graph").as_default():
    with tf.variable_scope(scope):
      graph.build()
  return graph, features, rewards, actions, action_prob, value, advantages
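# A rough NumPy sketch of the three terms A3CLoss combines above (not the
# DeepChem implementation; the exact reductions and signs are assumptions):
# a policy-gradient term scaled by advantages, a value regression term, and
# an entropy bonus that discourages prematurely peaked policies.
import numpy as np

def a3c_loss_sketch(action_prob, value, rewards, actions, advantages,
                    value_weight=1.0, entropy_weight=0.01):
  # Probability assigned to the action actually taken (actions is one-hot).
  prob_taken = np.sum(action_prob * actions, axis=1)
  policy_loss = -np.mean(np.log(prob_taken + 1e-8) * advantages)
  # Regress the value head toward the observed discounted rewards.
  value_loss = np.mean((rewards - value)**2)
  # Entropy of the policy distribution, averaged over the batch.
  entropy = -np.mean(np.sum(action_prob * np.log(action_prob + 1e-8), axis=1))
  return policy_loss + value_weight * value_loss - entropy_weight * entropy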
def build_graph(self):
  # Layer 1
  gc1_input = [self.atom_features, self.indexing, self.membership] + \
      self.deg_adj_list
  gc1 = GraphConv(64, activation_fn=tf.nn.relu, in_layers=gc1_input)
  bn1 = BatchNorm(in_layers=[gc1])
  gp1_input = [bn1, self.indexing, self.membership] + self.deg_adj_list
  gp1 = GraphPool(in_layers=gp1_input)

  # Layer 2
  gc2_input = [gp1, self.indexing, self.membership] + self.deg_adj_list
  gc2 = GraphConv(64, activation_fn=tf.nn.relu, in_layers=gc2_input)
  bn2 = BatchNorm(in_layers=[gc2])
  gp2_input = [bn2, self.indexing, self.membership] + self.deg_adj_list
  gp2 = GraphPool(in_layers=gp2_input)

  # Dense layer 1
  d1 = Dense(out_channels=128, activation_fn=tf.nn.relu, in_layers=[gp2])
  bn3 = BatchNorm(in_layers=[d1])

  # Graph gather layer (keyword is activation_fn, matching GraphConv above;
  # the original passed `activation=`, which the layer silently ignores)
  gg1_input = [bn3, self.indexing, self.membership] + self.deg_adj_list
  gg1 = GraphGather(
      batch_size=self.batch_size,
      activation_fn=tf.nn.tanh,
      in_layers=gg1_input)

  # Output dense layer
  d2 = Dense(out_channels=2, activation_fn=None, in_layers=[gg1])
  softmax = SoftMax(in_layers=[d2])
  self.tg.add_output(softmax)

  # Set loss function
  self.label = Label(shape=(None, 2))
  cost = SoftMaxCrossEntropy(in_layers=[self.label, d2])
  self.weight = Weights(shape=(None, 1))
  loss = WeightedError(in_layers=[cost, self.weight])
  self.tg.set_loss(loss)
def build_graph(self): """Constructs the graph architecture of IRV as described in: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2750043/ """ self.mol_features = Feature(shape=(None, self.n_features)) self._labels = Label(shape=(None, self.n_tasks)) self._weights = Weights(shape=(None, self.n_tasks)) predictions = IRVLayer(self.n_tasks, self.K, in_layers=[self.mol_features]) costs = [] outputs = [] for task in range(self.n_tasks): task_output = Slice(task, 1, in_layers=[predictions]) sigmoid = Sigmoid(in_layers=[task_output]) outputs.append(sigmoid) label = Slice(task, axis=1, in_layers=[self._labels]) cost = SigmoidCrossEntropy(in_layers=[label, task_output]) costs.append(cost) all_cost = Concat(in_layers=costs, axis=1) loss = WeightedError(in_layers=[all_cost, self._weights]) + \ IRVRegularize(predictions, self.penalty, in_layers=[predictions]) self.set_loss(loss) outputs = Stack(axis=1, in_layers=outputs) outputs = Concat(axis=2, in_layers=[1 - outputs, outputs]) self.add_output(outputs)
def build_graph(self): """ Building graph structures: """ self.m1_features = Feature(shape=(None, self.n_features)) self.m2_features = Feature(shape=(None, self.n_features)) prev_layer1 = self.m1_features prev_layer2 = self.m2_features for layer_size in self.layer_sizes: prev_layer1 = Dense( out_channels=layer_size, in_layers=[prev_layer1], activation_fn=tf.nn.relu) prev_layer2 = prev_layer1.shared([prev_layer2]) if self.dropout > 0.0: prev_layer1 = Dropout(self.dropout, in_layers=prev_layer1) prev_layer2 = Dropout(self.dropout, in_layers=prev_layer2) readout_m1 = Dense( out_channels=1, in_layers=[prev_layer1], activation_fn=None) readout_m2 = readout_m1.shared([prev_layer2]) self.add_output(Sigmoid(readout_m1) * 4 + 1) self.add_output(Sigmoid(readout_m2) * 4 + 1) self.difference = readout_m1 - readout_m2 label = Label(shape=(None, 1)) loss = HingeLoss(in_layers=[label, self.difference]) self.my_task_weights = Weights(shape=(None, 1)) loss = WeightedError(in_layers=[loss, self.my_task_weights]) self.set_loss(loss)
def build_graph(self):
  self.atom_flags = Feature(shape=(None, self.max_atoms, self.max_atoms))
  self.atom_feats = Feature(shape=(None, self.max_atoms, self.n_feat))
  previous_layer = self.atom_feats
  Hiddens = []
  for n_hidden in self.layer_structures:
    Hidden = Dense(
        out_channels=n_hidden,
        activation_fn=tf.nn.tanh,
        in_layers=[previous_layer])
    Hiddens.append(Hidden)
    previous_layer = Hiddens[-1]
  costs = []
  self.labels_fd = []
  for task in range(self.n_tasks):
    regression = Dense(
        out_channels=1, activation_fn=None, in_layers=[Hiddens[-1]])
    output = BPGather(self.max_atoms, in_layers=[regression, self.atom_flags])
    self.add_output(output)
    label = Label(shape=(None, 1))
    self.labels_fd.append(label)
    cost = L2Loss(in_layers=[label, output])
    costs.append(cost)
  all_cost = Stack(in_layers=costs, axis=1)
  self.weights = Weights(shape=(None, self.n_tasks))
  loss = WeightedError(in_layers=[all_cost, self.weights])
  self.set_loss(loss)
def _build_graph(self):
  self.atom_flags = Feature(shape=(None, self.max_atoms * self.max_atoms))
  self.atom_feats = Feature(shape=(None, self.max_atoms * self.n_feat))

  reshaped_atom_feats = Reshape(
      in_layers=[self.atom_feats], shape=(-1, self.max_atoms, self.n_feat))
  reshaped_atom_flags = Reshape(
      in_layers=[self.atom_flags], shape=(-1, self.max_atoms, self.max_atoms))

  previous_layer = reshaped_atom_feats
  Hiddens = []
  for n_hidden in self.layer_structures:
    Hidden = Dense(
        out_channels=n_hidden,
        activation_fn=tf.nn.tanh,
        in_layers=[previous_layer])
    Hiddens.append(Hidden)
    previous_layer = Hiddens[-1]

  regression = Dense(
      out_channels=1 * self.n_tasks,
      activation_fn=None,
      in_layers=[Hiddens[-1]])
  output = BPGather(self.max_atoms, in_layers=[regression, reshaped_atom_flags])
  self.add_output(output)

  label = Label(shape=(None, self.n_tasks, 1))
  loss = ReduceSum(L2Loss(in_layers=[label, output]))
  weights = Weights(shape=(None, self.n_tasks))
  weighted_loss = WeightedError(in_layers=[loss, weights])
  self.set_loss(weighted_loss)
def _build_graph(self):
  self.one_hot_seq = Feature(
      shape=(None, self.pad_length, self.num_amino_acids), dtype=tf.float32)

  conv1 = Conv1D(kernel_size=2, filters=512, in_layers=[self.one_hot_seq])
  maxpool1 = MaxPool1D(strides=2, padding="VALID", in_layers=[conv1])
  conv2 = Conv1D(kernel_size=3, filters=512, in_layers=[maxpool1])
  flattened = Flatten(in_layers=[conv2])
  dense1 = Dense(
      out_channels=400, in_layers=[flattened], activation_fn=tf.nn.tanh)
  dropout = Dropout(dropout_prob=self.dropout_p, in_layers=[dense1])
  output = Dense(out_channels=1, in_layers=[dropout], activation_fn=None)
  self.add_output(output)

  if self.mode == "regression":
    label = Label(shape=(None, 1))
    loss = L2Loss(in_layers=[label, output])
  else:
    raise NotImplementedError(
        "Classification support not added yet. Missing details in paper.")
  weights = Weights(shape=(None,))
  weighted_loss = WeightedError(in_layers=[loss, weights])
  self.set_loss(weighted_loss)
def build_graph(self):
  # Build placeholders
  self.atom_features = Feature(shape=(None, self.n_atom_feat))
  self.pair_features = Feature(shape=(None, self.n_pair_feat))
  self.atom_split = Feature(shape=(None,), dtype=tf.int32)
  self.atom_to_pair = Feature(shape=(None, 2), dtype=tf.int32)

  message_passing = MessagePassing(
      self.T,
      message_fn='enn',
      update_fn='gru',
      n_hidden=self.n_hidden,
      in_layers=[self.atom_features, self.pair_features, self.atom_to_pair])

  atom_embeddings = Dense(self.n_hidden, in_layers=[message_passing])

  mol_embeddings = SetGather(
      self.M,
      self.batch_size,
      n_hidden=self.n_hidden,
      in_layers=[atom_embeddings, self.atom_split])

  dense1 = Dense(
      out_channels=2 * self.n_hidden,
      activation_fn=tf.nn.relu,
      in_layers=[mol_embeddings])

  costs = []
  self.labels_fd = []
  for task in range(self.n_tasks):
    if self.mode == "classification":
      classification = Dense(
          out_channels=2, activation_fn=None, in_layers=[dense1])
      softmax = SoftMax(in_layers=[classification])
      self.add_output(softmax)
      label = Label(shape=(None, 2))
      self.labels_fd.append(label)
      cost = SoftMaxCrossEntropy(in_layers=[label, classification])
      costs.append(cost)
    if self.mode == "regression":
      regression = Dense(
          out_channels=1, activation_fn=None, in_layers=[dense1])
      self.add_output(regression)
      label = Label(shape=(None, 1))
      self.labels_fd.append(label)
      cost = L2Loss(in_layers=[label, regression])
      costs.append(cost)
  if self.mode == "classification":
    all_cost = Concat(in_layers=costs, axis=1)
  elif self.mode == "regression":
    all_cost = Stack(in_layers=costs, axis=1)
  self.weights = Weights(shape=(None, self.n_tasks))
  loss = WeightedError(in_layers=[all_cost, self.weights])
  self.set_loss(loss)
def build_graph(self): """Building graph structures: Features => DAGLayer => DAGGather => Classification or Regression """ self.atom_features = Feature(shape=(None, self.n_atom_feat)) self.parents = Feature(shape=(None, self.max_atoms, self.max_atoms), dtype=tf.int32) self.calculation_orders = Feature(shape=(None, self.max_atoms), dtype=tf.int32) self.calculation_masks = Feature(shape=(None, self.max_atoms), dtype=tf.bool) self.membership = Feature(shape=(None, ), dtype=tf.int32) self.n_atoms = Feature(shape=(), dtype=tf.int32) dag_layer1 = DAGLayer(n_graph_feat=self.n_graph_feat, n_atom_feat=self.n_atom_feat, max_atoms=self.max_atoms, batch_size=self.batch_size, in_layers=[ self.atom_features, self.parents, self.calculation_orders, self.calculation_masks, self.n_atoms ]) dag_gather = DAGGather(n_graph_feat=self.n_graph_feat, n_outputs=self.n_outputs, max_atoms=self.max_atoms, in_layers=[dag_layer1, self.membership]) costs = [] self.labels_fd = [] for task in range(self.n_tasks): if self.mode == "classification": classification = Dense(out_channels=2, activation_fn=None, in_layers=[dag_gather]) softmax = SoftMax(in_layers=[classification]) self.add_output(softmax) label = Label(shape=(None, 2)) self.labels_fd.append(label) cost = SoftMaxCrossEntropy(in_layers=[label, classification]) costs.append(cost) if self.mode == "regression": regression = Dense(out_channels=1, activation_fn=None, in_layers=[dag_gather]) self.add_output(regression) label = Label(shape=(None, 1)) self.labels_fd.append(label) cost = L2Loss(in_layers=[label, regression]) costs.append(cost) if self.mode == "classification": all_cost = Concat(in_layers=costs, axis=1) elif self.mode == "regression": all_cost = Stack(in_layers=costs, axis=1) self.weights = Weights(shape=(None, self.n_tasks)) loss = WeightedError(in_layers=[all_cost, self.weights]) self.set_loss(loss)
def build_graph(self):
  self.smiles_seqs = Feature(shape=(None, self.seq_length), dtype=tf.int32)
  # Character embedding
  self.Embedding = DTNNEmbedding(
      n_embedding=self.n_embedding,
      periodic_table_length=len(self.char_dict.keys()) + 1,
      in_layers=[self.smiles_seqs])
  self.pooled_outputs = []
  self.conv_layers = []
  for filter_size, num_filter in zip(self.kernel_sizes, self.num_filters):
    # Multiple convolutional layers with different filter widths
    self.conv_layers.append(
        Conv1D(
            kernel_size=filter_size,
            filters=num_filter,
            padding='valid',
            in_layers=[self.Embedding]))
    # Max-over-time pooling
    self.pooled_outputs.append(
        ReduceMax(axis=1, in_layers=[self.conv_layers[-1]]))
  # Concat features from all filters (one feature per filter)
  concat_outputs = Concat(axis=1, in_layers=self.pooled_outputs)
  dropout = Dropout(dropout_prob=self.dropout, in_layers=[concat_outputs])
  dense = Dense(
      out_channels=200, activation_fn=tf.nn.relu, in_layers=[dropout])
  # Highway layer from https://arxiv.org/pdf/1505.00387.pdf
  self.gather = Highway(in_layers=[dense])

  costs = []
  self.labels_fd = []
  for task in range(self.n_tasks):
    if self.mode == "classification":
      classification = Dense(
          out_channels=2, activation_fn=None, in_layers=[self.gather])
      softmax = SoftMax(in_layers=[classification])
      self.add_output(softmax)
      label = Label(shape=(None, 2))
      self.labels_fd.append(label)
      cost = SoftMaxCrossEntropy(in_layers=[label, classification])
      costs.append(cost)
    if self.mode == "regression":
      regression = Dense(
          out_channels=1, activation_fn=None, in_layers=[self.gather])
      self.add_output(regression)
      label = Label(shape=(None, 1))
      self.labels_fd.append(label)
      cost = L2Loss(in_layers=[label, regression])
      costs.append(cost)
  # Both modes aggregate per-task costs identically, so the original
  # classification/regression branch is collapsed into one statement.
  all_cost = Stack(in_layers=costs, axis=1)
  self.weights = Weights(shape=(None, self.n_tasks))
  loss = WeightedError(in_layers=[all_cost, self.weights])
  self.set_loss(loss)
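# Sketch of the "max-over-time" pooling done by ReduceMax(axis=1) above: for
# each convolutional filter, keep only its maximum activation across sequence
# positions, so a variable-length SMILES string contributes exactly one
# feature per filter. Shapes below are illustrative.
import numpy as np

conv_out = np.random.rand(8, 95, 32)  # (batch, positions, filters)
pooled = conv_out.max(axis=1)         # (batch, filters)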
def build_graph(self):
  self.atom_features = Feature(shape=(None, 75))
  self.degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
  self.membership = Feature(shape=(None,), dtype=tf.int32)
  self.deg_adjs = []
  for i in range(0, 10 + 1):
    deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
    self.deg_adjs.append(deg_adj)
  in_layer = self.atom_features
  for layer_size in self.graph_conv_layers:
    gc1_in = [in_layer, self.degree_slice, self.membership] + self.deg_adjs
    gc1 = GraphConv(layer_size, activation_fn=tf.nn.relu, in_layers=gc1_in)
    batch_norm1 = MyBatchNorm(in_layers=[gc1])
    gp_in = [batch_norm1, self.degree_slice, self.membership] + self.deg_adjs
    in_layer = GraphPool(in_layers=gp_in)
  dense = Dense(
      out_channels=self.dense_layer_size[0],
      activation_fn=tf.nn.relu,
      in_layers=[in_layer])
  batch_norm3 = MyBatchNorm(in_layers=[dense])
  batch_norm3 = Dropout(self.dropout, in_layers=[batch_norm3])
  readout = GraphGather(
      batch_size=self.batch_size,
      activation_fn=tf.nn.tanh,
      in_layers=[batch_norm3, self.degree_slice, self.membership] +
      self.deg_adjs)
  curLayer = readout
  for myind in range(1, len(self.dense_layer_size) - 1):
    curLayer = Dense(
        out_channels=self.dense_layer_size[myind],
        activation_fn=tf.nn.relu,
        in_layers=[curLayer])
    curLayer = Dropout(self.dropout, in_layers=[curLayer])
  classification = Dense(
      out_channels=self.n_tasks, activation_fn=None, in_layers=[curLayer])
  sigmoid = MySigmoid(in_layers=[classification])
  self.add_output(sigmoid)
  self.label = Label(shape=(None, self.n_tasks))
  all_cost = MySigmoidCrossEntropy(in_layers=[self.label, classification])
  self.weights = Weights(shape=(None, self.n_tasks))
  loss = WeightedError(in_layers=[all_cost, self.weights])
  self.set_loss(loss)
  self.mydense = dense
  self.myreadout = readout
  self.myclassification = classification
  self.mysigmoid = sigmoid
  self.myall_cost = all_cost
  self.myloss = loss
def build_graph(self):
  self.vertex_features = Feature(shape=(None, self.max_atoms, 75))
  self.adj_matrix = Feature(shape=(None, self.max_atoms, 1, self.max_atoms))
  self.mask = Feature(shape=(None, self.max_atoms, 1))

  gcnn1 = BatchNorm(
      GraphCNN(
          num_filters=64,
          in_layers=[self.vertex_features, self.adj_matrix, self.mask]))
  gcnn1 = Dropout(self.dropout, in_layers=gcnn1)
  gcnn2 = BatchNorm(
      GraphCNN(num_filters=64, in_layers=[gcnn1, self.adj_matrix, self.mask]))
  gcnn2 = Dropout(self.dropout, in_layers=gcnn2)
  gc_pool, adj_matrix = GraphCNNPool(
      num_vertices=32, in_layers=[gcnn2, self.adj_matrix, self.mask])
  gc_pool = BatchNorm(gc_pool)
  gc_pool = Dropout(self.dropout, in_layers=gc_pool)
  gcnn3 = BatchNorm(GraphCNN(num_filters=32, in_layers=[gc_pool, adj_matrix]))
  gcnn3 = Dropout(self.dropout, in_layers=gcnn3)
  gc_pool2, adj_matrix2 = GraphCNNPool(
      num_vertices=8, in_layers=[gcnn3, adj_matrix])
  gc_pool2 = BatchNorm(gc_pool2)
  gc_pool2 = Dropout(self.dropout, in_layers=gc_pool2)
  flattened = Flatten(in_layers=gc_pool2)
  readout = Dense(
      out_channels=256, activation_fn=tf.nn.relu, in_layers=flattened)

  costs = []
  self.my_labels = []
  for task in range(self.n_tasks):
    if self.mode == 'classification':
      classification = Dense(
          out_channels=2, activation_fn=None, in_layers=[readout])
      softmax = SoftMax(in_layers=[classification])
      self.add_output(softmax)
      label = Label(shape=(None, 2))
      self.my_labels.append(label)
      cost = SoftMaxCrossEntropy(in_layers=[label, classification])
      costs.append(cost)
    if self.mode == 'regression':
      regression = Dense(
          out_channels=1, activation_fn=None, in_layers=[readout])
      self.add_output(regression)
      label = Label(shape=(None, 1))
      self.my_labels.append(label)
      cost = L2Loss(in_layers=[label, regression])
      costs.append(cost)
  if self.mode == "classification":
    entropy = Stack(in_layers=costs, axis=-1)
  elif self.mode == "regression":
    entropy = Stack(in_layers=costs, axis=1)
  self.my_task_weights = Weights(shape=(None, self.n_tasks))
  loss = WeightedError(in_layers=[entropy, self.my_task_weights])
  self.set_loss(loss)
def _build(self):
  self.A_tilda_k = list()
  for k in range(1, self.k_max + 1):
    self.A_tilda_k.append(
        Feature(
            name="graph_adjacency_{}".format(k),
            dtype=tf.float32,
            shape=[None, self.max_nodes, self.max_nodes]))
  self.X = Feature(
      name='atom_features',
      dtype=tf.float32,
      shape=[None, self.max_nodes, self.num_node_features])

  graph_layers = list()
  adaptive_filters = list()
  for index, k in enumerate(range(1, self.k_max + 1)):
    in_layers = [self.A_tilda_k[index], self.X]
    adaptive_filters.append(
        AdaptiveFilter(
            batch_size=self.batch_size,
            in_layers=in_layers,
            num_nodes=self.max_nodes,
            num_node_features=self.num_node_features,
            combine_method=self.combine_method))
    graph_layers.append(
        KOrderGraphConv(
            batch_size=self.batch_size,
            in_layers=in_layers + [adaptive_filters[index]],
            num_nodes=self.max_nodes,
            num_node_features=self.num_node_features,
            init='glorot_uniform'))

  graph_features = Concat(in_layers=graph_layers, axis=2)
  graph_features = ReLU(in_layers=[graph_features])
  flattened = Flatten(in_layers=[graph_features])
  dense1 = Dense(
      in_layers=[flattened], out_channels=64, activation_fn=tf.nn.relu)
  dense2 = Dense(
      in_layers=[dense1], out_channels=16, activation_fn=tf.nn.relu)
  dense3 = Dense(
      in_layers=[dense2], out_channels=1 * self.n_tasks, activation_fn=None)
  output = Reshape(in_layers=[dense3], shape=(-1, self.n_tasks, 1))
  self.add_output(output)

  label = Label(shape=(None, self.n_tasks, 1))
  weights = Weights(shape=(None, self.n_tasks))
  loss = ReduceSum(L2Loss(in_layers=[label, output]))
  weighted_loss = WeightedError(in_layers=[loss, weights])
  self.set_loss(weighted_loss)
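# The A_tilda_k inputs above are presumably successive powers of a
# (normalized) adjacency matrix, so the k-th branch sees k-hop
# neighborhoods. A sketch, under that assumption, of how such inputs could
# be precomputed for one graph:
import numpy as np

def adjacency_powers(adj, k_max):
  power, out = np.eye(adj.shape[0]), []
  for _ in range(k_max):
    power = power @ adj  # k-th matrix power reaches k-hop neighbors
    out.append(power)
  return out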
def _build_graph(self):
  self.smiles_seqs = Feature(shape=(None, self.seq_length), dtype=tf.int32)
  # Character embedding
  Embedding = DTNNEmbedding(
      n_embedding=self.n_embedding,
      periodic_table_length=len(self.char_dict.keys()) + 1,
      in_layers=[self.smiles_seqs])
  pooled_outputs = []
  conv_layers = []
  for filter_size, num_filter in zip(self.kernel_sizes, self.num_filters):
    # Multiple convolutional layers with different filter widths
    conv_layers.append(
        Conv1D(
            kernel_size=filter_size,
            filters=num_filter,
            padding='valid',
            in_layers=[Embedding]))
    # Max-over-time pooling
    pooled_outputs.append(ReduceMax(axis=1, in_layers=[conv_layers[-1]]))
  # Concat features from all filters (one feature per filter)
  concat_outputs = Concat(axis=1, in_layers=pooled_outputs)
  dropout = Dropout(dropout_prob=self.dropout, in_layers=[concat_outputs])
  dense = Dense(
      out_channels=200, activation_fn=tf.nn.relu, in_layers=[dropout])
  # Highway layer from https://arxiv.org/pdf/1505.00387.pdf
  gather = Highway(in_layers=[dense])

  if self.mode == "classification":
    logits = Dense(
        out_channels=self.n_tasks * 2, activation_fn=None, in_layers=[gather])
    logits = Reshape(shape=(-1, self.n_tasks, 2), in_layers=[logits])
    output = SoftMax(in_layers=[logits])
    self.add_output(output)
    labels = Label(shape=(None, self.n_tasks, 2))
    loss = SoftMaxCrossEntropy(in_layers=[labels, logits])
  else:
    vals = Dense(
        out_channels=self.n_tasks * 1, activation_fn=None, in_layers=[gather])
    vals = Reshape(shape=(-1, self.n_tasks, 1), in_layers=[vals])
    self.add_output(vals)
    labels = Label(shape=(None, self.n_tasks, 1))
    loss = ReduceSum(L2Loss(in_layers=[labels, vals]))
  weights = Weights(shape=(None, self.n_tasks))
  weighted_loss = WeightedError(in_layers=[loss, weights])
  self.set_loss(weighted_loss)
def build_graph(self): """Building graph structures: Features => DTNNEmbedding => DTNNStep => DTNNStep => DTNNGather => Regression """ self.atom_number = Feature(shape=(None, ), dtype=tf.int32) self.distance = Feature(shape=(None, self.n_distance)) self.atom_membership = Feature(shape=(None, ), dtype=tf.int32) self.distance_membership_i = Feature(shape=(None, ), dtype=tf.int32) self.distance_membership_j = Feature(shape=(None, ), dtype=tf.int32) dtnn_embedding = DTNNEmbedding(n_embedding=self.n_embedding, in_layers=[self.atom_number]) if self.dropout > 0.0: dtnn_embedding = Dropout(self.dropout, in_layers=dtnn_embedding) dtnn_layer1 = DTNNStep(n_embedding=self.n_embedding, n_distance=self.n_distance, in_layers=[ dtnn_embedding, self.distance, self.distance_membership_i, self.distance_membership_j ]) if self.dropout > 0.0: dtnn_layer1 = Dropout(self.dropout, in_layers=dtnn_layer1) dtnn_layer2 = DTNNStep(n_embedding=self.n_embedding, n_distance=self.n_distance, in_layers=[ dtnn_layer1, self.distance, self.distance_membership_i, self.distance_membership_j ]) if self.dropout > 0.0: dtnn_layer2 = Dropout(self.dropout, in_layers=dtnn_layer2) dtnn_gather = DTNNGather(n_embedding=self.n_embedding, layer_sizes=[self.n_hidden], n_outputs=self.n_tasks, output_activation=self.output_activation, in_layers=[dtnn_layer2, self.atom_membership]) if self.dropout > 0.0: dtnn_gather = Dropout(self.dropout, in_layers=dtnn_gather) n_tasks = self.n_tasks weights = Weights(shape=(None, n_tasks)) labels = Label(shape=(None, n_tasks)) output = Reshape( shape=(None, n_tasks), in_layers=[Dense(in_layers=dtnn_gather, out_channels=n_tasks)]) self.add_output(output) weighted_loss = ReduceSum(L2Loss(in_layers=[labels, output, weights])) self.set_loss(weighted_loss)
def build_graph(self): """Building graph structures: Features => DTNNEmbedding => DTNNStep => DTNNStep => DTNNGather => Regression """ self.atom_number = Feature(shape=(None,), dtype=tf.int32) self.distance = Feature(shape=(None, self.n_distance)) self.atom_membership = Feature(shape=(None,), dtype=tf.int32) self.distance_membership_i = Feature(shape=(None,), dtype=tf.int32) self.distance_membership_j = Feature(shape=(None,), dtype=tf.int32) dtnn_embedding = DTNNEmbedding( n_embedding=self.n_embedding, in_layers=[self.atom_number]) dtnn_layer1 = DTNNStep( n_embedding=self.n_embedding, n_distance=self.n_distance, in_layers=[ dtnn_embedding, self.distance, self.distance_membership_i, self.distance_membership_j ]) dtnn_layer2 = DTNNStep( n_embedding=self.n_embedding, n_distance=self.n_distance, in_layers=[ dtnn_layer1, self.distance, self.distance_membership_i, self.distance_membership_j ]) dtnn_gather = DTNNGather( n_embedding=self.n_embedding, layer_sizes=[self.n_hidden], n_outputs=self.n_tasks, output_activation=self.output_activation, in_layers=[dtnn_layer2, self.atom_membership]) costs = [] self.labels_fd = [] for task in range(self.n_tasks): regression = DTNNExtract(task, in_layers=[dtnn_gather]) self.add_output(regression) label = Label(shape=(None, 1)) self.labels_fd.append(label) cost = L2Loss(in_layers=[label, regression]) costs.append(cost) all_cost = Stack(in_layers=costs, axis=1) self.weights = Weights(shape=(None, self.n_tasks)) loss = WeightedError(in_layers=[all_cost, self.weights]) self.set_loss(loss)
def build_graph(self):
  self.atom_numbers = Feature(shape=(None, self.max_atoms), dtype=tf.int32)
  self.atom_flags = Feature(shape=(None, self.max_atoms * self.max_atoms))
  self.atom_feats = Feature(shape=(None, self.max_atoms * 4))

  reshaped_atom_flags = Reshape(
      in_layers=[self.atom_flags], shape=(-1, self.max_atoms, self.max_atoms))
  reshaped_atom_feats = Reshape(
      in_layers=[self.atom_feats], shape=(-1, self.max_atoms, 4))

  previous_layer = ANIFeat(
      in_layers=reshaped_atom_feats, max_atoms=self.max_atoms)
  self.featurized = previous_layer

  Hiddens = []
  for n_hidden in self.layer_structures:
    Hidden = AtomicDifferentiatedDense(
        self.max_atoms,
        n_hidden,
        self.atom_number_cases,
        activation=self.activation_fn,
        in_layers=[previous_layer, self.atom_numbers])
    Hiddens.append(Hidden)
    previous_layer = Hiddens[-1]

  regression = Dense(
      out_channels=1 * self.n_tasks,
      activation_fn=None,
      in_layers=[Hiddens[-1]])
  output = BPGather(self.max_atoms, in_layers=[regression, reshaped_atom_flags])
  self.add_output(output)

  label = Label(shape=(None, self.n_tasks, 1))
  loss = ReduceSum(L2Loss(in_layers=[label, output]))
  weights = Weights(shape=(None, self.n_tasks))
  weighted_loss = WeightedError(in_layers=[loss, weights])
  if self.exp_loss:
    weighted_loss = Exp(in_layers=[weighted_loss])
  self.set_loss(weighted_loss)
def build_graph(self):
  self.atom_numbers = Feature(shape=(None, self.max_atoms), dtype=tf.int32)
  self.atom_flags = Feature(shape=(None, self.max_atoms, self.max_atoms))
  self.atom_feats = Feature(shape=(None, self.max_atoms, 4))

  previous_layer = ANIFeat(
      in_layers=self.atom_feats, max_atoms=self.max_atoms)
  self.featurized = previous_layer

  Hiddens = []
  for n_hidden in self.layer_structures:
    Hidden = AtomicDifferentiatedDense(
        self.max_atoms,
        n_hidden,
        self.atom_number_cases,
        activation=self.activation_fn,
        in_layers=[previous_layer, self.atom_numbers])
    Hiddens.append(Hidden)
    previous_layer = Hiddens[-1]

  costs = []
  self.labels_fd = []
  for task in range(self.n_tasks):
    regression = Dense(
        out_channels=1, activation_fn=None, in_layers=[Hiddens[-1]])
    output = BPGather(self.max_atoms, in_layers=[regression, self.atom_flags])
    self.add_output(output)
    label = Label(shape=(None, 1))
    self.labels_fd.append(label)
    cost = L2Loss(in_layers=[label, output])
    costs.append(cost)
  all_cost = Stack(in_layers=costs, axis=1)
  self.weights = Weights(shape=(None, self.n_tasks))
  loss = WeightedError(in_layers=[all_cost, self.weights])
  if self.exp_loss:
    loss = Exp(in_layers=[loss])
  self.set_loss(loss)
def build_graph(self): print("building") features = Feature(shape=(None, self.n_features)) last_layer = features for layer_size in self.encoder_layers: last_layer = Dense(in_layers=last_layer, activation_fn=tf.nn.elu, out_channels=layer_size) self.mean = Dense(in_layers=last_layer, activation_fn=None, out_channels=1) self.std = Dense(in_layers=last_layer, activation_fn=None, out_channels=1) readout = CombineMeanStd([self.mean, self.std], training_only=True) last_layer = readout for layer_size in self.decoder_layers: last_layer = Dense(in_layers=readout, activation_fn=tf.nn.elu, out_channels=layer_size) self.reconstruction = Dense(in_layers=last_layer, activation_fn=None, out_channels=self.n_features) weights = Weights(shape=(None, self.n_features)) reproduction_loss = L2Loss( in_layers=[features, self.reconstruction, weights]) reproduction_loss = ReduceSum(in_layers=reproduction_loss, axis=0) global_step = TensorWrapper(self._get_tf("GlobalStep")) kl_loss = KLDivergenceLoss( in_layers=[self.mean, self.std, global_step], annealing_start_step=self.kl_annealing_start_step, annealing_stop_step=self.kl_annealing_stop_step) loss = Add(in_layers=[kl_loss, reproduction_loss], weights=[0.5, 1]) self.add_output(self.mean) self.add_output(self.reconstruction) self.set_loss(loss)
def build_graph(self): """Building graph structures: Features => WeaveLayer => WeaveLayer => Dense => WeaveGather => Classification or Regression """ self.atom_features = Feature(shape=(None, self.n_atom_feat)) self.pair_features = Feature(shape=(None, self.n_pair_feat)) combined = Combine_AP(in_layers=[self.atom_features, self.pair_features]) self.pair_split = Feature(shape=(None,), dtype=tf.int32) self.atom_split = Feature(shape=(None,), dtype=tf.int32) self.atom_to_pair = Feature(shape=(None, 2), dtype=tf.int32) weave_layer1 = WeaveLayer( n_atom_input_feat=self.n_atom_feat, n_pair_input_feat=self.n_pair_feat, n_atom_output_feat=self.n_hidden, n_pair_output_feat=self.n_hidden, in_layers=[combined, self.pair_split, self.atom_to_pair]) weave_layer2 = WeaveLayer( n_atom_input_feat=self.n_hidden, n_pair_input_feat=self.n_hidden, n_atom_output_feat=self.n_hidden, n_pair_output_feat=self.n_hidden, update_pair=False, in_layers=[weave_layer1, self.pair_split, self.atom_to_pair]) separated = Separate_AP(in_layers=[weave_layer2]) dense1 = Dense( out_channels=self.n_graph_feat, activation_fn=tf.nn.tanh, in_layers=[separated]) batch_norm1 = BatchNormalization(epsilon=1e-5, mode=1, in_layers=[dense1]) weave_gather = WeaveGather( self.batch_size, n_input=self.n_graph_feat, gaussian_expand=True, in_layers=[batch_norm1, self.atom_split]) costs = [] self.labels_fd = [] for task in range(self.n_tasks): if self.mode == "classification": classification = Dense( out_channels=2, activation_fn=None, in_layers=[weave_gather]) softmax = SoftMax(in_layers=[classification]) self.add_output(softmax) label = Label(shape=(None, 2)) self.labels_fd.append(label) cost = SoftMaxCrossEntropy(in_layers=[label, classification]) costs.append(cost) if self.mode == "regression": regression = Dense( out_channels=1, activation_fn=None, in_layers=[weave_gather]) self.add_output(regression) label = Label(shape=(None, 1)) self.labels_fd.append(label) cost = L2Loss(in_layers=[label, regression]) costs.append(cost) if self.mode == "classification": all_cost = Concat(in_layers=costs, axis=1) elif self.mode == "regression": all_cost = Stack(in_layers=costs, axis=1) self.weights = Weights(shape=(None, self.n_tasks)) loss = WeightedError(in_layers=[all_cost, self.weights]) self.set_loss(loss)
def build_graph(self): """ Building graph structures: """ self.atom_features = Feature(shape=(None, 75)) self.degree_slice = Feature(shape=(None, 2), dtype=tf.int32) self.membership = Feature(shape=(None,), dtype=tf.int32) self.deg_adjs = [] for i in range(0, 10 + 1): deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32) self.deg_adjs.append(deg_adj) gc1 = GraphConv( 64, activation_fn=tf.nn.relu, in_layers=[self.atom_features, self.degree_slice, self.membership] + self.deg_adjs) batch_norm1 = BatchNorm(in_layers=[gc1]) gp1 = GraphPool(in_layers=[batch_norm1, self.degree_slice, self.membership] + self.deg_adjs) gc2 = GraphConv( 64, activation_fn=tf.nn.relu, in_layers=[gp1, self.degree_slice, self.membership] + self.deg_adjs) batch_norm2 = BatchNorm(in_layers=[gc2]) gp2 = GraphPool(in_layers=[batch_norm2, self.degree_slice, self.membership] + self.deg_adjs) dense = Dense(out_channels=128, activation_fn=tf.nn.relu, in_layers=[gp2]) batch_norm3 = BatchNorm(in_layers=[dense]) readout = GraphGather( batch_size=self.batch_size, activation_fn=tf.nn.tanh, in_layers=[batch_norm3, self.degree_slice, self.membership] + self.deg_adjs) if self.error_bars == True: readout = Dropout(in_layers=[readout], dropout_prob=0.2) costs = [] self.my_labels = [] for task in range(self.n_tasks): if self.mode == 'classification': classification = Dense( out_channels=2, activation_fn=None, in_layers=[readout]) softmax = SoftMax(in_layers=[classification]) self.add_output(softmax) label = Label(shape=(None, 2)) self.my_labels.append(label) cost = SoftMaxCrossEntropy(in_layers=[label, classification]) costs.append(cost) if self.mode == 'regression': regression = Dense( out_channels=1, activation_fn=None, in_layers=[readout]) self.add_output(regression) label = Label(shape=(None, 1)) self.my_labels.append(label) cost = L2Loss(in_layers=[label, regression]) costs.append(cost) if self.mode == "classification": entropy = Concat(in_layers=costs, axis=-1) elif self.mode == "regression": entropy = Stack(in_layers=costs, axis=1) self.my_task_weights = Weights(shape=(None, self.n_tasks)) loss = WeightedError(in_layers=[entropy, self.my_task_weights]) self.set_loss(loss)
def graph_conv_net(batch_size, prior, num_task):
  """Build a TensorGraph for a multilabel classification task.

  Returns the graph together with its feature, label, and weight layers.
  """
  tg = TensorGraph(use_queue=False)
  if prior:
    add_on = num_task
  else:
    add_on = 0
  atom_features = Feature(shape=(None, 75 + 2 * add_on))
  circular_features = Feature(shape=(batch_size, 256), dtype=tf.float32)

  degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
  membership = Feature(shape=(None,), dtype=tf.int32)
  deg_adjs = []
  for i in range(0, 10 + 1):
    deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
    deg_adjs.append(deg_adj)

  gc1 = GraphConv(
      64 + add_on,
      activation_fn=tf.nn.elu,
      in_layers=[atom_features, degree_slice, membership] + deg_adjs)
  batch_norm1 = BatchNorm(in_layers=[gc1])
  gp1 = GraphPool(in_layers=[batch_norm1, degree_slice, membership] + deg_adjs)
  gc2 = GraphConv(
      64 + add_on,
      activation_fn=tf.nn.elu,
      in_layers=[gc1, degree_slice, membership] + deg_adjs)
  batch_norm2 = BatchNorm(in_layers=[gc2])
  gp2 = GraphPool(in_layers=[batch_norm2, degree_slice, membership] + deg_adjs)
  add = Concat(in_layers=[gp1, gp2])
  add = Dropout(0.5, in_layers=[add])
  dense = Dense(out_channels=128, activation_fn=tf.nn.elu, in_layers=[add])
  batch_norm3 = BatchNorm(in_layers=[dense])
  readout = GraphGather(
      batch_size=batch_size,
      activation_fn=tf.nn.tanh,
      in_layers=[batch_norm3, degree_slice, membership] + deg_adjs)
  batch_norm4 = BatchNorm(in_layers=[readout])

  # Two stacked dense blocks over the circular fingerprints; the second block
  # chains off the first (the original read circular_features twice, which
  # left the first block disconnected).
  dense1 = Dense(
      out_channels=128, activation_fn=tf.nn.elu, in_layers=[circular_features])
  dense1 = BatchNorm(in_layers=[dense1])
  dense1 = Dropout(0.5, in_layers=[dense1])
  dense1 = Dense(out_channels=128, activation_fn=tf.nn.elu, in_layers=[dense1])
  dense1 = BatchNorm(in_layers=[dense1])
  dense1 = Dropout(0.5, in_layers=[dense1])

  merge_feat = Concat(in_layers=[dense1, batch_norm4])
  merge = Dense(
      out_channels=256, activation_fn=tf.nn.elu, in_layers=[merge_feat])
  costs = []
  labels = []
  for task in range(num_task):
    classification = Dense(
        out_channels=2, activation_fn=None, in_layers=[merge])
    softmax = SoftMax(in_layers=[classification])
    tg.add_output(softmax)
    label = Label(shape=(None, 2))
    labels.append(label)
    cost = SoftMaxCrossEntropy(in_layers=[label, classification])
    costs.append(cost)
  all_cost = Stack(in_layers=costs, axis=1)
  weights = Weights(shape=(None, num_task))
  loss = WeightedError(in_layers=[all_cost, weights])
  tg.set_loss(loss)
  return tg, atom_features, circular_features, degree_slice, membership, \
      deg_adjs, labels, weights
def build_graph(self): """Building graph structures: Features => WeaveLayer => WeaveLayer => Dense => WeaveGather => Classification or Regression """ self.atom_features = Feature(shape=(None, self.n_atom_feat)) self.pair_features = Feature(shape=(None, self.n_pair_feat)) self.pair_split = Feature(shape=(None, ), dtype=tf.int32) self.atom_split = Feature(shape=(None, ), dtype=tf.int32) self.atom_to_pair = Feature(shape=(None, 2), dtype=tf.int32) weave_layer1A, weave_layer1P = WeaveLayerFactory( n_atom_input_feat=self.n_atom_feat, n_pair_input_feat=self.n_pair_feat, n_atom_output_feat=self.n_hidden, n_pair_output_feat=self.n_hidden, in_layers=[ self.atom_features, self.pair_features, self.pair_split, self.atom_to_pair ]) weave_layer2A, weave_layer2P = WeaveLayerFactory( n_atom_input_feat=self.n_hidden, n_pair_input_feat=self.n_hidden, n_atom_output_feat=self.n_hidden, n_pair_output_feat=self.n_hidden, update_pair=False, in_layers=[ weave_layer1A, weave_layer1P, self.pair_split, self.atom_to_pair ]) dense1 = Dense(out_channels=self.n_graph_feat, activation_fn=tf.nn.tanh, in_layers=weave_layer2A) batch_norm1 = BatchNorm(epsilon=1e-5, in_layers=[dense1]) weave_gather = WeaveGather(self.batch_size, n_input=self.n_graph_feat, gaussian_expand=True, in_layers=[batch_norm1, self.atom_split]) n_tasks = self.n_tasks weights = Weights(shape=(None, n_tasks)) if self.mode == 'classification': n_classes = self.n_classes labels = Label(shape=(None, n_tasks, n_classes)) logits = Reshape(shape=(None, n_tasks, n_classes), in_layers=[ Dense(in_layers=weave_gather, out_channels=n_tasks * n_classes) ]) output = SoftMax(logits) self.add_output(output) loss = SoftMaxCrossEntropy(in_layers=[labels, logits]) weighted_loss = WeightedError(in_layers=[loss, weights]) self.set_loss(weighted_loss) else: labels = Label(shape=(None, n_tasks)) output = Reshape(shape=(None, n_tasks), in_layers=[ Dense(in_layers=weave_gather, out_channels=n_tasks) ]) self.add_output(output) weighted_loss = ReduceSum( L2Loss(in_layers=[labels, output, weights])) self.set_loss(weighted_loss)
def build_graph(self):
  # Build placeholders
  self.atom_features = Feature(shape=(None, self.n_atom_feat))
  self.pair_features = Feature(shape=(None, self.n_pair_feat))
  self.atom_split = Feature(shape=(None,), dtype=tf.int32)
  self.atom_to_pair = Feature(shape=(None, 2), dtype=tf.int32)

  message_passing = MessagePassing(
      self.T,
      message_fn='enn',
      update_fn='gru',
      n_hidden=self.n_hidden,
      in_layers=[self.atom_features, self.pair_features, self.atom_to_pair])

  atom_embeddings = Dense(self.n_hidden, in_layers=[message_passing])

  mol_embeddings = SetGather(
      self.M,
      self.batch_size,
      n_hidden=self.n_hidden,
      in_layers=[atom_embeddings, self.atom_split])

  dense1 = Dense(
      out_channels=2 * self.n_hidden,
      activation_fn=tf.nn.relu,
      in_layers=[mol_embeddings])

  n_tasks = self.n_tasks
  weights = Weights(shape=(None, n_tasks))
  if self.mode == 'classification':
    n_classes = self.n_classes
    labels = Label(shape=(None, n_tasks, n_classes))
    logits = Reshape(
        shape=(None, n_tasks, n_classes),
        in_layers=[Dense(in_layers=dense1, out_channels=n_tasks * n_classes)])
    logits = TrimGraphOutput([logits, weights])
    output = SoftMax(logits)
    self.add_output(output)
    loss = SoftMaxCrossEntropy(in_layers=[labels, logits])
    weighted_loss = WeightedError(in_layers=[loss, weights])
    self.set_loss(weighted_loss)
  else:
    labels = Label(shape=(None, n_tasks))
    output = Reshape(
        shape=(None, n_tasks),
        in_layers=[Dense(in_layers=dense1, out_channels=n_tasks)])
    output = TrimGraphOutput([output, weights])
    self.add_output(output)
    if self.uncertainty:
      log_var = Reshape(
          shape=(None, n_tasks),
          in_layers=[Dense(in_layers=dense1, out_channels=n_tasks)])
      log_var = TrimGraphOutput([log_var, weights])
      var = Exp(log_var)
      self.add_variance(var)
      diff = labels - output
      weighted_loss = weights * (diff * diff / var + log_var)
      weighted_loss = ReduceSum(ReduceMean(weighted_loss, axis=[1]))
    else:
      weighted_loss = ReduceSum(L2Loss(in_layers=[labels, output, weights]))
    self.set_loss(weighted_loss)
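# NumPy restatement of the uncertainty branch above: the network predicts a
# per-task log-variance alongside each value, and the squared error is
# scaled by the predicted variance, so confidently wrong predictions cost
# the most, while the log_var term keeps the variance from growing without
# bound. Shapes are illustrative.
import numpy as np

labels = np.random.rand(8, 3)
output = np.random.rand(8, 3)
log_var = np.random.randn(8, 3)
weights = np.ones((8, 3))
var = np.exp(log_var)
diff = labels - output
loss = np.sum(np.mean(weights * (diff * diff / var + log_var), axis=1))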
def build_graph(self): """ Building graph structures: """ self.atom_features = Feature(shape=(None, self.number_atom_features)) self.degree_slice = Feature(shape=(None, 2), dtype=tf.int32) self.membership = Feature(shape=(None, ), dtype=tf.int32) self.deg_adjs = [] for i in range(0, 10 + 1): deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32) self.deg_adjs.append(deg_adj) in_layer = self.atom_features for layer_size, dropout in zip(self.graph_conv_layers, self.dropout): gc1_in = [in_layer, self.degree_slice, self.membership ] + self.deg_adjs gc1 = GraphConv(layer_size, activation_fn=tf.nn.relu, in_layers=gc1_in) batch_norm1 = BatchNorm(in_layers=[gc1]) if dropout > 0.0: batch_norm1 = Dropout(dropout, in_layers=batch_norm1) gp_in = [batch_norm1, self.degree_slice, self.membership ] + self.deg_adjs in_layer = GraphPool(in_layers=gp_in) dense = Dense(out_channels=self.dense_layer_size, activation_fn=tf.nn.relu, in_layers=[in_layer]) batch_norm3 = BatchNorm(in_layers=[dense]) if self.dropout[-1] > 0.0: batch_norm3 = Dropout(self.dropout[-1], in_layers=batch_norm3) readout = GraphGather( batch_size=self.batch_size, activation_fn=tf.nn.tanh, in_layers=[batch_norm3, self.degree_slice, self.membership] + self.deg_adjs) n_tasks = self.n_tasks weights = Weights(shape=(None, n_tasks)) if self.mode == 'classification': n_classes = self.n_classes labels = Label(shape=(None, n_tasks, n_classes)) logits = Reshape(shape=(None, n_tasks, n_classes), in_layers=[ Dense(in_layers=readout, out_channels=n_tasks * n_classes) ]) logits = TrimGraphOutput([logits, weights]) output = SoftMax(logits) self.add_output(output) loss = SoftMaxCrossEntropy(in_layers=[labels, logits]) weighted_loss = WeightedError(in_layers=[loss, weights]) self.set_loss(weighted_loss) else: labels = Label(shape=(None, n_tasks)) output = Reshape( shape=(None, n_tasks), in_layers=[Dense(in_layers=readout, out_channels=n_tasks)]) output = TrimGraphOutput([output, weights]) self.add_output(output) if self.uncertainty: log_var = Reshape( shape=(None, n_tasks), in_layers=[Dense(in_layers=readout, out_channels=n_tasks)]) log_var = TrimGraphOutput([log_var, weights]) var = Exp(log_var) self.add_variance(var) diff = labels - output weighted_loss = weights * (diff * diff / var + log_var) weighted_loss = ReduceSum(ReduceMean(weighted_loss, axis=[1])) else: weighted_loss = ReduceSum( L2Loss(in_layers=[labels, output, weights])) self.set_loss(weighted_loss)
def build_graph(self): """Building graph structures: Features => DAGLayer => DAGGather => Classification or Regression """ self.atom_features = Feature(shape=(None, self.n_atom_feat)) self.parents = Feature(shape=(None, self.max_atoms, self.max_atoms), dtype=tf.int32) self.calculation_orders = Feature(shape=(None, self.max_atoms), dtype=tf.int32) self.calculation_masks = Feature(shape=(None, self.max_atoms), dtype=tf.bool) self.membership = Feature(shape=(None, ), dtype=tf.int32) self.n_atoms = Feature(shape=(), dtype=tf.int32) dag_layer1 = DAGLayer(n_graph_feat=self.n_graph_feat, n_atom_feat=self.n_atom_feat, max_atoms=self.max_atoms, layer_sizes=self.layer_sizes, dropout=self.dropout, batch_size=self.batch_size, in_layers=[ self.atom_features, self.parents, self.calculation_orders, self.calculation_masks, self.n_atoms ]) dag_gather = DAGGather(n_graph_feat=self.n_graph_feat, n_outputs=self.n_outputs, max_atoms=self.max_atoms, layer_sizes=self.layer_sizes_gather, dropout=self.dropout, in_layers=[dag_layer1, self.membership]) n_tasks = self.n_tasks weights = Weights(shape=(None, n_tasks)) if self.mode == 'classification': n_classes = self.n_classes labels = Label(shape=(None, n_tasks, n_classes)) logits = Reshape(shape=(None, n_tasks, n_classes), in_layers=[ Dense(in_layers=dag_gather, out_channels=n_tasks * n_classes) ]) output = SoftMax(logits) self.add_output(output) loss = SoftMaxCrossEntropy(in_layers=[labels, logits]) weighted_loss = WeightedError(in_layers=[loss, weights]) self.set_loss(weighted_loss) else: labels = Label(shape=(None, n_tasks)) output = Reshape( shape=(None, n_tasks), in_layers=[Dense(in_layers=dag_gather, out_channels=n_tasks)]) self.add_output(output) if self.uncertainty: log_var = Reshape(shape=(None, n_tasks), in_layers=[ Dense(in_layers=dag_gather, out_channels=n_tasks) ]) var = Exp(log_var) self.add_variance(var) diff = labels - output weighted_loss = weights * (diff * diff / var + log_var) weighted_loss = ReduceSum(ReduceMean(weighted_loss, axis=[1])) else: weighted_loss = ReduceSum( L2Loss(in_layers=[labels, output, weights])) self.set_loss(weighted_loss)
def __init__(self,
             n_tasks,
             n_features,
             layer_sizes=[1000],
             weight_init_stddevs=[0.02],
             bias_init_consts=[1.0],
             weight_decay_penalty=0.0,
             weight_decay_penalty_type="l2",
             dropouts=[0.5],
             n_classes=2,
             **kwargs):
  """Create a TensorGraphMultiTaskClassifier.

  In addition to the following arguments, this class also accepts all the
  keyword arguments from TensorGraph.

  Parameters
  ----------
  n_tasks: int
    number of tasks
  n_features: int
    number of features
  layer_sizes: list
    the size of each dense layer in the network.  The length of this list
    determines the number of layers.
  weight_init_stddevs: list
    the standard deviation of the distribution to use for weight
    initialization of each layer.  The length of this list should equal
    len(layer_sizes).
  bias_init_consts: list
    the value to initialize the biases in each layer to.  The length of
    this list should equal len(layer_sizes).
  weight_decay_penalty: float
    the magnitude of the weight decay penalty to use
  weight_decay_penalty_type: str
    the type of penalty to use for weight decay, either 'l1' or 'l2'
  dropouts: list
    the dropout probability to use for each layer.  The length of this list
    should equal len(layer_sizes).
  n_classes: int
    the number of classes
  """
  super(TensorGraphMultiTaskClassifier, self).__init__(
      mode='classification', **kwargs)
  self.n_tasks = n_tasks
  self.n_features = n_features
  self.n_classes = n_classes

  # Add the input features.
  mol_features = Feature(shape=(None, n_features))
  prev_layer = mol_features

  # Add the dense layers
  for size, weight_stddev, bias_const, dropout in zip(
      layer_sizes, weight_init_stddevs, bias_init_consts, dropouts):
    layer = Dense(
        in_layers=[prev_layer],
        out_channels=size,
        activation_fn=tf.nn.relu,
        weights_initializer=TFWrapper(
            tf.truncated_normal_initializer, stddev=weight_stddev),
        biases_initializer=TFWrapper(
            tf.constant_initializer, value=bias_const))
    if dropout > 0.0:
      layer = Dropout(dropout, in_layers=[layer])
    prev_layer = layer

  # Compute the loss function for each label.
  output = Reshape(
      shape=(-1, n_tasks, n_classes),
      in_layers=[
          Dense(in_layers=[prev_layer], out_channels=n_tasks * n_classes)
      ])
  self.add_output(output)
  labels = Label(shape=(None, n_tasks, n_classes))
  weights = Weights(shape=(None, n_tasks))
  loss = SoftMaxCrossEntropy(in_layers=[labels, output])
  weighted_loss = WeightedError(in_layers=[loss, weights])
  if weight_decay_penalty != 0.0:
    weighted_loss = WeightDecay(
        weight_decay_penalty,
        weight_decay_penalty_type,
        in_layers=[weighted_loss])
  self.set_loss(weighted_loss)
def graph_conv_model(batch_size, tasks):
  model = TensorGraph(
      model_dir=model_dir, batch_size=batch_size, use_queue=False)
  atom_features = Feature(shape=(None, 75))
  degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
  membership = Feature(shape=(None,), dtype=tf.int32)

  deg_adjs = []
  for i in range(0, 10 + 1):
    deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
    deg_adjs.append(deg_adj)
  gc1 = GraphConv(
      64,
      activation_fn=tf.nn.relu,
      in_layers=[atom_features, degree_slice, membership] + deg_adjs)
  batch_norm1 = BatchNorm(in_layers=[gc1])
  gp1 = GraphPool(in_layers=[batch_norm1, degree_slice, membership] + deg_adjs)
  gc2 = GraphConv(
      64,
      activation_fn=tf.nn.relu,
      in_layers=[gp1, degree_slice, membership] + deg_adjs)
  batch_norm2 = BatchNorm(in_layers=[gc2])
  gp2 = GraphPool(in_layers=[batch_norm2, degree_slice, membership] + deg_adjs)
  dense = Dense(out_channels=128, activation_fn=None, in_layers=[gp2])
  batch_norm3 = BatchNorm(in_layers=[dense])
  gg1 = GraphGather(
      batch_size=batch_size,
      activation_fn=tf.nn.tanh,
      in_layers=[batch_norm3, degree_slice, membership] + deg_adjs)

  costs = []
  labels = []
  for task in tasks:
    classification = Dense(out_channels=2, activation_fn=None, in_layers=[gg1])
    softmax = SoftMax(in_layers=[classification])
    model.add_output(softmax)
    label = Label(shape=(None, 2))
    labels.append(label)
    cost = SoftMaxCrossEntropy(in_layers=[label, classification])
    costs.append(cost)
  entropy = Concat(in_layers=costs)
  task_weights = Weights(shape=(None, len(tasks)))
  loss = WeightedError(in_layers=[entropy, task_weights])
  model.set_loss(loss)

  def feed_dict_generator(dataset, batch_size, epochs=1):
    for epoch in range(epochs):
      for ind, (X_b, y_b, w_b, ids_b) in enumerate(
          dataset.iterbatches(batch_size, pad_batches=True)):
        d = {}
        for index, label in enumerate(labels):
          d[label] = to_one_hot(y_b[:, index])
        d[task_weights] = w_b
        multiConvMol = ConvMol.agglomerate_mols(X_b)
        d[atom_features] = multiConvMol.get_atom_features()
        d[degree_slice] = multiConvMol.deg_slice
        d[membership] = multiConvMol.membership
        for i in range(1, len(multiConvMol.get_deg_adjacency_lists())):
          d[deg_adjs[i - 1]] = multiConvMol.get_deg_adjacency_lists()[i]
        yield d

  return model, feed_dict_generator, labels, task_weights
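# A minimal re-implementation sketch of the to_one_hot helper the feed
# generator relies on (the real DeepChem utility may differ in details):
# integer class labels for one task become (batch, n_classes) one-hot rows.
import numpy as np

def to_one_hot_sketch(y, n_classes=2):
  out = np.zeros((len(y), n_classes))
  out[np.arange(len(y)), y.astype(int)] = 1.0
  return out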
def sluice_model(batch_size, tasks):
  model = TensorGraph(
      model_dir=model_dir,
      batch_size=batch_size,
      use_queue=False,
      tensorboard=True)
  atom_features = Feature(shape=(None, 75))
  degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
  membership = Feature(shape=(None,), dtype=tf.int32)

  sluice_loss = []
  deg_adjs = []
  for i in range(0, 10 + 1):
    deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
    deg_adjs.append(deg_adj)

  gc1 = GraphConv(
      64,
      activation_fn=tf.nn.relu,
      in_layers=[atom_features, degree_slice, membership] + deg_adjs)
  as1 = AlphaShare(in_layers=[gc1, gc1])
  sluice_loss.append(gc1)
  batch_norm1a = BatchNorm(in_layers=[as1[0]])
  batch_norm1b = BatchNorm(in_layers=[as1[1]])
  gp1a = GraphPool(
      in_layers=[batch_norm1a, degree_slice, membership] + deg_adjs)
  gp1b = GraphPool(
      in_layers=[batch_norm1b, degree_slice, membership] + deg_adjs)

  gc2a = GraphConv(
      64,
      activation_fn=tf.nn.relu,
      in_layers=[gp1a, degree_slice, membership] + deg_adjs)
  gc2b = GraphConv(
      64,
      activation_fn=tf.nn.relu,
      in_layers=[gp1b, degree_slice, membership] + deg_adjs)
  as2 = AlphaShare(in_layers=[gc2a, gc2b])
  sluice_loss.append(gc2a)
  sluice_loss.append(gc2b)
  batch_norm2a = BatchNorm(in_layers=[as2[0]])
  batch_norm2b = BatchNorm(in_layers=[as2[1]])
  gp2a = GraphPool(
      in_layers=[batch_norm2a, degree_slice, membership] + deg_adjs)
  gp2b = GraphPool(
      in_layers=[batch_norm2b, degree_slice, membership] + deg_adjs)

  densea = Dense(out_channels=128, activation_fn=None, in_layers=[gp2a])
  denseb = Dense(out_channels=128, activation_fn=None, in_layers=[gp2b])
  batch_norm3a = BatchNorm(in_layers=[densea])
  batch_norm3b = BatchNorm(in_layers=[denseb])
  as3 = AlphaShare(in_layers=[batch_norm3a, batch_norm3b])
  sluice_loss.append(batch_norm3a)
  sluice_loss.append(batch_norm3b)

  gg1a = GraphGather(
      batch_size=batch_size,
      activation_fn=tf.nn.tanh,
      in_layers=[as3[0], degree_slice, membership] + deg_adjs)
  gg1b = GraphGather(
      batch_size=batch_size,
      activation_fn=tf.nn.tanh,
      in_layers=[as3[1], degree_slice, membership] + deg_adjs)

  costs = []
  labels = []
  count = 0
  for task in tasks:
    if count < len(tasks) / 2:
      classification = Dense(
          out_channels=2, activation_fn=None, in_layers=[gg1a])
      print("first half:")
      print(task)
    else:
      classification = Dense(
          out_channels=2, activation_fn=None, in_layers=[gg1b])
      print('second half')
      print(task)
    count += 1
    softmax = SoftMax(in_layers=[classification])
    model.add_output(softmax)
    label = Label(shape=(None, 2))
    labels.append(label)
    cost = SoftMaxCrossEntropy(in_layers=[label, classification])
    costs.append(cost)

  entropy = Concat(in_layers=costs)
  task_weights = Weights(shape=(None, len(tasks)))
  task_loss = WeightedError(in_layers=[entropy, task_weights])
  s_cost = SluiceLoss(in_layers=sluice_loss)
  total_loss = Add(in_layers=[task_loss, s_cost])
  model.set_loss(total_loss)

  def feed_dict_generator(dataset, batch_size, epochs=1):
    for epoch in range(epochs):
      for ind, (X_b, y_b, w_b, ids_b) in enumerate(
          dataset.iterbatches(batch_size, pad_batches=True)):
        d = {}
        for index, label in enumerate(labels):
          d[label] = to_one_hot(y_b[:, index])
        d[task_weights] = w_b
        multiConvMol = ConvMol.agglomerate_mols(X_b)
        d[atom_features] = multiConvMol.get_atom_features()
        d[degree_slice] = multiConvMol.deg_slice
        d[membership] = multiConvMol.membership
        for i in range(1, len(multiConvMol.get_deg_adjacency_lists())):
          d[deg_adjs[i - 1]] = multiConvMol.get_deg_adjacency_lists()[i]
        yield d

  return model, feed_dict_generator, labels, task_weights
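# Sketch of what an AlphaShare unit computes between the two task towers
# above (assumed form, in the spirit of cross-stitch networks): each pair of
# activations is remixed through a small learned matrix so either branch can
# borrow features from the other.
import numpy as np

alpha = np.array([[0.9, 0.1],
                  [0.1, 0.9]])  # learned mixing weights (illustrative values)
a = np.random.rand(8, 64)       # branch-a activations
b = np.random.rand(8, 64)       # branch-b activations
a_out = alpha[0, 0] * a + alpha[0, 1] * b
b_out = alpha[1, 0] * a + alpha[1, 1] * b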
def __init__(self,
             n_tasks,
             n_features,
             layer_sizes=[1000],
             weight_init_stddevs=0.02,
             bias_init_consts=1.0,
             weight_decay_penalty=0.0,
             weight_decay_penalty_type="l2",
             dropouts=0.5,
             activation_fns=tf.nn.relu,
             n_classes=2,
             **kwargs):
  """Create a MultitaskClassifier.

  In addition to the following arguments, this class also accepts all the
  keyword arguments from TensorGraph.

  Parameters
  ----------
  n_tasks: int
    number of tasks
  n_features: int
    number of features
  layer_sizes: list
    the size of each dense layer in the network.  The length of this list
    determines the number of layers.
  weight_init_stddevs: list or float
    the standard deviation of the distribution to use for weight
    initialization of each layer.  The length of this list should equal
    len(layer_sizes).  Alternatively this may be a single value instead of a
    list, in which case the same value is used for every layer.
  bias_init_consts: list or float
    the value to initialize the biases in each layer to.  The length of this
    list should equal len(layer_sizes).  Alternatively this may be a single
    value instead of a list, in which case the same value is used for every
    layer.
  weight_decay_penalty: float
    the magnitude of the weight decay penalty to use
  weight_decay_penalty_type: str
    the type of penalty to use for weight decay, either 'l1' or 'l2'
  dropouts: list or float
    the dropout probability to use for each layer.  The length of this list
    should equal len(layer_sizes).  Alternatively this may be a single value
    instead of a list, in which case the same value is used for every layer.
  activation_fns: list or object
    the TensorFlow activation function to apply to each layer.  The length
    of this list should equal len(layer_sizes).  Alternatively this may be a
    single value instead of a list, in which case the same value is used for
    every layer.
  n_classes: int
    the number of classes
  """
  super(MultitaskClassifier, self).__init__(**kwargs)
  self.n_tasks = n_tasks
  self.n_features = n_features
  self.n_classes = n_classes
  n_layers = len(layer_sizes)
  if not isinstance(weight_init_stddevs, collections.Sequence):
    weight_init_stddevs = [weight_init_stddevs] * n_layers
  if not isinstance(bias_init_consts, collections.Sequence):
    bias_init_consts = [bias_init_consts] * n_layers
  if not isinstance(dropouts, collections.Sequence):
    dropouts = [dropouts] * n_layers
  if not isinstance(activation_fns, collections.Sequence):
    activation_fns = [activation_fns] * n_layers

  # Add the input features.
  mol_features = Feature(shape=(None, n_features))
  prev_layer = mol_features

  # Add the dense layers
  for size, weight_stddev, bias_const, dropout, activation_fn in zip(
      layer_sizes, weight_init_stddevs, bias_init_consts, dropouts,
      activation_fns):
    layer = Dense(
        in_layers=[prev_layer],
        out_channels=size,
        activation_fn=activation_fn,
        weights_initializer=TFWrapper(
            tf.truncated_normal_initializer, stddev=weight_stddev),
        biases_initializer=TFWrapper(
            tf.constant_initializer, value=bias_const))
    if dropout > 0.0:
      layer = Dropout(dropout, in_layers=[layer])
    prev_layer = layer

  # Compute the loss function for each label.
  self.neural_fingerprint = prev_layer
  logits = Reshape(
      shape=(-1, n_tasks, n_classes),
      in_layers=[
          Dense(in_layers=[prev_layer], out_channels=n_tasks * n_classes)
      ])
  output = SoftMax(logits)
  self.add_output(output)
  labels = Label(shape=(None, n_tasks, n_classes))
  weights = Weights(shape=(None, n_tasks))
  loss = SoftMaxCrossEntropy(in_layers=[labels, logits])
  weighted_loss = WeightedError(in_layers=[loss, weights])
  if weight_decay_penalty != 0.0:
    weighted_loss = WeightDecay(
        weight_decay_penalty,
        weight_decay_penalty_type,
        in_layers=[weighted_loss])
  self.set_loss(weighted_loss)
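# Sketch of the penalty the WeightDecay wrapper adds to the loss (assumed
# form): an L1 or L2 norm over the model's trainable weight matrices, scaled
# by the penalty coefficient.
import numpy as np

def weight_decay_sketch(weight_mats, penalty, kind="l2"):
  if kind == "l2":
    return penalty * sum(np.sum(w * w) for w in weight_mats)
  return penalty * sum(np.sum(np.abs(w)) for w in weight_mats)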