def build_graph(self): """ Building graph structures: """ self.m1_features = Feature(shape=(None, self.n_features)) self.m2_features = Feature(shape=(None, self.n_features)) prev_layer1 = self.m1_features prev_layer2 = self.m2_features for layer_size in self.layer_sizes: prev_layer1 = Dense( out_channels=layer_size, in_layers=[prev_layer1], activation_fn=tf.nn.relu) prev_layer2 = prev_layer1.shared([prev_layer2]) if self.dropout > 0.0: prev_layer1 = Dropout(self.dropout, in_layers=prev_layer1) prev_layer2 = Dropout(self.dropout, in_layers=prev_layer2) readout_m1 = Dense( out_channels=1, in_layers=[prev_layer1], activation_fn=None) readout_m2 = readout_m1.shared([prev_layer2]) self.add_output(Sigmoid(readout_m1) * 4 + 1) self.add_output(Sigmoid(readout_m2) * 4 + 1) self.difference = readout_m1 - readout_m2 label = Label(shape=(None, 1)) loss = HingeLoss(in_layers=[label, self.difference]) self.my_task_weights = Weights(shape=(None, 1)) loss = WeightedError(in_layers=[loss, self.my_task_weights]) self.set_loss(loss)
def build_graph(self):
  self.atom_features = Feature(shape=(None, 75))
  self.degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
  self.membership = Feature(shape=(None,), dtype=tf.int32)
  self.deg_adjs = []
  for i in range(0, 10 + 1):
    deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
    self.deg_adjs.append(deg_adj)
  in_layer = self.atom_features
  for layer_size in self.graph_conv_layers:
    gc1_in = [in_layer, self.degree_slice, self.membership] + self.deg_adjs
    gc1 = GraphConv(layer_size, activation_fn=tf.nn.relu, in_layers=gc1_in)
    batch_norm1 = MyBatchNorm(in_layers=[gc1])
    gp_in = [batch_norm1, self.degree_slice, self.membership] + self.deg_adjs
    in_layer = GraphPool(in_layers=gp_in)
  dense = Dense(
      out_channels=self.dense_layer_size[0],
      activation_fn=tf.nn.relu,
      in_layers=[in_layer])
  batch_norm3 = MyBatchNorm(in_layers=[dense])
  batch_norm3 = Dropout(self.dropout, in_layers=[batch_norm3])
  readout = GraphGather(
      batch_size=self.batch_size,
      activation_fn=tf.nn.tanh,
      in_layers=[batch_norm3, self.degree_slice, self.membership] +
      self.deg_adjs)
  curLayer = readout
  for myind in range(1, len(self.dense_layer_size) - 1):
    curLayer = Dense(
        out_channels=self.dense_layer_size[myind],
        activation_fn=tf.nn.relu,
        in_layers=[curLayer])
    curLayer = Dropout(self.dropout, in_layers=[curLayer])
  classification = Dense(
      out_channels=self.n_tasks, activation_fn=None, in_layers=[curLayer])
  sigmoid = MySigmoid(in_layers=[classification])
  self.add_output(sigmoid)
  self.label = Label(shape=(None, self.n_tasks))
  all_cost = MySigmoidCrossEntropy(in_layers=[self.label, classification])
  self.weights = Weights(shape=(None, self.n_tasks))
  loss = WeightedError(in_layers=[all_cost, self.weights])
  self.set_loss(loss)
  self.mydense = dense
  self.myreadout = readout
  self.myclassification = classification
  self.mysigmoid = sigmoid
  self.myall_cost = all_cost
  self.myloss = loss
def build_graph(self):
  self.vertex_features = Feature(shape=(None, self.max_atoms, 75))
  self.adj_matrix = Feature(shape=(None, self.max_atoms, 1, self.max_atoms))
  self.mask = Feature(shape=(None, self.max_atoms, 1))
  gcnn1 = BatchNorm(
      GraphCNN(
          num_filters=64,
          in_layers=[self.vertex_features, self.adj_matrix, self.mask]))
  gcnn1 = Dropout(self.dropout, in_layers=gcnn1)
  gcnn2 = BatchNorm(
      GraphCNN(num_filters=64, in_layers=[gcnn1, self.adj_matrix, self.mask]))
  gcnn2 = Dropout(self.dropout, in_layers=gcnn2)
  gc_pool, adj_matrix = GraphCNNPool(
      num_vertices=32, in_layers=[gcnn2, self.adj_matrix, self.mask])
  gc_pool = BatchNorm(gc_pool)
  gc_pool = Dropout(self.dropout, in_layers=gc_pool)
  gcnn3 = BatchNorm(GraphCNN(num_filters=32, in_layers=[gc_pool, adj_matrix]))
  gcnn3 = Dropout(self.dropout, in_layers=gcnn3)
  gc_pool2, adj_matrix2 = GraphCNNPool(
      num_vertices=8, in_layers=[gcnn3, adj_matrix])
  gc_pool2 = BatchNorm(gc_pool2)
  gc_pool2 = Dropout(self.dropout, in_layers=gc_pool2)
  flattened = Flatten(in_layers=gc_pool2)
  readout = Dense(
      out_channels=256, activation_fn=tf.nn.relu, in_layers=flattened)
  costs = []
  self.my_labels = []
  for task in range(self.n_tasks):
    if self.mode == 'classification':
      classification = Dense(
          out_channels=2, activation_fn=None, in_layers=[readout])
      softmax = SoftMax(in_layers=[classification])
      self.add_output(softmax)
      label = Label(shape=(None, 2))
      self.my_labels.append(label)
      cost = SoftMaxCrossEntropy(in_layers=[label, classification])
      costs.append(cost)
    if self.mode == 'regression':
      regression = Dense(
          out_channels=1, activation_fn=None, in_layers=[readout])
      self.add_output(regression)
      label = Label(shape=(None, 1))
      self.my_labels.append(label)
      cost = L2Loss(in_layers=[label, regression])
      costs.append(cost)
  if self.mode == "classification":
    entropy = Stack(in_layers=costs, axis=-1)
  elif self.mode == "regression":
    entropy = Stack(in_layers=costs, axis=1)
  self.my_task_weights = Weights(shape=(None, self.n_tasks))
  loss = WeightedError(in_layers=[entropy, self.my_task_weights])
  self.set_loss(loss)
def build_graph(self): """Building graph structures: Features => DTNNEmbedding => DTNNStep => DTNNStep => DTNNGather => Regression """ self.atom_number = Feature(shape=(None, ), dtype=tf.int32) self.distance = Feature(shape=(None, self.n_distance)) self.atom_membership = Feature(shape=(None, ), dtype=tf.int32) self.distance_membership_i = Feature(shape=(None, ), dtype=tf.int32) self.distance_membership_j = Feature(shape=(None, ), dtype=tf.int32) dtnn_embedding = DTNNEmbedding(n_embedding=self.n_embedding, in_layers=[self.atom_number]) if self.dropout > 0.0: dtnn_embedding = Dropout(self.dropout, in_layers=dtnn_embedding) dtnn_layer1 = DTNNStep(n_embedding=self.n_embedding, n_distance=self.n_distance, in_layers=[ dtnn_embedding, self.distance, self.distance_membership_i, self.distance_membership_j ]) if self.dropout > 0.0: dtnn_layer1 = Dropout(self.dropout, in_layers=dtnn_layer1) dtnn_layer2 = DTNNStep(n_embedding=self.n_embedding, n_distance=self.n_distance, in_layers=[ dtnn_layer1, self.distance, self.distance_membership_i, self.distance_membership_j ]) if self.dropout > 0.0: dtnn_layer2 = Dropout(self.dropout, in_layers=dtnn_layer2) dtnn_gather = DTNNGather(n_embedding=self.n_embedding, layer_sizes=[self.n_hidden], n_outputs=self.n_tasks, output_activation=self.output_activation, in_layers=[dtnn_layer2, self.atom_membership]) if self.dropout > 0.0: dtnn_gather = Dropout(self.dropout, in_layers=dtnn_gather) n_tasks = self.n_tasks weights = Weights(shape=(None, n_tasks)) labels = Label(shape=(None, n_tasks)) output = Reshape( shape=(None, n_tasks), in_layers=[Dense(in_layers=dtnn_gather, out_channels=n_tasks)]) self.add_output(output) weighted_loss = ReduceSum(L2Loss(in_layers=[labels, output, weights])) self.set_loss(weighted_loss)
def _build_graph(self):
  self.one_hot_seq = Feature(
      shape=(None, self.pad_length, self.num_amino_acids), dtype=tf.float32)
  conv1 = Conv1D(kernel_size=2, filters=512, in_layers=[self.one_hot_seq])
  maxpool1 = MaxPool1D(strides=2, padding="VALID", in_layers=[conv1])
  conv2 = Conv1D(kernel_size=3, filters=512, in_layers=[maxpool1])
  flattened = Flatten(in_layers=[conv2])
  dense1 = Dense(
      out_channels=400, in_layers=[flattened], activation_fn=tf.nn.tanh)
  dropout = Dropout(dropout_prob=self.dropout_p, in_layers=[dense1])
  output = Dense(out_channels=1, in_layers=[dropout], activation_fn=None)
  self.add_output(output)
  if self.mode == "regression":
    label = Label(shape=(None, 1))
    loss = L2Loss(in_layers=[label, output])
  else:
    raise NotImplementedError(
        "Classification support not added yet. Missing details in paper.")
  weights = Weights(shape=(None,))
  weighted_loss = WeightedError(in_layers=[loss, weights])
  self.set_loss(weighted_loss)
def build_graph(self):
  self.smiles_seqs = Feature(shape=(None, self.seq_length), dtype=tf.int32)
  # Character embedding
  self.Embedding = DTNNEmbedding(
      n_embedding=self.n_embedding,
      periodic_table_length=len(self.char_dict.keys()) + 1,
      in_layers=[self.smiles_seqs])
  self.pooled_outputs = []
  self.conv_layers = []
  for filter_size, num_filter in zip(self.kernel_sizes, self.num_filters):
    # Multiple convolutional layers with different filter widths
    self.conv_layers.append(
        Conv1D(
            kernel_size=filter_size,
            filters=num_filter,
            padding='valid',
            in_layers=[self.Embedding]))
    # Max-over-time pooling
    self.pooled_outputs.append(
        ReduceMax(axis=1, in_layers=[self.conv_layers[-1]]))
  # Concat features from all filters (one feature per filter)
  concat_outputs = Concat(axis=1, in_layers=self.pooled_outputs)
  dropout = Dropout(dropout_prob=self.dropout, in_layers=[concat_outputs])
  dense = Dense(
      out_channels=200, activation_fn=tf.nn.relu, in_layers=[dropout])
  # Highway layer from https://arxiv.org/pdf/1505.00387.pdf
  self.gather = Highway(in_layers=[dense])
  costs = []
  self.labels_fd = []
  for task in range(self.n_tasks):
    if self.mode == "classification":
      classification = Dense(
          out_channels=2, activation_fn=None, in_layers=[self.gather])
      softmax = SoftMax(in_layers=[classification])
      self.add_output(softmax)
      label = Label(shape=(None, 2))
      self.labels_fd.append(label)
      cost = SoftMaxCrossEntropy(in_layers=[label, classification])
      costs.append(cost)
    if self.mode == "regression":
      regression = Dense(
          out_channels=1, activation_fn=None, in_layers=[self.gather])
      self.add_output(regression)
      label = Label(shape=(None, 1))
      self.labels_fd.append(label)
      cost = L2Loss(in_layers=[label, regression])
      costs.append(cost)
  if self.mode == "classification":
    all_cost = Stack(in_layers=costs, axis=1)
  elif self.mode == "regression":
    all_cost = Stack(in_layers=costs, axis=1)
  self.weights = Weights(shape=(None, self.n_tasks))
  loss = WeightedError(in_layers=[all_cost, self.weights])
  self.set_loss(loss)
def _build_graph(self):
  self.smiles_seqs = Feature(shape=(None, self.seq_length), dtype=tf.int32)
  # Character embedding
  Embedding = DTNNEmbedding(
      n_embedding=self.n_embedding,
      periodic_table_length=len(self.char_dict.keys()) + 1,
      in_layers=[self.smiles_seqs])
  pooled_outputs = []
  conv_layers = []
  for filter_size, num_filter in zip(self.kernel_sizes, self.num_filters):
    # Multiple convolutional layers with different filter widths
    conv_layers.append(
        Conv1D(
            kernel_size=filter_size,
            filters=num_filter,
            padding='valid',
            in_layers=[Embedding]))
    # Max-over-time pooling
    pooled_outputs.append(ReduceMax(axis=1, in_layers=[conv_layers[-1]]))
  # Concat features from all filters (one feature per filter)
  concat_outputs = Concat(axis=1, in_layers=pooled_outputs)
  dropout = Dropout(dropout_prob=self.dropout, in_layers=[concat_outputs])
  dense = Dense(
      out_channels=200, activation_fn=tf.nn.relu, in_layers=[dropout])
  # Highway layer from https://arxiv.org/pdf/1505.00387.pdf
  gather = Highway(in_layers=[dense])
  if self.mode == "classification":
    logits = Dense(
        out_channels=self.n_tasks * 2, activation_fn=None, in_layers=[gather])
    logits = Reshape(shape=(-1, self.n_tasks, 2), in_layers=[logits])
    output = SoftMax(in_layers=[logits])
    self.add_output(output)
    labels = Label(shape=(None, self.n_tasks, 2))
    loss = SoftMaxCrossEntropy(in_layers=[labels, logits])
  else:
    vals = Dense(
        out_channels=self.n_tasks * 1, activation_fn=None, in_layers=[gather])
    vals = Reshape(shape=(-1, self.n_tasks, 1), in_layers=[vals])
    self.add_output(vals)
    labels = Label(shape=(None, self.n_tasks, 1))
    loss = ReduceSum(L2Loss(in_layers=[labels, vals]))
  weights = Weights(shape=(None, self.n_tasks))
  weighted_loss = WeightedError(in_layers=[loss, weights])
  self.set_loss(weighted_loss)
def __init__(self,
             n_tasks,
             n_features,
             layer_sizes=[1000],
             weight_init_stddevs=0.02,
             bias_init_consts=1.0,
             weight_decay_penalty=0.0,
             weight_decay_penalty_type="l2",
             dropouts=0.5,
             activation_fns=tf.nn.relu,
             n_classes=2,
             **kwargs):
  """Create a MultitaskClassifier.

  In addition to the following arguments, this class also accepts all the
  keyword arguments from TensorGraph.

  Parameters
  ----------
  n_tasks: int
    number of tasks
  n_features: int
    number of features
  layer_sizes: list
    the size of each dense layer in the network.  The length of this list
    determines the number of layers.
  weight_init_stddevs: list or float
    the standard deviation of the distribution to use for weight
    initialization of each layer.  The length of this list should equal
    len(layer_sizes).  Alternatively this may be a single value instead of a
    list, in which case the same value is used for every layer.
  bias_init_consts: list or float
    the value to initialize the biases in each layer to.  The length of this
    list should equal len(layer_sizes).  Alternatively this may be a single
    value instead of a list, in which case the same value is used for every
    layer.
  weight_decay_penalty: float
    the magnitude of the weight decay penalty to use
  weight_decay_penalty_type: str
    the type of penalty to use for weight decay, either 'l1' or 'l2'
  dropouts: list or float
    the dropout probability to use for each layer.  The length of this list
    should equal len(layer_sizes).  Alternatively this may be a single value
    instead of a list, in which case the same value is used for every layer.
  activation_fns: list or object
    the Tensorflow activation function to apply to each layer.  The length
    of this list should equal len(layer_sizes).  Alternatively this may be a
    single value instead of a list, in which case the same value is used for
    every layer.
  n_classes: int
    the number of classes
  """
  super(MultitaskClassifier, self).__init__(**kwargs)
  self.n_tasks = n_tasks
  self.n_features = n_features
  self.n_classes = n_classes
  n_layers = len(layer_sizes)
  if not isinstance(weight_init_stddevs, collections.Sequence):
    weight_init_stddevs = [weight_init_stddevs] * n_layers
  if not isinstance(bias_init_consts, collections.Sequence):
    bias_init_consts = [bias_init_consts] * n_layers
  if not isinstance(dropouts, collections.Sequence):
    dropouts = [dropouts] * n_layers
  if not isinstance(activation_fns, collections.Sequence):
    activation_fns = [activation_fns] * n_layers

  # Add the input features.
  mol_features = Feature(shape=(None, n_features))
  prev_layer = mol_features

  # Add the dense layers
  for size, weight_stddev, bias_const, dropout, activation_fn in zip(
      layer_sizes, weight_init_stddevs, bias_init_consts, dropouts,
      activation_fns):
    layer = Dense(
        in_layers=[prev_layer],
        out_channels=size,
        activation_fn=activation_fn,
        weights_initializer=TFWrapper(
            tf.truncated_normal_initializer, stddev=weight_stddev),
        biases_initializer=TFWrapper(
            tf.constant_initializer, value=bias_const))
    if dropout > 0.0:
      layer = Dropout(dropout, in_layers=[layer])
    prev_layer = layer

  # Compute the loss function for each label.
  self.neural_fingerprint = prev_layer
  logits = Reshape(
      shape=(-1, n_tasks, n_classes),
      in_layers=[
          Dense(in_layers=[prev_layer], out_channels=n_tasks * n_classes)
      ])
  output = SoftMax(logits)
  self.add_output(output)
  labels = Label(shape=(None, n_tasks, n_classes))
  weights = Weights(shape=(None, n_tasks))
  loss = SoftMaxCrossEntropy(in_layers=[labels, logits])
  weighted_loss = WeightedError(in_layers=[loss, weights])
  if weight_decay_penalty != 0.0:
    weighted_loss = WeightDecay(
        weight_decay_penalty,
        weight_decay_penalty_type,
        in_layers=[weighted_loss])
  self.set_loss(weighted_loss)
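As a usage illustration for the constructor above, the following sketch fits a MultitaskClassifier on random data. The array shapes, layer sizes, and epoch count are hypothetical placeholders; it assumes deepchem's NumpyDataset and the legacy TensorGraph fit/predict API are available as shown.

import numpy as np
import deepchem as dc

# Hypothetical multitask data: 1024 input features, 12 binary tasks.
n_samples, n_features, n_tasks = 128, 1024, 12
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y)

# Scalar hyperparameters are broadcast to every layer by the constructor.
model = MultitaskClassifier(
    n_tasks=n_tasks,
    n_features=n_features,
    layer_sizes=[1000, 500],
    dropouts=0.25,
    weight_decay_penalty=1e-4)
model.fit(dataset, nb_epoch=10)
probabilities = model.predict(dataset)  # per-task class probabilities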
def graph_conv_net(batch_size, prior, num_task):
  """Build a TensorGraph for a multilabel classification task.

  Returns the graph together with its feature, label and weight layers.
  """
  tg = TensorGraph(use_queue=False)
  if prior:
    add_on = num_task
  else:
    add_on = 0
  atom_features = Feature(shape=(None, 75 + 2 * add_on))
  circular_features = Feature(shape=(batch_size, 256), dtype=tf.float32)
  degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
  membership = Feature(shape=(None,), dtype=tf.int32)
  deg_adjs = []
  for i in range(0, 10 + 1):
    deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
    deg_adjs.append(deg_adj)
  gc1 = GraphConv(
      64 + add_on,
      activation_fn=tf.nn.elu,
      in_layers=[atom_features, degree_slice, membership] + deg_adjs)
  batch_norm1 = BatchNorm(in_layers=[gc1])
  gp1 = GraphPool(in_layers=[batch_norm1, degree_slice, membership] + deg_adjs)
  gc2 = GraphConv(
      64 + add_on,
      activation_fn=tf.nn.elu,
      in_layers=[gc1, degree_slice, membership] + deg_adjs)
  batch_norm2 = BatchNorm(in_layers=[gc2])
  gp2 = GraphPool(in_layers=[batch_norm2, degree_slice, membership] + deg_adjs)
  add = Concat(in_layers=[gp1, gp2])
  add = Dropout(0.5, in_layers=[add])
  dense = Dense(out_channels=128, activation_fn=tf.nn.elu, in_layers=[add])
  batch_norm3 = BatchNorm(in_layers=[dense])
  readout = GraphGather(
      batch_size=batch_size,
      activation_fn=tf.nn.tanh,
      in_layers=[batch_norm3, degree_slice, membership] + deg_adjs)
  batch_norm4 = BatchNorm(in_layers=[readout])

  # Dense branch over the circular fingerprint features.
  dense1 = Dense(
      out_channels=128, activation_fn=tf.nn.elu, in_layers=[circular_features])
  dense1 = BatchNorm(in_layers=[dense1])
  dense1 = Dropout(0.5, in_layers=[dense1])

  merge_feat = Concat(in_layers=[dense1, batch_norm4])
  merge = Dense(
      out_channels=256, activation_fn=tf.nn.elu, in_layers=[merge_feat])
  costs = []
  labels = []
  for task in range(num_task):
    classification = Dense(
        out_channels=2, activation_fn=None, in_layers=[merge])
    softmax = SoftMax(in_layers=[classification])
    tg.add_output(softmax)
    label = Label(shape=(None, 2))
    labels.append(label)
    cost = SoftMaxCrossEntropy(in_layers=[label, classification])
    costs.append(cost)
  all_cost = Stack(in_layers=costs, axis=1)
  weights = Weights(shape=(None, num_task))
  loss = WeightedError(in_layers=[all_cost, weights])
  tg.set_loss(loss)
  #if prior == True:
  #  return tg, atom_features, circular_features, degree_slice, membership, deg_adjs, labels, weights#, prior_layer
  return tg, atom_features, circular_features, degree_slice, membership, deg_adjs, labels, weights
def build_graph(self): """ Building graph structures: """ self.atom_features = Feature(shape=(None, 75)) self.degree_slice = Feature(shape=(None, 2), dtype=tf.int32) self.membership = Feature(shape=(None,), dtype=tf.int32) self.deg_adjs = [] for i in range(0, 10 + 1): deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32) self.deg_adjs.append(deg_adj) gc1 = GraphConv( 64, activation_fn=tf.nn.relu, in_layers=[self.atom_features, self.degree_slice, self.membership] + self.deg_adjs) batch_norm1 = BatchNorm(in_layers=[gc1]) gp1 = GraphPool(in_layers=[batch_norm1, self.degree_slice, self.membership] + self.deg_adjs) gc2 = GraphConv( 64, activation_fn=tf.nn.relu, in_layers=[gp1, self.degree_slice, self.membership] + self.deg_adjs) batch_norm2 = BatchNorm(in_layers=[gc2]) gp2 = GraphPool(in_layers=[batch_norm2, self.degree_slice, self.membership] + self.deg_adjs) dense = Dense(out_channels=128, activation_fn=tf.nn.relu, in_layers=[gp2]) batch_norm3 = BatchNorm(in_layers=[dense]) readout = GraphGather( batch_size=self.batch_size, activation_fn=tf.nn.tanh, in_layers=[batch_norm3, self.degree_slice, self.membership] + self.deg_adjs) if self.error_bars == True: readout = Dropout(in_layers=[readout], dropout_prob=0.2) costs = [] self.my_labels = [] for task in range(self.n_tasks): if self.mode == 'classification': classification = Dense( out_channels=2, activation_fn=None, in_layers=[readout]) softmax = SoftMax(in_layers=[classification]) self.add_output(softmax) label = Label(shape=(None, 2)) self.my_labels.append(label) cost = SoftMaxCrossEntropy(in_layers=[label, classification]) costs.append(cost) if self.mode == 'regression': regression = Dense( out_channels=1, activation_fn=None, in_layers=[readout]) self.add_output(regression) label = Label(shape=(None, 1)) self.my_labels.append(label) cost = L2Loss(in_layers=[label, regression]) costs.append(cost) if self.mode == "classification": entropy = Concat(in_layers=costs, axis=-1) elif self.mode == "regression": entropy = Stack(in_layers=costs, axis=1) self.my_task_weights = Weights(shape=(None, self.n_tasks)) loss = WeightedError(in_layers=[entropy, self.my_task_weights]) self.set_loss(loss)
def build_graph(self): """ Building graph structures: """ self.atom_features = Feature(shape=(None, self.number_atom_features)) self.degree_slice = Feature(shape=(None, 2), dtype=tf.int32) self.membership = Feature(shape=(None, ), dtype=tf.int32) self.deg_adjs = [] for i in range(0, 10 + 1): deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32) self.deg_adjs.append(deg_adj) in_layer = self.atom_features for layer_size, dropout in zip(self.graph_conv_layers, self.dropout): gc1_in = [in_layer, self.degree_slice, self.membership ] + self.deg_adjs gc1 = GraphConv(layer_size, activation_fn=tf.nn.relu, in_layers=gc1_in) batch_norm1 = BatchNorm(in_layers=[gc1]) if dropout > 0.0: batch_norm1 = Dropout(dropout, in_layers=batch_norm1) gp_in = [batch_norm1, self.degree_slice, self.membership ] + self.deg_adjs in_layer = GraphPool(in_layers=gp_in) dense = Dense(out_channels=self.dense_layer_size, activation_fn=tf.nn.relu, in_layers=[in_layer]) batch_norm3 = BatchNorm(in_layers=[dense]) if self.dropout[-1] > 0.0: batch_norm3 = Dropout(self.dropout[-1], in_layers=batch_norm3) readout = GraphGather( batch_size=self.batch_size, activation_fn=tf.nn.tanh, in_layers=[batch_norm3, self.degree_slice, self.membership] + self.deg_adjs) n_tasks = self.n_tasks weights = Weights(shape=(None, n_tasks)) if self.mode == 'classification': n_classes = self.n_classes labels = Label(shape=(None, n_tasks, n_classes)) logits = Reshape(shape=(None, n_tasks, n_classes), in_layers=[ Dense(in_layers=readout, out_channels=n_tasks * n_classes) ]) logits = TrimGraphOutput([logits, weights]) output = SoftMax(logits) self.add_output(output) loss = SoftMaxCrossEntropy(in_layers=[labels, logits]) weighted_loss = WeightedError(in_layers=[loss, weights]) self.set_loss(weighted_loss) else: labels = Label(shape=(None, n_tasks)) output = Reshape( shape=(None, n_tasks), in_layers=[Dense(in_layers=readout, out_channels=n_tasks)]) output = TrimGraphOutput([output, weights]) self.add_output(output) if self.uncertainty: log_var = Reshape( shape=(None, n_tasks), in_layers=[Dense(in_layers=readout, out_channels=n_tasks)]) log_var = TrimGraphOutput([log_var, weights]) var = Exp(log_var) self.add_variance(var) diff = labels - output weighted_loss = weights * (diff * diff / var + log_var) weighted_loss = ReduceSum(ReduceMean(weighted_loss, axis=[1])) else: weighted_loss = ReduceSum( L2Loss(in_layers=[labels, output, weights])) self.set_loss(weighted_loss)
def __init__(self,
             n_tasks,
             n_features,
             layer_sizes=[1000],
             weight_init_stddevs=[0.02],
             bias_init_consts=[1.0],
             weight_decay_penalty=0.0,
             weight_decay_penalty_type="l2",
             dropouts=[0.5],
             n_classes=2,
             **kwargs):
  """Create a TensorGraphMultiTaskClassifier.

  In addition to the following arguments, this class also accepts all the
  keyword arguments from TensorGraph.

  Parameters
  ----------
  n_tasks: int
    number of tasks
  n_features: int
    number of features
  layer_sizes: list
    the size of each dense layer in the network.  The length of this list
    determines the number of layers.
  weight_init_stddevs: list
    the standard deviation of the distribution to use for weight
    initialization of each layer.  The length of this list should equal
    len(layer_sizes).
  bias_init_consts: list
    the value to initialize the biases in each layer to.  The length of this
    list should equal len(layer_sizes).
  weight_decay_penalty: float
    the magnitude of the weight decay penalty to use
  weight_decay_penalty_type: str
    the type of penalty to use for weight decay, either 'l1' or 'l2'
  dropouts: list
    the dropout probability to use for each layer.  The length of this list
    should equal len(layer_sizes).
  n_classes: int
    the number of classes
  """
  super(TensorGraphMultiTaskClassifier, self).__init__(
      mode='classification', **kwargs)
  self.n_tasks = n_tasks
  self.n_features = n_features
  self.n_classes = n_classes

  # Add the input features.
  mol_features = Feature(shape=(None, n_features))
  prev_layer = mol_features

  # Add the dense layers
  for size, weight_stddev, bias_const, dropout in zip(
      layer_sizes, weight_init_stddevs, bias_init_consts, dropouts):
    layer = Dense(
        in_layers=[prev_layer],
        out_channels=size,
        activation_fn=tf.nn.relu,
        weights_initializer=TFWrapper(
            tf.truncated_normal_initializer, stddev=weight_stddev),
        biases_initializer=TFWrapper(
            tf.constant_initializer, value=bias_const))
    if dropout > 0.0:
      layer = Dropout(dropout, in_layers=[layer])
    prev_layer = layer

  # Compute the loss function for each label.
  output = Reshape(
      shape=(-1, n_tasks, n_classes),
      in_layers=[
          Dense(in_layers=[prev_layer], out_channels=n_tasks * n_classes)
      ])
  self.add_output(output)
  labels = Label(shape=(None, n_tasks, n_classes))
  weights = Weights(shape=(None, n_tasks))
  loss = SoftMaxCrossEntropy(in_layers=[labels, output])
  weighted_loss = WeightedError(in_layers=[loss, weights])
  if weight_decay_penalty != 0.0:
    weighted_loss = WeightDecay(
        weight_decay_penalty,
        weight_decay_penalty_type,
        in_layers=[weighted_loss])
  self.set_loss(weighted_loss)
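Unlike the newer MultitaskClassifier above, this older constructor does not broadcast scalar hyperparameters: each per-layer argument must be a list whose length matches len(layer_sizes). A hedged sketch with hypothetical sizes, reusing the `dataset` from the earlier example:

# Two hidden layers, so every per-layer argument is a list of length two.
model = TensorGraphMultiTaskClassifier(
    n_tasks=12,
    n_features=1024,
    layer_sizes=[1000, 500],
    weight_init_stddevs=[0.02, 0.02],
    bias_init_consts=[1.0, 1.0],
    dropouts=[0.5, 0.5])
model.fit(dataset, nb_epoch=10)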
def build_graph(self):
  self.atom_features = Feature(shape=(None, self.n_atom_feat))
  self.pair_features = Feature(shape=(None, self.n_pair_feat))
  self.pair_split = Feature(shape=(None,), dtype=tf.int32)
  self.atom_split = Feature(shape=(None,), dtype=tf.int32)
  self.atom_to_pair = Feature(shape=(None, 2), dtype=tf.int32)
  weave_layer1A, weave_layer1P = WeaveLayerFactory(
      n_atom_input_feat=self.n_atom_feat,
      n_pair_input_feat=self.n_pair_feat,
      n_atom_output_feat=self.n_hidden[0],
      n_pair_output_feat=self.n_hidden[0],
      in_layers=[
          self.atom_features, self.pair_features, self.pair_split,
          self.atom_to_pair
      ])
  for myind in range(1, len(self.n_hidden) - 1):
    weave_layer1A, weave_layer1P = WeaveLayerFactory(
        n_atom_input_feat=self.n_hidden[myind - 1],
        n_pair_input_feat=self.n_hidden[myind - 1],
        n_atom_output_feat=self.n_hidden[myind],
        n_pair_output_feat=self.n_hidden[myind],
        update_pair=True,
        in_layers=[
            weave_layer1A, weave_layer1P, self.pair_split, self.atom_to_pair
        ])
  if len(self.n_hidden) > 1:
    # The final weave layer does not update pair features.
    myind = len(self.n_hidden) - 1
    weave_layer1A, weave_layer1P = WeaveLayerFactory(
        n_atom_input_feat=self.n_hidden[myind - 1],
        n_pair_input_feat=self.n_hidden[myind - 1],
        n_atom_output_feat=self.n_hidden[myind],
        n_pair_output_feat=self.n_hidden[myind],
        update_pair=False,
        in_layers=[
            weave_layer1A, weave_layer1P, self.pair_split, self.atom_to_pair
        ])
  dense1 = Dense(
      out_channels=self.n_graph_feat[0],
      activation_fn=tf.nn.tanh,
      in_layers=weave_layer1A)
  #batch_norm1 = BatchNormalization(epsilon=1e-5, mode=1, in_layers=[dense1])
  batch_norm1 = MyBatchNorm(in_layers=[dense1])
  weave_gather = WeaveGather(
      self.batch_size,
      n_input=self.n_graph_feat[0],
      gaussian_expand=False,
      in_layers=[batch_norm1, self.atom_split])
  weave_gatherBatchNorm2 = MyBatchNorm(in_layers=[weave_gather])
  curLayer = weave_gatherBatchNorm2
  for myind in range(1, len(self.n_graph_feat) - 1):
    curLayer = Dense(
        out_channels=self.n_graph_feat[myind],
        activation_fn=tf.nn.relu,
        in_layers=[curLayer])
    curLayer = Dropout(self.dropout, in_layers=[curLayer])
  classification = Dense(
      out_channels=self.n_tasks, activation_fn=None, in_layers=[curLayer])
  sigmoid = MySigmoid(in_layers=[classification])
  self.add_output(sigmoid)
  self.label = Label(shape=(None, self.n_tasks))
  all_cost = MySigmoidCrossEntropy(in_layers=[self.label, classification])
  self.weights = Weights(shape=(None, self.n_tasks))
  loss = WeightedError(in_layers=[all_cost, self.weights])
  self.set_loss(loss)
  self.mydense1 = dense1
  self.mybatch_norm1 = batch_norm1
  self.myweave_gather = weave_gather
  self.myclassification = classification
  self.mysigmoid = sigmoid
  self.myall_cost = all_cost
  self.myloss = loss
deg_adjs.append(deg_adj)

label15 = []
for ts in range(ntask):
  label_t = Label(shape=(None, 2))
  label15.append(label_t)

## Setup Graph Convolution Network
tg = TensorGraph(use_queue=False, learning_rate=0.001, model_dir='ckpt')
gc1 = GraphConv(
    64,
    activation_fn=tf.nn.relu,
    in_layers=[atom_features, degree_slice, membership] + deg_adjs)
bn1 = BatchNorm(in_layers=[gc1])
gp1 = GraphPool(in_layers=[bn1, degree_slice, membership] + deg_adjs)
dp1 = Dropout(0.2, in_layers=gp1)

gc2 = GraphConv(
    64,
    activation_fn=tf.nn.relu,
    in_layers=[dp1, degree_slice, membership] + deg_adjs)
bn2 = BatchNorm(in_layers=[gc2])
gp2 = GraphPool(in_layers=[bn2, degree_slice, membership] + deg_adjs)
dp2 = Dropout(0.5, in_layers=gp2)

gc3 = GraphConv(
    64,
    activation_fn=tf.nn.relu,
    in_layers=[dp2, degree_slice, membership] + deg_adjs)
bn3 = BatchNorm(in_layers=[gc3])
gp3 = GraphPool(in_layers=[bn3, degree_slice, membership] + deg_adjs)
dp3 = Dropout(0.5, in_layers=gp3)
def __init__(self,
             n_tasks,
             n_features,
             alpha_init_stddevs=0.02,
             layer_sizes=[1000],
             weight_init_stddevs=0.02,
             bias_init_consts=1.0,
             weight_decay_penalty=0.0,
             weight_decay_penalty_type="l2",
             dropouts=0.5,
             activation_fns=tf.nn.relu,
             **kwargs):
  """Creates a progressive network.

  Only listing parameters specific to progressive networks here.

  Parameters
  ----------
  n_tasks: int
    Number of tasks
  n_features: int
    Number of input features
  alpha_init_stddevs: list
    List of standard-deviations for alpha in adapter layers.
  layer_sizes: list
    the size of each dense layer in the network.  The length of this list
    determines the number of layers.
  weight_init_stddevs: list or float
    the standard deviation of the distribution to use for weight
    initialization of each layer.  The length of this list should equal
    len(layer_sizes)+1.  The final element corresponds to the output layer.
    Alternatively this may be a single value instead of a list, in which
    case the same value is used for every layer.
  bias_init_consts: list or float
    the value to initialize the biases in each layer to.  The length of this
    list should equal len(layer_sizes)+1.  The final element corresponds to
    the output layer.  Alternatively this may be a single value instead of a
    list, in which case the same value is used for every layer.
  weight_decay_penalty: float
    the magnitude of the weight decay penalty to use
  weight_decay_penalty_type: str
    the type of penalty to use for weight decay, either 'l1' or 'l2'
  dropouts: list or float
    the dropout probability to use for each layer.  The length of this list
    should equal len(layer_sizes).  Alternatively this may be a single value
    instead of a list, in which case the same value is used for every layer.
  activation_fns: list or object
    the Tensorflow activation function to apply to each layer.  The length
    of this list should equal len(layer_sizes).  Alternatively this may be a
    single value instead of a list, in which case the same value is used for
    every layer.
  """
  super(ProgressiveMultitaskRegressor, self).__init__(**kwargs)
  self.n_tasks = n_tasks
  self.n_features = n_features
  self.layer_sizes = layer_sizes
  self.alpha_init_stddevs = alpha_init_stddevs
  self.weight_init_stddevs = weight_init_stddevs
  self.bias_init_consts = bias_init_consts
  self.dropouts = dropouts
  self.activation_fns = activation_fns

  n_layers = len(layer_sizes)
  if not isinstance(weight_init_stddevs, collections.Sequence):
    self.weight_init_stddevs = [weight_init_stddevs] * n_layers
  if not isinstance(alpha_init_stddevs, collections.Sequence):
    self.alpha_init_stddevs = [alpha_init_stddevs] * n_layers
  if not isinstance(bias_init_consts, collections.Sequence):
    self.bias_init_consts = [bias_init_consts] * n_layers
  if not isinstance(dropouts, collections.Sequence):
    self.dropouts = [dropouts] * n_layers
  if not isinstance(activation_fns, collections.Sequence):
    self.activation_fns = [activation_fns] * n_layers

  # Add the input features.
  self.mol_features = Feature(shape=(None, n_features))
  all_layers = {}
  outputs = []
  for task in range(self.n_tasks):
    task_layers = []
    for i in range(n_layers):
      if i == 0:
        prev_layer = self.mol_features
      else:
        prev_layer = all_layers[(i - 1, task)]
        if task > 0:
          lateral_contrib, trainables = self.add_adapter(all_layers, task, i)
          task_layers.extend(trainables)
      layer = Dense(
          in_layers=[prev_layer],
          out_channels=layer_sizes[i],
          activation_fn=None,
          weights_initializer=TFWrapper(
              tf.truncated_normal_initializer,
              stddev=self.weight_init_stddevs[i]),
          biases_initializer=TFWrapper(
              tf.constant_initializer, value=self.bias_init_consts[i]))
      task_layers.append(layer)
      if i > 0 and task > 0:
        layer = layer + lateral_contrib
      assert self.activation_fns[i] is tf.nn.relu, "Only ReLU is supported"
      layer = ReLU(in_layers=[layer])
      if self.dropouts[i] > 0.0:
        layer = Dropout(self.dropouts[i], in_layers=[layer])
      all_layers[(i, task)] = layer

    prev_layer = all_layers[(n_layers - 1, task)]
    layer = Dense(
        in_layers=[prev_layer],
        out_channels=1,
        weights_initializer=TFWrapper(
            tf.truncated_normal_initializer,
            stddev=self.weight_init_stddevs[-1]),
        biases_initializer=TFWrapper(
            tf.constant_initializer, value=self.bias_init_consts[-1]))
    task_layers.append(layer)

    if task > 0:
      lateral_contrib, trainables = self.add_adapter(
          all_layers, task, n_layers)
      task_layers.extend(trainables)
      layer = layer + lateral_contrib
    outputs.append(layer)
    self.add_output(layer)
    task_label = Label(shape=(None, 1))
    task_weight = Weights(shape=(None, 1))
    weighted_loss = ReduceSum(
        L2Loss(in_layers=[task_label, layer, task_weight]))
    self.create_submodel(
        layers=task_layers, loss=weighted_loss, optimizer=None)
  # Weight decay not activated
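A brief, hedged instantiation sketch for the progressive network above; the values are hypothetical placeholders, and the per-task submodels registered via create_submodel are assumed to be trained through the model's usual fit call.

# Progressive network: later task columns receive lateral (adapter)
# connections from earlier columns, so task order matters.
model = ProgressiveMultitaskRegressor(
    n_tasks=4,
    n_features=1024,
    layer_sizes=[512, 512],
    alpha_init_stddevs=0.02,  # broadcast to every adapter layer
    dropouts=0.25)
model.fit(dataset, nb_epoch=20)  # hypothetical regression dataset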
def __init__(self,
             n_tasks,
             n_features,
             layer_sizes=[1000],
             weight_init_stddevs=0.02,
             bias_init_consts=1.0,
             weight_decay_penalty=0.0,
             weight_decay_penalty_type="l2",
             dropouts=0.5,
             activation_fns=tf.nn.relu,
             bypass_layer_sizes=[100],
             bypass_weight_init_stddevs=[.02],
             bypass_bias_init_consts=[1.],
             bypass_dropouts=[.5],
             **kwargs):
  """Create a RobustMultitaskRegressor.

  Parameters
  ----------
  n_tasks: int
    number of tasks
  n_features: int
    number of features
  layer_sizes: list
    the size of each dense layer in the network.  The length of this list
    determines the number of layers.
  weight_init_stddevs: list or float
    the standard deviation of the distribution to use for weight
    initialization of each layer.  The length of this list should equal
    len(layer_sizes).  Alternatively this may be a single value instead of a
    list, in which case the same value is used for every layer.
  bias_init_consts: list or float
    the value to initialize the biases in each layer to.  The length of this
    list should equal len(layer_sizes).  Alternatively this may be a single
    value instead of a list, in which case the same value is used for every
    layer.
  weight_decay_penalty: float
    the magnitude of the weight decay penalty to use
  weight_decay_penalty_type: str
    the type of penalty to use for weight decay, either 'l1' or 'l2'
  dropouts: list or float
    the dropout probability to use for each layer.  The length of this list
    should equal len(layer_sizes).  Alternatively this may be a single value
    instead of a list, in which case the same value is used for every layer.
  activation_fns: list or object
    the Tensorflow activation function to apply to each layer.  The length
    of this list should equal len(layer_sizes).  Alternatively this may be a
    single value instead of a list, in which case the same value is used for
    every layer.
  bypass_layer_sizes: list
    the size of each dense layer in the bypass network.  The length of this
    list determines the number of bypass layers.
  bypass_weight_init_stddevs: list or float
    the standard deviation of the distribution to use for weight
    initialization of bypass layers.  Same requirements as
    weight_init_stddevs.
  bypass_bias_init_consts: list or float
    the value to initialize the biases in bypass layers.  Same requirements
    as bias_init_consts.
  bypass_dropouts: list or float
    the dropout probability to use for bypass layers.  Same requirements as
    dropouts.
  """
  super(RobustMultitaskRegressor, self).__init__(**kwargs)
  self.n_tasks = n_tasks
  self.n_features = n_features
  n_layers = len(layer_sizes)
  if not isinstance(weight_init_stddevs, collections.Sequence):
    weight_init_stddevs = [weight_init_stddevs] * n_layers
  if not isinstance(bias_init_consts, collections.Sequence):
    bias_init_consts = [bias_init_consts] * n_layers
  if not isinstance(dropouts, collections.Sequence):
    dropouts = [dropouts] * n_layers
  if not isinstance(activation_fns, collections.Sequence):
    activation_fns = [activation_fns] * n_layers

  n_bypass_layers = len(bypass_layer_sizes)
  if not isinstance(bypass_weight_init_stddevs, collections.Sequence):
    bypass_weight_init_stddevs = [bypass_weight_init_stddevs] * n_bypass_layers
  if not isinstance(bypass_bias_init_consts, collections.Sequence):
    bypass_bias_init_consts = [bypass_bias_init_consts] * n_bypass_layers
  if not isinstance(bypass_dropouts, collections.Sequence):
    bypass_dropouts = [bypass_dropouts] * n_bypass_layers
  bypass_activation_fns = [activation_fns[0]] * n_bypass_layers

  # Add the input features.
  mol_features = Feature(shape=(None, n_features))
  prev_layer = mol_features

  # Add the shared dense layers
  for size, weight_stddev, bias_const, dropout, activation_fn in zip(
      layer_sizes, weight_init_stddevs, bias_init_consts, dropouts,
      activation_fns):
    layer = Dense(
        in_layers=[prev_layer],
        out_channels=size,
        activation_fn=activation_fn,
        weights_initializer=TFWrapper(
            tf.truncated_normal_initializer, stddev=weight_stddev),
        biases_initializer=TFWrapper(
            tf.constant_initializer, value=bias_const))
    if dropout > 0.0:
      layer = Dropout(dropout, in_layers=[layer])
    prev_layer = layer
  top_multitask_layer = prev_layer

  task_outputs = []
  for i in range(self.n_tasks):
    prev_layer = mol_features
    # Add task-specific bypass layers
    for size, weight_stddev, bias_const, dropout, activation_fn in zip(
        bypass_layer_sizes, bypass_weight_init_stddevs,
        bypass_bias_init_consts, bypass_dropouts, bypass_activation_fns):
      layer = Dense(
          in_layers=[prev_layer],
          out_channels=size,
          activation_fn=activation_fn,
          weights_initializer=TFWrapper(
              tf.truncated_normal_initializer, stddev=weight_stddev),
          biases_initializer=TFWrapper(
              tf.constant_initializer, value=bias_const))
      if dropout > 0.0:
        layer = Dropout(dropout, in_layers=[layer])
      prev_layer = layer
    top_bypass_layer = prev_layer

    if n_bypass_layers > 0:
      task_layer = Concat(
          axis=1, in_layers=[top_multitask_layer, top_bypass_layer])
    else:
      task_layer = top_multitask_layer
    task_out = Dense(in_layers=[task_layer], out_channels=1)
    task_outputs.append(task_out)

  output = Concat(axis=1, in_layers=task_outputs)
  self.add_output(output)
  labels = Label(shape=(None, n_tasks))
  weights = Weights(shape=(None, n_tasks))
  weighted_loss = ReduceSum(L2Loss(in_layers=[labels, output, weights]))
  if weight_decay_penalty != 0.0:
    weighted_loss = WeightDecay(
        weight_decay_penalty,
        weight_decay_penalty_type,
        in_layers=[weighted_loss])
  self.set_loss(weighted_loss)
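To show how the shared trunk and per-task bypass arguments fit together, a hedged sketch of constructing the RobustMultitaskRegressor above; the sizes are hypothetical and the same legacy DeepChem assumptions as the earlier sketches apply.

# One shared 1000-unit trunk plus a small 100-unit bypass network per task.
model = RobustMultitaskRegressor(
    n_tasks=8,
    n_features=1024,
    layer_sizes=[1000],
    dropouts=0.25,
    bypass_layer_sizes=[100],
    bypass_dropouts=[0.25],
    weight_decay_penalty=1e-5)
model.fit(dataset, nb_epoch=20)  # hypothetical regression dataset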
def __init__(self,
             n_tasks,
             n_features,
             layer_sizes=[1000],
             weight_init_stddevs=0.02,
             bias_init_consts=1.0,
             weight_decay_penalty=0.0,
             weight_decay_penalty_type="l2",
             dropouts=0.5,
             activation_fns=tf.nn.relu,
             uncertainty=False,
             **kwargs):
  """Create a MultitaskRegressor.

  In addition to the following arguments, this class also accepts all the
  keyword arguments from TensorGraph.

  Parameters
  ----------
  n_tasks: int
    number of tasks
  n_features: int
    number of features
  layer_sizes: list
    the size of each dense layer in the network.  The length of this list
    determines the number of layers.
  weight_init_stddevs: list or float
    the standard deviation of the distribution to use for weight
    initialization of each layer.  The length of this list should equal
    len(layer_sizes)+1.  The final element corresponds to the output layer.
    Alternatively this may be a single value instead of a list, in which
    case the same value is used for every layer.
  bias_init_consts: list or float
    the value to initialize the biases in each layer to.  The length of this
    list should equal len(layer_sizes)+1.  The final element corresponds to
    the output layer.  Alternatively this may be a single value instead of a
    list, in which case the same value is used for every layer.
  weight_decay_penalty: float
    the magnitude of the weight decay penalty to use
  weight_decay_penalty_type: str
    the type of penalty to use for weight decay, either 'l1' or 'l2'
  dropouts: list or float
    the dropout probability to use for each layer.  The length of this list
    should equal len(layer_sizes).  Alternatively this may be a single value
    instead of a list, in which case the same value is used for every layer.
  activation_fns: list or object
    the Tensorflow activation function to apply to each layer.  The length
    of this list should equal len(layer_sizes).  Alternatively this may be a
    single value instead of a list, in which case the same value is used for
    every layer.
  uncertainty: bool
    if True, include extra outputs and loss terms to enable the uncertainty
    in outputs to be predicted
  """
  super(MultitaskRegressor, self).__init__(**kwargs)
  self.n_tasks = n_tasks
  self.n_features = n_features
  n_layers = len(layer_sizes)
  if not isinstance(weight_init_stddevs, collections.Sequence):
    weight_init_stddevs = [weight_init_stddevs] * (n_layers + 1)
  if not isinstance(bias_init_consts, collections.Sequence):
    bias_init_consts = [bias_init_consts] * (n_layers + 1)
  if not isinstance(dropouts, collections.Sequence):
    dropouts = [dropouts] * n_layers
  if not isinstance(activation_fns, collections.Sequence):
    activation_fns = [activation_fns] * n_layers
  if uncertainty:
    if any(d == 0.0 for d in dropouts):
      raise ValueError(
          'Dropout must be included in every layer to predict uncertainty')

  # Add the input features.
  mol_features = Feature(shape=(None, n_features))
  prev_layer = mol_features

  # Add the dense layers
  for size, weight_stddev, bias_const, dropout, activation_fn in zip(
      layer_sizes, weight_init_stddevs, bias_init_consts, dropouts,
      activation_fns):
    layer = Dense(
        in_layers=[prev_layer],
        out_channels=size,
        activation_fn=activation_fn,
        weights_initializer=TFWrapper(
            tf.truncated_normal_initializer, stddev=weight_stddev),
        biases_initializer=TFWrapper(
            tf.constant_initializer, value=bias_const))
    if dropout > 0.0:
      layer = Dropout(dropout, in_layers=[layer])
    prev_layer = layer
  self.neural_fingerprint = prev_layer

  # Compute the loss function for each label.
  output = Reshape(
      shape=(-1, n_tasks, 1),
      in_layers=[
          Dense(
              in_layers=[prev_layer],
              out_channels=n_tasks,
              weights_initializer=TFWrapper(
                  tf.truncated_normal_initializer,
                  stddev=weight_init_stddevs[-1]),
              biases_initializer=TFWrapper(
                  tf.constant_initializer, value=bias_init_consts[-1]))
      ])
  self.add_output(output)
  labels = Label(shape=(None, n_tasks, 1))
  weights = Weights(shape=(None, n_tasks, 1))
  if uncertainty:
    log_var = Reshape(
        shape=(-1, n_tasks, 1),
        in_layers=[
            Dense(
                in_layers=[prev_layer],
                out_channels=n_tasks,
                weights_initializer=TFWrapper(
                    tf.truncated_normal_initializer,
                    stddev=weight_init_stddevs[-1]),
                biases_initializer=TFWrapper(
                    tf.constant_initializer, value=0.0))
        ])
    var = Exp(log_var)
    self.add_variance(var)
    diff = labels - output
    weighted_loss = weights * (diff * diff / var + log_var)
    weighted_loss = ReduceSum(ReduceMean(weighted_loss, axis=[1, 2]))
  else:
    weighted_loss = ReduceSum(L2Loss(in_layers=[labels, output, weights]))
  if weight_decay_penalty != 0.0:
    weighted_loss = WeightDecay(
        weight_decay_penalty,
        weight_decay_penalty_type,
        in_layers=[weighted_loss])
  self.set_loss(weighted_loss)
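Finally, a hedged sketch of using the uncertainty option wired up above: every layer needs a non-zero dropout (enforced by the constructor), and the variance outputs registered through add_variance back the uncertainty estimate. Values are placeholders, and the predict_uncertainty call is assumed from the legacy TensorGraph API.

model = MultitaskRegressor(
    n_tasks=4,
    n_features=1024,
    layer_sizes=[500, 500],
    dropouts=0.1,       # must be non-zero in every layer for uncertainty
    uncertainty=True)
model.fit(dataset, nb_epoch=20)  # hypothetical regression dataset
# Assumed legacy API: returns predictions and estimated standard deviations.
pred, std = model.predict_uncertainty(dataset)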