def forward(self, g, n_feat, e_feat):
    """Predict molecule labels

    Parameters
    ----------
    g : DGLGraph
        Input DGLGraph for molecule(s)
    n_feat : tensor of dtype float32 and shape (B1, D1)
        Node features. B1 for number of nodes and D1 for the node feature size.
    e_feat : tensor of dtype float32 and shape (B2, D2)
        Edge features. B2 for number of edges and D2 for the edge feature size.

    Returns
    -------
    res : Predicted labels
    """
    out = F.relu(self.lin0(n_feat))                 # (B1, H1)
    h = out.unsqueeze(0)                            # (1, B1, H1)
    c = torch.zeros_like(h)

    for i in range(self.num_step_message_passing):
        m = F.relu(self.conv(g, out, e_feat))       # (B1, H1)
        if self.lstm_as_gate:
            out, (h, c) = self.lstm(m.unsqueeze(0), (h, c))
        else:
            out, h = self.gru(m.unsqueeze(0), h)
        out = out.squeeze(0)

    return out
def forward(self, x):
    if self.linear_or_not:
        # If linear model
        return self.linear(x)
    else:
        # If MLP
        h = x
        for i in range(self.num_layers - 1):
            h = F.relu(self.batch_norms[i](self.linears[i](h)))
        return self.linears[-1](h)
def forward(self, x):
    residual = x

    # three successive BN -> ReLU -> conv stages
    out1 = self.bn1(x)
    out1 = F.relu(out1, True)
    out1 = self.conv1(out1)

    out2 = self.bn2(out1)
    out2 = F.relu(out2, True)
    out2 = self.conv2(out2)

    out3 = self.bn3(out2)
    out3 = F.relu(out3, True)
    out3 = self.conv3(out3)

    # concatenate the intermediate feature maps along the channel dimension
    out3 = torch.cat((out1, out2, out3), 1)

    # match the residual's shape before the skip connection
    if self.downsample is not None:
        residual = self.downsample(residual)

    out3 += residual

    return out3
def forward(self, x):
    # 1x1 bottleneck conv
    h = self.conv1(F.relu(x))
    # 3x3 convs
    h = self.conv2(self.activation(h))
    h = self.conv3(self.activation(h))
    # relu before downsample
    h = self.activation(h)
    # downsample
    if self.downsample:
        h = self.downsample(h)
    # final 1x1 conv
    h = self.conv4(h)
    return h + self.shortcut(x)
def forward(self, x):
    x, _ = self.conv1(x)
    x = F.relu(self.bn1(x), True)
    x = F.avg_pool2d(self.conv2(x), 2, stride=2)
    x = self.conv3(x)
    x = self.conv4(x)

    outputs = []
    boundary_channels = []
    tmp_out = None

    ll, boundary_channel = self._sub_layers['m0'](x, tmp_out)
    ll = self._sub_layers['top_m_0'](ll)
    ll = F.relu(
        self._sub_layers['bn_end0'](self._sub_layers['conv_last0'](ll)),
        True)

    # Predict heatmaps
    tmp_out = self._sub_layers['l0'](ll)
    if self.end_relu:
        tmp_out = F.relu(tmp_out)  # HACK: Added relu
    outputs.append(tmp_out)
    boundary_channels.append(boundary_channel)

    return outputs, boundary_channels
def forward(self, x):
    if self.preactivation:
        # h = self.activation(x) # NOT TODAY SATAN
        # Andy's note: This line *must* be an out-of-place ReLU or it
        #              will negatively affect the shortcut connection.
        h = F.relu(x)
    else:
        h = x
    h = self.conv1(h)
    h = self.conv2(self.activation(h))
    if self.downsample:
        h = self.downsample(h)

    return h + self.shortcut(x)  # dogball
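# A minimal sketch, not part of the module above, illustrating the note about
# the out-of-place ReLU: an in-place ReLU mutates the very tensor the shortcut
# branch still needs, so `self.shortcut(x)` would receive rectified activations
# instead of the original input. Assumes only torch is available.
import torch
import torch.nn.functional as F

x = torch.tensor([-1.0, 2.0])
F.relu(x, inplace=True)        # modifies x in place
print(x)                       # tensor([0., 2.]) -- the shortcut would now see this

x = torch.tensor([-1.0, 2.0])
y = F.relu(x)                  # out-of-place: x keeps its negative entries
print(x, y)                    # tensor([-1., 2.]) tensor([0., 2.])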
def forward(self, g, h, efeat):
    # list of hidden representation at each layer (including input)
    hidden_rep = [h]

    for i in range(self.num_layers - 1):
        h = self.ginlayers[i](g, h)
        h = self.batch_norms[i](h)
        h = F.relu(h)
        hidden_rep.append(h)

    score_over_layer = 0

    # perform pooling over all nodes in each graph in every layer
    all_outputs = []
    for i, h in list(enumerate(hidden_rep)):
        pooled_h = self.pool(g, h)
        all_outputs.append(pooled_h)
        score_over_layer += self.drop(self.linears_prediction[i](pooled_h))

    return score_over_layer, all_outputs[1:]
def loss_hinge_dis(dis_fake, dis_real):
    # Hinge loss for the GAN discriminator: real scores are pushed above +1 and
    # fake scores below -1; samples already past the margin contribute zero loss.
    loss_real = torch.mean(F.relu(1. - dis_real))
    loss_fake = torch.mean(F.relu(1. + dis_fake))
    return loss_real, loss_fake
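# A minimal usage sketch (an assumption, not from the source): combining the two
# hinge terms for one discriminator update. `D`, `opt_d`, `real`, and `fake` are
# hypothetical stand-ins for a discriminator, its optimizer, and a batch of
# real/generated images.
d_real = D(real)
d_fake = D(fake.detach())      # detach so no gradient flows into the generator
loss_real, loss_fake = loss_hinge_dis(d_fake, d_real)
d_loss = loss_real + loss_fake
opt_d.zero_grad()
d_loss.backward()
opt_d.step()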
def forward(self, x):
    # conv -> batch norm -> in-place ReLU
    x = self.conv(x)
    x = self.bn(x)
    return F.relu(x, inplace=True)
def forward(self, h):
    # MLP -> batch norm -> ReLU
    h = self.mlp(h)
    h = self.bn(h)
    h = F.relu(h)
    return h