def __init__(self, max_atom_types, output_channels):
    super(GCN, self).__init__()
    self.gcn1 = GCNConv(max_atom_types, 200)
    self.gcn2 = GCNConv(200, 150)
    self.dropout1 = nn.Dropout(0.25)
    self.linear1 = exnn.Linear(100)
    self.linear2 = exnn.Linear(output_channels)
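Only the constructor is shown. A forward pass for this module might look like the sketch below; the pooling choice (global_mean_pool) and activation placement are assumptions, not code from the source project:

import torch.nn.functional as F
from torch_geometric.nn import global_mean_pool

# Hypothetical forward for the GCN above; the lazy exnn.Linear layers
# bind their in_features (150 and 100 respectively) on the first call.
def forward(self, x, edge_index, batch):
    x = F.relu(self.gcn1(x, edge_index))
    x = F.relu(self.gcn2(x, edge_index))
    x = global_mean_pool(x, batch)   # one feature vector per graph
    x = self.dropout1(x)
    x = F.relu(self.linear1(x))
    return self.linear2(x)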
Example #2
def __init__(self):
    super(Net, self).__init__()
    self.conv = nn.Sequential(exnn.Conv2d(10, kernel_size=5),
                              nn.MaxPool2d(2), nn.ReLU(),
                              exnn.Conv2d(20, kernel_size=5),
                              nn.Dropout2d(), nn.MaxPool2d(2), nn.ReLU())
    self.linear = nn.Sequential(exnn.Linear(320, 50), nn.ReLU(),
                                nn.Dropout(), exnn.Linear(50, 10),
                                nn.LogSoftmax(dim=1))
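This mirrors the classic MNIST CNN, but the forward pass is omitted; the flatten step below is an assumption, chosen to match the hard-coded in_features of 320 (20 channels x 4 x 4 spatial positions for 28x28 inputs):

# Hypothetical forward for Net above.
def forward(self, x):
    x = self.conv(x)
    x = x.view(x.size(0), -1)   # flatten to (batch, 320)
    return self.linear(x)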
Example #3
def __init__(self, out_channels, batch_size):
    super(Tox21ConcatFlatten, self).__init__()
    self.out_channels = out_channels
    self.batch_size = batch_size
    self.linear = exnn.Linear(self.out_channels)
    self.relu = nn.ReLU()
    self.pool = Tox21GlobalMeanPool()
Example #4
def __init__(self, max_atom_types, output_channels):
    super(GCN, self).__init__()
    self.g1 = GCNConv(100, 42)
    self.g2 = GCNConv(42, 24)
    self.g3 = GCNConv(24, 16)
    self.l1 = nn.Linear(16, 10)
    self.l2 = exnn.Linear(output_channels)
Example #5
def __init__(self, out_channels, batch_size, activation=True):
    super(Tox21Linear, self).__init__()
    self.batch_size = batch_size
    self.pool = Tox21GlobalMeanPool()
    self.out_channels = out_channels
    self.linear = exnn.Linear(out_channels)
    self.relu = nn.ReLU()
    self.activation = activation
Example #6
def __init__(self, out_channels, activation='relu'):
    super(FlattenLinear, self).__init__()
    self.out_channels = out_channels
    self.linear = exnn.Linear(self.out_channels)
    self.flatten = exnn.Flatten()
    if activation == 'relu':
        self.activation = nn.ReLU()
    else:
        self.activation = nn.Identity()
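FlattenLinear's forward pass is not part of the snippet; presumably it flattens, projects, and applies the selected activation, roughly:

# Hypothetical forward consistent with the members defined above.
def forward(self, x):
    x = self.flatten(x)          # collapse to (batch, features)
    x = self.linear(x)           # lazy projection to out_channels
    return self.activation(x)    # nn.ReLU() or nn.Identity()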
Example #7
def __init__(self, out_channels, batch_size):
    super(Tox21GCNorLinear, self).__init__()
    self.batch_size = batch_size
    self.linear = exnn.Linear(out_channels)
    self.gcn = LazyGCNConv(out_channels)
    self.relu = nn.ReLU()
Example #8
def __init__(self, out_channels):
    super(Tox21ConcatLinear, self).__init__()
    self.pool = Tox21GlobalMeanPool()
    self.linear = exnn.Linear(out_channels)
Example #9
def __init__(self, out_channels):
    super(ChemblLinear, self).__init__()
    self.out_channels = out_channels
    self.l = exnn.Linear(out_channels)
    self.relu = nn.ReLU()
Example #10
def test_linear():
    net = exnn.Linear(3)
    x = torch.randn(10, 20)
    y = net(x)
    assert list(y.shape) == [10, 3]
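The test shows the point of exnn.Linear: in_features is never specified and is inferred from the first input (20 here). Recent PyTorch offers the same deferred shape inference natively; for comparison, a minimal sketch with torch.nn.LazyLinear:

import torch
import torch.nn as nn

# nn.LazyLinear (PyTorch >= 1.8) resolves in_features on the first
# forward call, so only out_features is given up front.
net = nn.LazyLinear(3)
x = torch.randn(10, 20)
y = net(x)                      # in_features inferred as 20
assert list(y.shape) == [10, 3]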
Example #11
def test_cuda_linear():
    net = exnn.Linear(3).to('cuda')
    x = torch.randn(10, 20).cuda()
    y = net(x)
    assert list(y.shape) == [10, 3]
Example #12
def __init__(self, out_channels):
    super(ConcatFlatten, self).__init__()
    self.out_channels = out_channels
    self.linear = exnn.Linear(self.out_channels)
Example #13
def __get_identity_or_linear_at_random(self, out_channels: int):
    return nn.Identity() if random.randrange(2) == 0 else exnn.Linear(out_channels)
Example #14
def __init__(self, out_channels):
    super(ChemblConcatFlatten, self).__init__()
    self.out_channels = out_channels
    self.linear = exnn.Linear(self.out_channels)
    self.relu = nn.ReLU()
Example #15
def __init__(self):
    super(MyLinear, self).__init__()
    self.linear = nn.Sequential(nn.Linear(20, 10), exnn.Linear(3))
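Fixed and lazy layers compose freely: the lazy exnn.Linear(3) infers in_features=10 from the nn.Linear(20, 10) ahead of it on the first forward pass. A hypothetical check (MyLinear's forward is not shown, so the Sequential is called directly):

model = MyLinear()
y = model.linear(torch.randn(4, 20))   # second layer binds in_features=10 here
assert list(y.shape) == [4, 3]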
Example #16
def add_layer(self, v: int, module):
    """Attach the torch module for DAG node ``v`` and return the
    feature vector describing that node."""
    previous_nodes = [f"{u}" for (_, u) in self.g_inv.edges([v])]
    out_channels = self.output_channels[v]
    node_feature = None
    if v in self.starts:
        # Input nodes carry no trainable module.
        module.add_input_node(f"{v}")
        node_feature_dict = dict(identity=1)
        node_feature = self.module_vec.get_vector(node_feature_dict)
    elif v in self.ends:
        # Output nodes flatten and project to the requested width.
        module.add_node(f"{v}",
                        previous=previous_nodes,
                        module=FlattenLinear(out_channels))
        node_feature_dict = dict(flatten=1,
                                 linear=1,
                                 out_channels=out_channels)
        node_feature = self.module_vec.get_vector(node_feature_dict)
    elif self.is_concat_flatten_node(v):
        module.add_node(f"{v}",
                        previous=previous_nodes,
                        module=ConcatFlatten(out_channels))
        node_feature_dict = dict(flatten=1,
                                 concat=1,
                                 out_channels=out_channels)
        node_feature = self.module_vec.get_vector(node_feature_dict)
    elif self.is_flatten_node(v):
        module.add_node(f"{v}",
                        previous=previous_nodes,
                        module=FlattenLinear(out_channels))
        node_feature_dict = dict(flatten=1,
                                 linear=1,
                                 out_channels=out_channels)
        node_feature = self.module_vec.get_vector(node_feature_dict)
    elif self.is_concat_conv_node(v):
        # Pick a kernel/stride pair mapping the input size to the
        # required output size.
        k, s = find_conv_layer(self.node_input_sizes[v],
                               self.node_output_sizes[v],
                               self.kernel_sizes, self.strides)
        module.add_node(f"{v}",
                        previous=previous_nodes,
                        module=ConcatConv(out_channels=out_channels,
                                          kernel_size=k,
                                          stride=s))
        node_feature_dict = dict(concat=1,
                                 conv2d=1,
                                 kernel=k,
                                 stride=s,
                                 out_channels=out_channels)
        node_feature = self.module_vec.get_vector(node_feature_dict)
    elif self.is_concat_node(v):
        module.add_node(f"{v}",
                        previous=previous_nodes,
                        module=Concatenate())
        node_feature_dict = dict(concat=1)
        node_feature = self.module_vec.get_vector(node_feature_dict)
    elif self.is_linear_node(v):
        module.add_node(f"{v}",
                        previous=previous_nodes,
                        module=exnn.Linear(out_channels))
        node_feature_dict = dict(linear=1, out_channels=out_channels)
        node_feature = self.module_vec.get_vector(node_feature_dict)
    elif self.node_output_sizes[v] == self.node_input_sizes[v]:
        # Shape-preserving nodes become plain activations.
        module.add_node(f"{v}", previous=previous_nodes, module=nn.ReLU())
        node_feature_dict = dict(relu=1)
        node_feature = self.module_vec.get_vector(node_feature_dict)
    else:
        k, s = find_conv_layer(self.node_input_sizes[v],
                               self.node_output_sizes[v],
                               self.kernel_sizes, self.strides)
        module.add_node(f"{v}",
                        previous=previous_nodes,
                        module=conv2d(out_channels=out_channels,
                                      kernel_size=k,
                                      stride=s))
        node_feature_dict = dict(conv2d=1,
                                 kernel=k,
                                 stride=s,
                                 out_channels=out_channels)
        node_feature = self.module_vec.get_vector(node_feature_dict)
    return node_feature
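Each branch of add_layer pairs the module inserted into the growing network with a small feature dictionary (layer type, kernel, stride, out_channels), so every DAG node also yields a fixed-length feature vector via module_vec.get_vector. exnn.Linear is the fallback wherever a node only needs a projection to a known output width without knowing its input width in advance.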
Example #17
def test_initialize_with_kwargs():
    net = exnn.Linear(out_features=30)
    x = torch.randn(10, 20)
    y = net(x)
    assert list(y.shape) == [10, 30]
Example #18
def _get_linear(out_channels):
    return exnn.Linear(out_channels)
Example #19
def test_cuda_linear_with_sequential():
    net = nn.Sequential(exnn.Linear(3))
    x = torch.randn(10, 20).cuda()
    net = net.cuda()
    y = net(x)
    assert list(y.shape) == [10, 3]
Example #20
def __init__(self, out_channels):
    super(FlattenLinear, self).__init__()
    self.out_channels = out_channels
    self.linear = exnn.Linear(self.out_channels)
    self.flatten = exnn.Flatten()
Example #21
def __init__(self):
    super(MyLinear, self).__init__()
    self.linear = exnn.Linear(3)
Example #22
File: layer.py Project: 0h-n0/thdbonas
def __init__(self, out_channels):
    super(FlattenLinear, self).__init__()
    self.out_channels = out_channels
    self.linear = exnn.Linear(self.out_channels)
    self.relu = nn.ReLU()