Example #1
    def __init__(self, n_actions=4, n_channels=4):
        super().__init__()

        self.phi = Sequential(
            # 32 filters, kernel 3, stride 2, padding 1 (same for all four convs below)
            Conv2d(n_channels, 32, 3, stride=2, padding=1, bias=True),
            ELU(),
            Conv2d(32, 32, 3, stride=2, padding=1, bias=True),
            ELU(),
            Conv2d(32, 32, 3, stride=2, padding=1, bias=True),
            ELU(),
            Conv2d(32, 32, 3, stride=2, padding=1, bias=True),
            ELU(),
            Identity(),  # tap: no-op layer kept as a hook point before flattening
            Flatten(1, -1),
        )

        self.gee = torch.nn.Sequential(
            Linear(2 * 32 * 3 * 3, 256, bias=True),
            ReLU(),
            Linear(256, n_actions, bias=True),
        )

        self.eff = torch.nn.Sequential(
            Linear(32 * 3 * 3 + n_actions, 256, bias=True),
            ReLU(),
            Linear(256, 32 * 3 * 3, bias=True),
        )

        self.n_actions, self.n_emb_dim = n_actions, 32 * 3 * 3
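
A quick way to sanity-check the 32 * 3 * 3 embedding size is to push a dummy observation through the encoder. The snippet below is a hypothetical check, assuming 42x42 observations (a common choice for encoders of this shape); each kernel-3, stride-2, padding-1 convolution halves the spatial size, rounding up, so 42 -> 21 -> 11 -> 6 -> 3.

    import torch
    from torch.nn import Sequential, Conv2d, ELU, Identity, Flatten

    # Same stack as self.phi above, rebuilt standalone for the shape check.
    phi = Sequential(
        Conv2d(4, 32, 3, stride=2, padding=1), ELU(),
        Conv2d(32, 32, 3, stride=2, padding=1), ELU(),
        Conv2d(32, 32, 3, stride=2, padding=1), ELU(),
        Conv2d(32, 32, 3, stride=2, padding=1), ELU(),
        Identity(),
        Flatten(1, -1),
    )
    print(phi(torch.zeros(1, 4, 42, 42)).shape)  # torch.Size([1, 288]) == 32 * 3 * 3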
Example #2
    def __init__(self, num_features, dim_node, dim_graph, config):
        """ GIN model from PyG examples. Output distance matrix.
        https://github.com/rusty1s/pytorch_geometric/blob/master/examples/mutag_gin.py
        """
        super().__init__()

        dim = config["hidden_units"]

        nn1 = Sequential(Linear(num_features, dim), ELU(), Linear(dim, dim))
        self.conv1 = GINConv(nn1)
        self.bn1 = torch.nn.BatchNorm1d(dim)

        nn2 = Sequential(Linear(dim, dim), ELU(), Linear(dim, dim))
        self.conv2 = GINConv(nn2)
        self.bn2 = torch.nn.BatchNorm1d(dim)

        nn3 = Sequential(Linear(dim, dim), ELU(), Linear(dim, dim))
        self.conv3 = GINConv(nn3)
        self.bn3 = torch.nn.BatchNorm1d(dim)

        nn4 = Sequential(Linear(dim, dim), ELU(), Linear(dim, dim))
        self.conv4 = GINConv(nn4)
        self.bn4 = torch.nn.BatchNorm1d(dim)

        nn5 = Sequential(Linear(dim, dim), ELU(), Linear(dim, dim_node))
        self.conv5 = GINConv(nn5)
        self.bn5 = torch.nn.BatchNorm1d(dim_node)

        self.fc1 = Linear(dim_node, dim_node)
        self.fc2 = Linear(dim_node, dim_graph)
Example #3
    def __init__(self, initial_num_channels: int, num_classes: int,
                 num_channels: int) -> None:
        super().__init__()

        self.convnet = Sequential(
            Conv1d(in_channels=initial_num_channels,
                   out_channels=num_channels,
                   kernel_size=3),
            ELU(),
            Conv1d(in_channels=num_channels,
                   out_channels=num_channels,
                   kernel_size=3,
                   stride=2),
            ELU(),
            Conv1d(in_channels=num_channels,
                   out_channels=num_channels,
                   kernel_size=3,
                   stride=2),
            ELU(),
            Conv1d(in_channels=num_channels,
                   out_channels=num_channels,
                   kernel_size=3),
            ELU(),
        )
        self.fc = Linear(num_channels, num_classes)
Example #4
    def __init__(
        self,
        in_channels,
        out_channels,
        dim,
        kernel_size,
        hidden_channels=None,
        dilation=1,
        bias=True,
        **kwargs,
    ):
        super(XConv, self).__init__()

        self.in_channels = in_channels
        if hidden_channels is None:
            hidden_channels = in_channels // 4
        assert hidden_channels > 0
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.dim = dim
        self.kernel_size = kernel_size
        self.dilation = dilation
        self.kwargs = kwargs

        C_in, C_delta, C_out = in_channels, hidden_channels, out_channels
        D, K = dim, kernel_size

        self.mlp1 = S(
            L(dim, C_delta),
            ELU(),
            BN(C_delta),
            L(C_delta, C_delta),
            ELU(),
            BN(C_delta),
            Reshape(-1, K, C_delta),
        )

        self.mlp2 = S(
            L(D * K, K**2),
            ELU(),
            BN(K**2),
            Reshape(-1, K, K),
            Conv1d(K, K**2, K, groups=K),
            ELU(),
            BN(K**2),
            Reshape(-1, K, K),
            Conv1d(K, K**2, K, groups=K),
            BN(K**2),
            Reshape(-1, K, K),
        )

        C_in = C_in + C_delta
        depth_multiplier = int(ceil(C_out / C_in))
        self.conv = S(
            Conv1d(C_in, C_in * depth_multiplier, K, groups=C_in),
            Reshape(-1, C_in * depth_multiplier),
            L(C_in * depth_multiplier, C_out, bias=bias),
        )

        self.reset_parameters()
Example #5
    def __init__(self, in_channels, out_channels, samples=10):
        super(BCNN, self).__init__(in_channels, out_channels, samples)

        self.layers = torch.nn.Sequential(
            Conv2d(in_channels, 32, 5, padding=2, stride=2), BatchNorm2d(32),
            ELU(), Conv2d(32, 32, 3, padding=1, stride=1), ELU(),
            Conv2d(32, 64, 3, padding=0, stride=2), ELU(),
            NormalConv2d(64, 64, 3, padding=1, stride=2), ELU(), Flatten(),
            NormalLinear(576, out_channels), Softmax(dim=-1))
Example #6
 def __init__(self,
              in_dim,
              hidden_dim,
              out_dim,
              dropout=0.,
              name='gat',
              residual=True,
              use_mlp=False,
              join_with_mlp=False):
     super(GNNModelDGL, self).__init__()
     self.name = name
     self.use_mlp = use_mlp
     self.join_with_mlp = join_with_mlp
     self.normalize_input_columns = True
     if use_mlp:
         self.mlp = MLPRegressor(in_dim, hidden_dim, out_dim)
         if join_with_mlp:
             in_dim += out_dim
         else:
             in_dim = out_dim
     if name == 'gat':
         self.l1 = GATConvDGL(in_dim,
                              hidden_dim // 8,
                              8,
                              feat_drop=dropout,
                              attn_drop=dropout,
                              residual=False,
                              activation=F.elu)
         self.l2 = GATConvDGL(hidden_dim,
                              out_dim,
                              1,
                              feat_drop=dropout,
                              attn_drop=dropout,
                              residual=residual,
                              activation=None)
     elif name == 'gcn':
         self.l1 = GraphConv(in_dim, hidden_dim, activation=F.elu)
         self.l2 = GraphConv(hidden_dim, out_dim, activation=F.elu)
         self.drop = Dropout(p=dropout)
     elif name == 'cheb':
         self.l1 = ChebConvDGL(in_dim, hidden_dim, k=3)
         self.l2 = ChebConvDGL(hidden_dim, out_dim, k=3)
         self.drop = Dropout(p=dropout)
     elif name == 'agnn':
         self.lin1 = Sequential(Dropout(p=dropout),
                                Linear(in_dim, hidden_dim), ELU())
         self.l1 = AGNNConvDGL(learn_beta=False)
         self.l2 = AGNNConvDGL(learn_beta=True)
         self.lin2 = Sequential(Dropout(p=dropout),
                                Linear(hidden_dim, out_dim), ELU())
     elif name == 'appnp':
         self.lin1 = Sequential(Dropout(p=dropout),
                                Linear(in_dim, hidden_dim), ReLU(),
                                Dropout(p=dropout),
                                Linear(hidden_dim, out_dim))
         self.l1 = APPNPConv(k=10, alpha=0.1, edge_drop=0.)
Example #7
    def __init__(self, in_channels: int, out_channels: int, dim: int,
                 kernel_size: int, hidden_channels: Optional[int] = None,
                 dilation: int = 1, bias: bool = True, num_workers: int = 1):
        super(XConv, self).__init__()

        if knn_graph is None:
            raise ImportError('`XConv` requires `torch-cluster`.')

        self.in_channels = in_channels
        if hidden_channels is None:
            hidden_channels = in_channels // 4
        assert hidden_channels > 0
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.dim = dim
        self.kernel_size = kernel_size
        self.dilation = dilation
        self.num_workers = num_workers

        C_in, C_delta, C_out = in_channels, hidden_channels, out_channels
        D, K = dim, kernel_size

        self.mlp1 = S(
            L(dim, C_delta),
            ELU(),
            BN(C_delta),
            L(C_delta, C_delta),
            ELU(),
            BN(C_delta),
            Reshape(-1, K, C_delta),
        )

        self.mlp2 = S(
            L(D * K, K**2),
            ELU(),
            BN(K**2),
            Reshape(-1, K, K),
            Conv1d(K, K**2, K, groups=K),
            ELU(),
            BN(K**2),
            Reshape(-1, K, K),
            Conv1d(K, K**2, K, groups=K),
            BN(K**2),
            Reshape(-1, K, K),
        )

        C_in = C_in + C_delta
        depth_multiplier = int(ceil(C_out / C_in))
        self.conv = S(
            Conv1d(C_in, C_in * depth_multiplier, K, groups=C_in),
            Reshape(-1, C_in * depth_multiplier),
            L(C_in * depth_multiplier, C_out, bias=bias),
        )

        self.reset_parameters()
Example #8
    def __init__(self, device, size, getRawData=False, mode='udacity'):
        super(Challenge, self).__init__()
        if mode == 'udacity':
            self.fc1 = Linear(8295, 128)
            self.fc2 = Linear(1938, 128)
            self.fc3 = Linear(408, 128)
            self.fc4 = Linear(4480, 128)
            self.fc5 = Linear(4480, 1024)
        else:
            self.fc1 = Linear(6195, 128)
            self.fc2 = Linear(1428, 128)
            self.fc3 = Linear(288, 128)
            self.fc4 = Linear(2560, 128)
            self.fc5 = Linear(2560, 1024)
        self.conv1 = Conv3d(size,
                            64,
                            kernel_size=(3, 12, 12),
                            stride=(1, 6, 6))
        self.conv2 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2))
        self.conv3 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2))
        self.conv4 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2))

        self.fc6 = Linear(1024, 512)
        self.fc7 = Linear(512, 256)
        self.fc8 = Linear(256, 128)
        self.fc9 = Linear(258, 1)
        self.lstm1 = LSTM(130, 128, 32)

        self.h1 = torch.zeros(32, 1, 128).to(device)
        self.c1 = torch.zeros(32, 1, 128).to(device)
        self.drop = Dropout3d(.25)
        self.elu = ELU()
        self.relu = ReLU()
        self.laynorm = GroupNorm(1, 128)
        self.getRawData = getRawData
Example #9
def get_activation(name):
    act_name = name.lower()
    m = re.match(r"(\w+)\((\d+\.\d+)\)", act_name)
    if m is not None:
        act_name, alpha = m.groups()
        alpha = float(alpha)
        print(act_name, alpha)
    else:
        alpha = 1.0
    if act_name == 'softplus':
        return Softplus()
    elif act_name == 'ssp':
        return SSP()
    elif act_name == 'elu':
        return ELU(alpha)
    elif act_name == 'relu':
        return ReLU()
    elif act_name == 'selu':
        return SELU()
    elif act_name == 'celu':
        return CELU(alpha)
    elif act_name == 'sigmoid':
        return Sigmoid()
    elif act_name == 'tanh':
        return Tanh()
    else:
        raise NameError("Not supported activation: {}".format(name))
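
A hypothetical usage sketch for the parser above, assuming get_activation is importable from the surrounding module: a bare name returns the activation with its defaults, while a name followed by a decimal in parentheses forwards the parsed alpha (the regex only matches decimal literals such as 0.5, not bare integers).

    act = get_activation("relu")        # ReLU()
    act = get_activation("ELU(0.5)")    # lower-cased, then parsed -> ELU(alpha=0.5)
    act = get_activation("celu(0.25)")  # CELU(alpha=0.25)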
Example #10
    def __init__(self, args):
        super(GIN, self).__init__()
        self.args = args
        self.num_layer = int(self.args["num_layers"])
        assert self.num_layer > 2, "Number of layers in GIN should not be less than 3"

        missing_keys = list(
            set([
                "features_num", "num_class", "num_graph_features",
                "num_layers", "hidden", "dropout", "act", "mlp_layers", "eps"
            ]) - set(self.args.keys()))
        if len(missing_keys) > 0:
            raise Exception("Missing keys: %s." % ','.join(missing_keys))
        if not self.num_layer == len(self.args['hidden']) + 1:
            LOGGER.warn(
                'Warning: layer size does not match the length of hidden units'
            )
        self.num_graph_features = self.args['num_graph_features']

        if self.args["act"] == "leaky_relu":
            act = LeakyReLU()
        elif self.args["act"] == "relu":
            act = ReLU()
        elif self.args["act"] == "elu":
            act = ELU()
        elif self.args["act"] == "tanh":
            act = Tanh()
        else:
            act = ReLU()

        train_eps = True if self.args["eps"] == "True" else False

        self.convs = torch.nn.ModuleList()
        self.bns = torch.nn.ModuleList()

        nn = [Linear(self.args["features_num"], self.args["hidden"][0])]
        for _ in range(self.args["mlp_layers"] - 1):
            nn.append(act)
            nn.append(Linear(self.args["hidden"][0], self.args["hidden"][0]))
        # nn.append(BatchNorm1d(self.args['hidden'][0]))
        self.convs.append(GINConv(Sequential(*nn), train_eps=train_eps))
        self.bns.append(BatchNorm1d(self.args["hidden"][0]))

        for i in range(self.num_layer - 3):
            nn = [Linear(self.args["hidden"][i], self.args["hidden"][i + 1])]
            for _ in range(self.args["mlp_layers"] - 1):
                nn.append(act)
                nn.append(
                    Linear(self.args["hidden"][i + 1],
                           self.args["hidden"][i + 1]))
            # nn.append(BatchNorm1d(self.args['hidden'][i+1]))
            self.convs.append(GINConv(Sequential(*nn), train_eps=train_eps))
            self.bns.append(BatchNorm1d(self.args["hidden"][i + 1]))

        self.fc1 = Linear(
            self.args["hidden"][self.num_layer - 3] + self.num_graph_features,
            self.args["hidden"][self.num_layer - 2],
        )
        self.fc2 = Linear(self.args["hidden"][self.num_layer - 2],
                          self.args["num_class"])
Example #11
    def __init__(self, hidden, num_aggr, config, **kwargs):
        super(ExpandingBConv, self).__init__(aggr='add', **kwargs)
        self.hidden = hidden
        self.num_aggr = num_aggr

        if config.fea_activation == 'ELU':
            self.fea_activation = ELU()
        elif config.fea_activation == 'ReLU':
            self.fea_activation = ReLU()

        self.fea_mlp = Sequential(
            Linear(hidden * self.num_aggr, hidden),
            ReLU(),
            Linear(hidden, hidden),
            self.fea_activation)

        self.aggr_mlp = Sequential(
            Linear(hidden * 2, self.num_aggr),
            Tanh())

        self.edge_encoder = torch.nn.Linear(5, hidden)

        if config.BN == 'Y':
            self.BN = BN(hidden)
        else:
            self.BN = None

        self.reset_parameters()
Example #12
    def __init__(self, device, size, outNum, batch=None):
        super(Challenge, self).__init__()
        self.fc1 = Linear(8295, 128)
        self.fc2 = Linear(1938, 128)
        self.fc3 = Linear(408, 128)
        self.fc4 = Linear(4480, 128)
        self.fc5 = Linear(4480, 1024)

        self.conv1 = Conv3d(size,
                            64,
                            kernel_size=(3, 12, 12),
                            stride=(1, 6, 6))
        self.conv2 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2))
        self.conv3 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2))
        self.conv4 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2))

        self.fc6 = Linear(1024, 512)
        self.fc7 = Linear(512, 256)
        self.fc8 = Linear(256, 128)
        self.fc9 = Linear(258, outNum)
        self.lstm1 = LSTM(130, 128, 32)

        self.h1 = (torch.rand((32, 1, 128)) / 64).to(device)
        self.c1 = (torch.rand((32, 1, 128)) / 64).to(device)
        self.drop = Dropout3d(.25)
        self.elu = ELU()
        self.relu = ReLU()
        self.laynorm = GroupNorm(1, 128)
Example #13
    def __init__(self, hidden, config, **kwargs):
        super(CombAConv, self).__init__(aggr='add', **kwargs)

        if config.fea_activation == 'ELU':
            self.fea_activation = ELU()
        elif config.fea_activation == 'ReLU':
            self.fea_activation = ReLU()

        self.fea_mlp = Sequential(
            Linear(hidden, hidden),
            ReLU(),
            Linear(hidden, hidden),
            self.fea_activation)

        self.aggr_mlp = Sequential(
            Linear(hidden * 2, hidden),
            Tanh())

        if config.BN == 'Y':
            self.BN = BN(hidden)
        else:
            self.BN = None

        self.edge_encoder = torch.nn.Linear(7, hidden)

        self.reset_parameters()
Example #14
 def __init__(self,
              c_dim: int,
              m_dim: int,
              p_dim: int,
              radius=2,
              use_cuda=False,
              dropout=0.,
              use_gru=True):
     super(AlignAttendPooling, self).__init__()
     self.use_cuda = use_cuda
     self.use_gru = use_gru
     self.radius = radius
     self.map = Linear(c_dim, m_dim)
     self.relu = LeakyReLU()
     self.relu1 = LeakyReLU()
     if use_gru:
         self.gru = GRUCell(c_dim, m_dim)
     else:
         self.linear = Linear(c_dim + m_dim, m_dim)
     self.attend = Linear(c_dim + p_dim, c_dim)
     self.align = Linear(m_dim + c_dim + p_dim, 1)
     self.softmax = Softmax(dim=1)
     self.elu = ELU()
     self.relu2 = ReLU()
     self.dropout = Dropout(p=dropout)
Example #15
 def __init__(self, p_dim, q_dim, h_dim=128):
     super(DirectDerivation, self).__init__()
     self.p_dim = p_dim
     self.q_dim = q_dim
     self.gcl = GraphConvolutionLayer(p_dim + q_dim, h_dim, h_dims=[])
     self.relu = ELU()
     self.linear = Linear(h_dim, p_dim + q_dim)
Example #16
 def __init__(self,
              in_dim,
              pq_dim,
              h_dim=128,
              num_layers=1,
              use_cuda=False,
              disturb=False,
              use_lstm=True):
     super(LstmPQEncoder, self).__init__()
     self.use_cuda = use_cuda
     self.disturb = disturb
     self.use_lstm = use_lstm
     self.pq_dim = pq_dim
     if self.use_lstm:
         self.gcl = GraphConvolutionLayer(in_dim,
                                          h_dim,
                                          h_dims=[h_dim],
                                          activation='tanh',
                                          residual=True)
         self.relu = ELU()
         self.rnn = LSTM(in_dim + h_dim * 2, 2 * pq_dim, num_layers)
     else:
         self.gcl = GraphConvolutionLayer(in_dim,
                                          2 * pq_dim,
                                          h_dims=[h_dim],
                                          activation='tanh',
                                          residual=False)
Example #17
    def __init__(self, n_feat, n_hid, n_class, dropout, alpha, n_heads,
                 n_orders, method, graph_convolve):
        super().__init__()

        assert n_heads >= n_orders
        self.n_orders = n_orders
        self.n_heads = n_heads
        self.method = method
        assert method in ['distributed', 'single']

        self.attentions = ModuleList([
            GraphAttentionLayer(n_feat,
                                n_hid,
                                dropout=dropout,
                                alpha=alpha,
                                graph_convolve=graph_convolve)
            for _ in range(n_heads)
        ])

        self.out_att = GraphAttentionLayer(n_hid * n_heads,
                                           n_class,
                                           dropout=dropout,
                                           alpha=alpha,
                                           graph_convolve=graph_convolve)

        self.dropout = Dropout(dropout)
        self.elu = ELU()
Example #18
	def __init__(self, num_features, n_hidden, min_score):
		super(GCNNet, self).__init__()
		self.conv1 = GCNConv(num_features, n_hidden)
		self.conv2 = GCNConv(n_hidden, n_hidden)
		self.conv3 = GCNConv(n_hidden, n_hidden // 4) 
		self.pool = SAGPooling(n_hidden, min_score=min_score, GNN=GCNConv)      
		self.activation = ELU()
		self.final_pooling = global_add_pool
Example #19
 def __init__(self,
              in_features,
              out_features,
              activation=ELU(),
              use_batch_norm=False,
              bias=False):
     # One layer Perceptron
     super(OLP, self).__init__()
     self.linear = Linear(in_features, out_features, bias=bias)
     self.activation = _bn_act(out_features, activation, use_batch_norm)
Example #20
    def __init__(self, num_functions, expirement_name, screen_width,
                 screen_height):
        super(A2CModel, self).__init__()
        self.embed_dim = 8
        self.embed = nn.Embedding(5, self.embed_dim)
        self.embed_mm = nn.Embedding(5, self.embed_dim)
        self.num_functions = num_functions
        self.screen_width = screen_width
        self.screen_height = screen_height

        # our model specification
        self.conv1 = Conv2d(self.embed_dim,
                            16,
                            kernel_size=8,
                            stride=4,
                            padding=1)
        #self.conv1 = weight_norm(self.conv1, name="weight")
        self.elu1 = ELU(inplace=True)
        self.conv2 = Conv2d(16, 32, kernel_size=4, stride=2, padding=2)
        #self.conv2 = weight_norm(self.conv2, name="weight")
        self.elu2 = ELU(inplace=True)

        self.conv_mm1 = Conv2d(self.embed_dim,
                               16,
                               kernel_size=8,
                               stride=4,
                               padding=1)
        #self.conv_mm1 = weight_norm(self.conv_mm1, name="weight")
        self.elu_mm1 = ELU(inplace=True)
        self.conv_mm2 = Conv2d(16, 32, kernel_size=4, stride=2, padding=2)
        #self.conv_mm2 = weight_norm(self.conv_mm2, name="weight")
        self.elu_mm2 = ELU(inplace=True)

        self.feature_input = nn.Linear(11, 128)

        self.fc = nn.Linear(64 * 64, 128)
        self.fc_relu = ELU(inplace=True)
        self.action_head = nn.Linear(128, self.num_functions)
        self.value_head = nn.Linear(128, 1)

        self.x = nn.Linear(128, self.screen_width)
        self.y = nn.Linear(128, self.screen_height)
        self._initialize_weights()
Example #21
 def __init__(self, n_dim: int, e_dim: int, c_dim: int, dropout=0.):
     super(ConcatMesPassing, self).__init__()
     self.linear = Linear(n_dim + e_dim + n_dim, c_dim, bias=True)
     self.linear_e = Linear(n_dim + e_dim + n_dim, e_dim, bias=True)
     self.relu1 = LeakyReLU()
     self.relu2 = LeakyReLU()
     self.relu_e = LeakyReLU()
     self.attention = Linear(n_dim + e_dim + n_dim, 1, bias=True)
     self.softmax = Softmax(dim=1)
     self.elu = ELU()
     self.dropout = Dropout(p=dropout)
Example #22
 def __init__(self, p_dim, q_dim, h_dim=128, dropout=0.0):
     super(HamiltonianDerivation, self).__init__()
     # self.gcl = GraphConvolutionLayer(p_dim + q_dim, h_dim, h_dims=[], dropout=dropout)
     self.align_attend = AlignAttendPooling(p_dim + q_dim,
                                            h_dim,
                                            radius=1,
                                            dropout=dropout,
                                            use_gru=False)
     self.relu = ELU()
     self.linear = Linear(h_dim, 1)
     self.softplus = Softplus()
Example #23
 def __init__(self,
              in_features,
              out_features,
              activation=ELU(),
              use_batch_norm=False,
              bias=False):
     super(Gated_pooling, self).__init__()
     self.linear1 = Linear(in_features, out_features, bias=bias)
     self.activation1 = _bn_act(out_features, activation, use_batch_norm)
     self.linear2 = Linear(in_features, out_features, bias=bias)
     self.activation2 = _bn_act(out_features, activation, use_batch_norm)
Example #24
    def __init__(
        self,
        embedding_size,
        num_embeddings,
        num_channels,
        hidden_dim,
        num_classes,
        dropout_p,
        pretrained_embeddings=None,
        padding_idx=0,
    ):
        super().__init__()

        if pretrained_embeddings is None:
            self.emb = Embedding(
                embedding_dim=embedding_size, num_embeddings=num_embeddings, padding_idx=padding_idx
            )
        else:
            self.emb = Embedding(
                embedding_dim=embedding_size,
                num_embeddings=num_embeddings,
                padding_idx=padding_idx,
                _weight=pretrained_embeddings,
            )

        self.convnet = Sequential(
            Conv1d(in_channels=embedding_size, out_channels=num_channels, kernel_size=3),
            ELU(),
            Conv1d(in_channels=num_channels, out_channels=num_channels, kernel_size=3, stride=2),
            ELU(),
            Conv1d(in_channels=num_channels, out_channels=num_channels, kernel_size=3, stride=2),
            ELU(),
            Conv1d(in_channels=num_channels, out_channels=num_channels, kernel_size=3),
            ELU(),
        )

        self._dropout_p = dropout_p
        self.fc1 = Linear(num_channels, hidden_dim)
        self.fc2 = Linear(hidden_dim, num_classes)
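
Note that fc1 expects num_channels input features while the convnet emits a (batch, num_channels, remaining_length) tensor, so the forward pass (not shown here) must collapse the length dimension first. The fragment below is only a hypothetical sketch of that step: it assumes model is an instance of the class above with num_embeddings >= 100, and the average pooling and ELU between the linear layers are illustrative choices, not necessarily the original ones.

    import torch
    import torch.nn.functional as F

    x_in = torch.randint(0, 100, (8, 40))           # (batch, seq_len) token indices
    emb = model.emb(x_in).permute(0, 2, 1)          # (batch, embedding_size, seq_len)
    features = model.convnet(emb)                   # (batch, num_channels, remaining_length)
    features = F.avg_pool1d(features, features.size(2)).squeeze(2)  # (batch, num_channels)
    features = F.dropout(features, p=model._dropout_p)
    logits = model.fc2(F.elu(model.fc1(features)))  # (batch, num_classes)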
Example #25
    def __init__(self, in_channels, out_channels, samples=20):
        super(BCNN, self).__init__(in_channels, out_channels, samples)

        self.layers = torch.nn.Sequential(
            Conv2d(in_channels, 64, 5, padding=2, stride=2), BatchNorm2d(64),
            ELU(), Conv2d(64, 128, 5, padding=2, stride=2), ELU(),
            Conv2d(128, 128, 5, padding=2, stride=2), ELU(),
            Conv2d(128, 128, 3, padding=1), ELU(),
            Conv2d(128, 128, 3, padding=1), ELU(),
            NormalConv2d(128, 128, 3, padding=1), ELU(), Flatten(),
            Linear(2048, 128), ELU(),
            MultivariateNormalLinear(128, out_channels), Softmax(dim=-1))
Example #26
    def __init__(self, hidden, config, **kwargs):
        super(GinConv, self).__init__(aggr='add', **kwargs)

        if config.fea_activation == 'ELU':
            self.fea_activation = ELU()
        elif config.fea_activation == 'ReLU':
            self.fea_activation = ReLU()

        self.fea_mlp = Sequential(Linear(hidden, hidden), ReLU(),
                                  Linear(hidden, hidden), self.fea_activation)

        self.edge_encoder = torch.nn.Linear(5, hidden)

        if config.BN == 'Y':
            self.BN = BN(hidden)
        else:
            self.BN = None
Example #27
    def __init__(self,
                 device,
                 size,
                 getRawData=False,
                 batch=1,
                 mode='udacity'):
        super(TSNENet, self).__init__()
        self.fc1 = Linear(8295, 128)  # 8374
        self.fc2 = Linear(475, 128)
        self.fc3 = Linear(88, 128)
        self.fc4 = Linear(512, 128)
        self.fc5 = Linear(512, 1024)

        self.conv1 = Conv3d(size,
                            64,
                            kernel_size=(3, 12, 12),
                            stride=(1, 6, 6))  # , padding=1)
        self.conv2 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2))
        self.conv3 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2))
        self.conv4 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2))

        self.fc6 = Linear(1024, 512)
        self.fc7 = Linear(512, 256)
        self.fc8 = Linear(256, 128)
        self.fc9 = Linear(258, 128)
        self.fc10 = Linear(128, 15)
        self.lstm1 = LSTM(130, 128, 32)

        self.h1 = (torch.rand((32, 1, 128)) / 64).to(device)
        self.c1 = (torch.rand((32, 1, 128)) / 64).to(device)
        self.drop = Dropout3d(.05)
        self.elu = ELU()
        self.relu = ReLU()
        self.laynorm = GroupNorm(1, 128)

        self.bnorm1 = BatchNorm3d(64)
        self.bnorm2 = BatchNorm2d(64)
        self.bnorm3 = BatchNorm2d(64)
        self.bnorm4 = BatchNorm2d(64)

        self.pool1 = MaxPool2d(2)
        self.pool2 = MaxPool2d(2)

        self.getRawData = getRawData
        self.batch = batch
Example #28
 def __init__(self,
              n_dim: int,
              e_dim: int,
              p_dim: int,
              c_dim: int,
              dropout=0.,
              use_cuda=False):
     super(MolGATMesPassing, self).__init__()
     self.linear = Linear(n_dim + p_dim + n_dim, c_dim, bias=True)
     self.linear_e = Linear(n_dim + p_dim + n_dim, e_dim, bias=True)
     self.relu1 = LeakyReLU()
     self.relu2 = LeakyReLU()
     self.relu_e = LeakyReLU()
     self.attention = Linear(e_dim + p_dim, 1, bias=True)
     self.softmax = Softmax(dim=1)
     self.elu = ELU()
     self.dropout = Dropout(p=dropout)
     self.use_cuda = use_cuda
Example #29
    def __init__(self, config, device, vocab_size, pad_idx=0):
        super().__init__()

        self.emb_dim = config.pop("embedding_dim")
        self.hidden_size = config.pop("hidden_size")
        self.d = numpy.sqrt(self.hidden_size)
        self.vocab_size = vocab_size
        self.pad_idx = pad_idx

        self.embedding = Embedding(self.vocab_size,
                                   self.emb_dim,
                                   padding_idx=self.pad_idx)

        self.state_embedder = PytorchSeq2SeqWrapper(
            LSTM(batch_first=True,
                 input_size=self.emb_dim,
                 hidden_size=self.hidden_size))

        self.state_recurrence = PytorchSeq2VecWrapper(
            GRU(
                batch_first=True,
                input_size=self.hidden_size,
                hidden_size=self.hidden_size,
            ))

        self.action_embedder = PytorchSeq2VecWrapper(
            GRU(batch_first=True,
                input_size=self.emb_dim,
                hidden_size=self.hidden_size))

        self.recipe_embedder = PytorchSeq2VecWrapper(
            LSTM(batch_first=True,
                 input_size=self.emb_dim,
                 hidden_size=self.hidden_size))

        self.state_to_hidden = Linear(self.hidden_size, self.hidden_size)
        self.state_to_hidden2 = Linear(self.hidden_size, self.hidden_size // 2)

        self.action_to_hidden = Linear(self.hidden_size, self.hidden_size)
        self.action_to_hidden2 = Linear(self.hidden_size,
                                        self.hidden_size // 2)

        self.elu = ELU()
        self.device = device
Example #30
 def __init__(self, device):
     super(zModel, self).__init__()
     self.conv1 = Conv1d(1, 16, kernel_size=1, stride=1)
     self.conv2 = Conv1d(16, 16, kernel_size=2, stride=2)
     self.conv3 = Conv1d(16, 16, kernel_size=3, stride=2)
     self.fc1 = Linear(10, 32)
     self.fc2 = Linear(5, 32)
     self.fc3 = Linear(2, 32)
     self.fc4 = Linear(32, 128)
     self.fc5 = Linear(128, 64)
     self.fc6 = Linear(64, 32)
     self.fc7 = Linear(32, 1)
     self.lstm1 = LSTM(32, 16, 32)
     self.h1 = torch.zeros(32, 1, 16).to(device)
     self.c1 = torch.zeros(32, 1, 16).to(device)
     self.drop = Dropout(.1)
     self.elu = ELU()
     self.relu = ReLU()
     self.laynorm = GroupNorm(1, 32)