Example #1
import torch
from torch_geometric.nn import GraphUNet


def test_graph_unet():
    model = GraphUNet(16, 32, 8, depth=3)
    # The scalar pool_ratio default of 0.5 is repeated once per pooling level.
    out = 'GraphUNet(16, 32, 8, depth=3, pool_ratios=[0.5, 0.5, 0.5])'
    assert model.__repr__() == out

    x = torch.randn(3, 16)  # 3 nodes with 16 features each
    edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])  # undirected path 0-1-2

    out = model(x, edge_index)
    assert out.size() == (3, 8)
Example #2
    def __init__(self, in_channels, hidden_channels=16, out_channels=16):
        super(EncoderGraphUNet, self).__init__()

        self.unet = GraphUNet(in_channels=in_channels,
                              hidden_channels=hidden_channels,
                              out_channels=out_channels,
                              depth=4)
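
The fragment above shows only the constructor; a minimal forward pass for such a wrapper (the method below is an assumption, not part of the original snippet) would simply delegate to the wrapped model:

    def forward(self, x, edge_index):
        # Hypothetical forward: pass node features and connectivity straight
        # through the wrapped GraphUNet.
        return self.unet(x, edge_index)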
Example #3
    def __init__(self, n_class):
        super().__init__()

        self.base_model = torchvision.models.resnet18(pretrained=True)
        self.base_layers = list(self.base_model.children())
        # Replace ResNet's stem conv so the encoder accepts 1-channel input,
        # reusing the pretrained BatchNorm and ReLU that follow it.
        self.layer1 = nn.Sequential(
            nn.Conv2d(1,
                      64,
                      kernel_size=(7, 7),
                      stride=(2, 2),
                      padding=(3, 3),
                      bias=False), self.base_layers[1], self.base_layers[2])
        self.layer2 = nn.Sequential(*self.base_layers[3:5])
        self.layer3 = self.base_layers[5]
        self.layer4 = self.base_layers[6]
        #self.layer5 = self.base_layers[7]
        #self.decode4 = Decoder(512, 256+256, 256)
        self.decode3 = Decoder(256, 256 + 128, 256)
        self.decode2 = Decoder(256, 128 + 64, 128)
        self.decode1 = Decoder(128, 64 + 64, 64)
        self.decode0 = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
            nn.Conv2d(64, 32, kernel_size=3, padding=1, bias=False),
            nn.Conv2d(32, 64, kernel_size=3, padding=1, bias=False))
        self.conv_last = nn.Conv2d(64, n_class, 1)
        # GraphUNet operating on 256-dim features extracted from the CNN encoder.
        self.gu = GraphUNet(256, 64, 256, depth=2)
Example #4
    def __init__(self, in_ch, hid_ch, out_ch, depth, pool_ratios):
        super(GUNET, self).__init__()
        self.unet = GraphUNet(in_channels=in_ch,
                              hidden_channels=hid_ch,
                              out_channels=out_ch,
                              depth=depth,
                              pool_ratios=pool_ratios)
Example #5
    def __init__(self):
        super(Net, self).__init__()
        # `data` and `dataset` are module-level globals in the original script;
        # the first pooling step keeps roughly 2000 nodes.
        pool_ratios = [2000 / data.num_nodes, 0.5]
        self.unet = GraphUNet(dataset.num_features,
                              32,
                              dataset.num_classes,
                              depth=3,
                              pool_ratios=pool_ratios)
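
For context, a minimal sketch of how those globals are typically created (the Planetoid/Cora dataset here follows the upstream PyTorch Geometric example and is an assumption, not part of the snippet):

from torch_geometric.datasets import Planetoid

dataset = Planetoid(root='data/Cora', name='Cora')  # assumed dataset choice
data = dataset[0]  # the single citation graph, providing num_nodes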
Example #6
    def __init__(self,
                 input_dim=3,
                 hidden_dim=32,
                 embedding_dim=64,
                 output_dim_id=len(class_to_id),
                 output_dim_p4=4,
                 dropout_rate=0.5,
                 convlayer="sgconv",
                 space_dim=2,
                 nearest=3):
        super(PFNet8, self).__init__()

        act = nn.SELU

        self.inp = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            act(),
            nn.Linear(hidden_dim, hidden_dim),
            act(),
            nn.Linear(hidden_dim, hidden_dim),
            act(),
            nn.Linear(hidden_dim, hidden_dim),
            act(),
            nn.Linear(hidden_dim, embedding_dim),
        )
        # depth=3; the scalar pool_ratio of 0.2 is repeated at every pooling level.
        self.unet = GraphUNet(embedding_dim,
                              hidden_dim,
                              embedding_dim,
                              depth=3,
                              pool_ratios=0.2,
                              act=torch.nn.functional.selu)

        self.nn1 = nn.Sequential(
            nn.Linear(embedding_dim, hidden_dim),
            act(),
            nn.Linear(hidden_dim, hidden_dim),
            act(),
            nn.Linear(hidden_dim, hidden_dim),
            act(),
            nn.Linear(hidden_dim, hidden_dim),
            act(),
            nn.Linear(hidden_dim, output_dim_id),
            act(),
        )
        self.nn2 = nn.Sequential(
            nn.Linear(embedding_dim + output_dim_id, hidden_dim),
            act(),
            nn.Linear(hidden_dim, hidden_dim),
            act(),
            nn.Linear(hidden_dim, hidden_dim),
            act(),
            nn.Linear(hidden_dim, hidden_dim),
            act(),
            nn.Linear(hidden_dim, output_dim_p4),
        )
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.embedding_dim = embedding_dim
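
The forward pass is not shown; based on the layer names, a plausible (assumed) flow embeds the inputs with `inp`, refines the embedding over the graph with `unet`, classifies particle IDs with `nn1`, and regresses the four-momentum with `nn2`:

    def forward(self, x, edge_index):
        # Assumed forward, inferred from the layer names above.
        emb = self.inp(x)
        emb = self.unet(emb, edge_index)
        ids = self.nn1(emb)
        p4 = self.nn2(torch.cat([emb, ids], dim=-1))
        return ids, p4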
Example #7
    def __init__(self, in_feats, hidden_size, out_feats, num_layers, dropout, num_nodes):
        super(UNet, self).__init__()

        self.in_feats = in_feats
        self.out_feats = out_feats
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.dropout = dropout
        self.num_nodes = num_nodes

        self.unet = GraphUNet(
            self.in_feats, self.hidden_size, self.out_feats, depth=3, pool_ratios=[2000 / num_nodes, 0.5], act=F.elu
        )
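
Only the constructor appears here as well; a plausible forward pass (assumed, following the common GraphUNet node-classification pattern, with `F` being `torch.nn.functional`) is:

    def forward(self, x, edge_index):
        # Assumed forward: input dropout, GraphUNet, log-softmax over classes.
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.unet(x, edge_index)
        return F.log_softmax(x, dim=-1)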
Example #8
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.save_hyperparameters()  # LightningModule API: records constructor kwargs as hparams

        assert kwargs["num_layers"] >= 2

        self.unet = GraphUNet(
            kwargs["num_features"],
            kwargs["hidden_channels"],
            kwargs["num_classes"],
            depth=kwargs["num_layers"],
            pool_ratios=kwargs["pool_ratios"],
        )
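
A hypothetical instantiation of this kwargs-driven module (the class name and argument values below are illustrative assumptions):

model = UNetLightningModule(
    num_features=16,
    hidden_channels=32,
    num_classes=7,
    num_layers=3,
    pool_ratios=[0.5, 0.5, 0.5],
)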
Example #9
    def __init__(self, num_features, num_classes, hidden_size, num_layers,
                 dropout):
        super(UNet, self).__init__()

        self.num_features = num_features
        self.num_classes = num_classes
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.dropout = dropout

        # With depth=3, the two-element pool_ratios list is padded by repeating
        # its last entry, giving [0.5, 0.5, 0.5].
        self.unet = GraphUNet(num_features,
                              hidden_size,
                              num_classes,
                              depth=3,
                              pool_ratios=[0.5, 0.5])
Example #10
    def __init__(self,
                 input_dim=3,
                 hidden_dim=32,
                 encoding_dim=256,
                 output_dim_id=len(class_to_id),
                 output_dim_p4=4,
                 convlayer="gravnet-radius",
                 convlayer2="none",
                 space_dim=2,
                 nearest=3,
                 dropout_rate=0.0,
                 activation="leaky_relu",
                 return_edges=False,
                 radius=0.1,
                 input_encoding=0):

        super(PFNet7, self).__init__()

        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.return_edges = return_edges
        self.convlayer = convlayer
        self.input_encoding = input_encoding

        if activation == "leaky_relu":
            self.act = nn.LeakyReLU
            self.act_f = torch.nn.functional.leaky_relu
        elif activation == "selu":
            self.act = nn.SELU
            self.act_f = torch.nn.functional.selu
        elif activation == "relu":
            self.act = nn.ReLU
            self.act_f = torch.nn.functional.relu

        # if you want to add an initial encoding of the input
        conv_in_dim = input_dim
        if self.input_encoding > 0:
            self.nn1 = nn.Sequential(
                nn.Linear(input_dim, hidden_dim),
                self.act(),
                nn.Dropout(dropout_rate) if dropout_rate > 0 else nn.Identity(),
                nn.Linear(hidden_dim, hidden_dim),
                self.act(),
                nn.Linear(hidden_dim, encoding_dim),
            )
            conv_in_dim = encoding_dim

        # (1) GNN layer
        if convlayer == "gravnet-knn":
            self.conv1 = GravNetConv(conv_in_dim, encoding_dim, space_dim, hidden_dim, nearest, neighbor_algo="knn")
        elif convlayer == "gravnet-radius":
            self.conv1 = GravNetConv(conv_in_dim, encoding_dim, space_dim, hidden_dim, nearest, neighbor_algo="radius", radius=radius)
        else:
            raise Exception("Unknown convolution layer: {}".format(convlayer))

        # The decoding layers receive the raw inputs concatenated with the GravNet output.
        num_decode_in = input_dim + encoding_dim

        # (2) another GNN layer if you want
        self.convlayer2 = convlayer2
        if convlayer2 == "none":
            self.conv2_1 = None
            self.conv2_2 = None
        elif convlayer2 == "sgconv":
            self.conv2_1 = SGConv(num_decode_in, hidden_dim, K=1)
            self.conv2_2 = SGConv(num_decode_in, hidden_dim, K=1)
            num_decode_in += hidden_dim
        elif convlayer2 == "graphunet":
            self.conv2_1 = GraphUNet(num_decode_in, hidden_dim, hidden_dim, 2, pool_ratios=0.1)
            self.conv2_2 = GraphUNet(num_decode_in, hidden_dim, hidden_dim, 2, pool_ratios=0.1)
            num_decode_in += hidden_dim
        elif convlayer2 == "gatconv":
            self.conv2_1 = GATConv(num_decode_in, hidden_dim, 4, concat=False, dropout=dropout_rate)
            self.conv2_2 = GATConv(num_decode_in, hidden_dim, 4, concat=False, dropout=dropout_rate)
            num_decode_in += hidden_dim
        else:
            raise Exception("Unknown convolution layer: {}".format(convlayer2))

        # (3) dropout layer if you want
        self.dropout1 = nn.Dropout(dropout_rate) if dropout_rate > 0 else nn.Identity()

        # (4) DNN layer: classifying PID
        self.nn2 = nn.Sequential(
            nn.Linear(num_decode_in, hidden_dim),
            self.act(),
            nn.Dropout(dropout_rate) if dropout_rate > 0 else nn.Identity(),
            nn.Linear(hidden_dim, hidden_dim),
            self.act(),
            nn.Dropout(dropout_rate) if dropout_rate > 0 else nn.Identity(),
            nn.Linear(hidden_dim, hidden_dim),
            self.act(),
            nn.Linear(hidden_dim, output_dim_id),
        )

        # (5) DNN layer: regressing p4
        self.nn3 = nn.Sequential(
            nn.Linear(num_decode_in + output_dim_id, hidden_dim),
            self.act(),
            nn.Dropout(dropout_rate) if dropout_rate > 0 else nn.Identity(),
            nn.Linear(hidden_dim, hidden_dim),
            self.act(),
            nn.Dropout(dropout_rate) if dropout_rate > 0 else nn.Identity(),
            nn.Linear(hidden_dim, hidden_dim),
            self.act(),
            nn.Linear(hidden_dim, output_dim_p4),
        )
Example #11
    def __init__(
        self,
        device,
        input_dim,
        output_size,
        seq_len,
        pred_seq_len,
        max_transports=1000,
        # New
        use_rnn=False,
        rnn_size=64,
        embedding_size=64,
        dropout=0.,
        gru=False,
    ):
        super().__init__(
            device,
            input_dim,
            output_size,
            seq_len,
            pred_seq_len,
            max_transports=max_transports,
        )
        self.rnn_size = rnn_size
        self.embedding_size = embedding_size
        self.dropout = dropout
        self.use_rnn = use_rnn

        node_fts = 8

        # Linear layer to embed the input position
        self.input_embedding_layer = nn.Linear(node_fts, self.embedding_size)

        self.net_node_hidden_dim = 64  # 128
        self.embed_stations = weight_norm(
            nn.Linear(node_fts, self.net_node_hidden_dim))
        self.embed_temp = nn.Linear(node_fts, self.net_node_hidden_dim)

        # EdgeGATConv
        self.conv1 = GATConv(self.net_node_hidden_dim, 8, heads=8, dropout=0.6)
        self.conv2 = GATConv(8 * 8, 64, heads=1, concat=True, dropout=0.6)

        # Note: this immediately overwrites the GATConv assigned to conv1 above.
        self.conv1 = MyConv(self.net_node_hidden_dim, 64)

        # Linear layer to map the hidden state of LSTM to output
        self.output_layer = nn.Linear(self.rnn_size, 64)  # self.output_size)

        self.final_layer = nn.Linear(128 + 64,
                                     self.output_size)  # self.rnn_size

        # Just for debugging
        self.visualize = nn.Linear(64, 1)  # self.rnn_size

        # self.hidden_dim = 32 # 64  # 32
        self.hidden_dim = self.embedding_size
        self.input_dim = 8 + 3
        self.encoder = nn.Linear(self.input_dim, self.hidden_dim)

        chans = [128, 64]
        self.temp_conv = TemporalConvNet(
            num_inputs=self.hidden_dim,
            num_channels=chans,
            dropout=dropout,
            kernel_size=4,
        )

        chans = [self.hidden_dim, self.hidden_dim]
        self.spat_conv = TemporalConvNet(
            num_inputs=self.hidden_dim,
            num_channels=chans,
            dropout=dropout,
            kernel_size=4,
        )

        # self.conv_hidden = 32
        self.conv_hidden = self.hidden_dim  # 64
        self.spat_k = self.seq_len + self.pred_seq_len
        self.spat_convs = nn.ModuleList()

        self.spat_convs.append(
            GATConv(self.net_node_hidden_dim,
                    self.conv_hidden,
                    heads=1,
                    concat=True,
                    dropout=0.2))

        self.ground_encoder = nn.Linear(5, 16)
        self.ground_encoder2 = nn.Linear(5, self.hidden_dim + 16)

        self.ground_k = 3
        self.ground_convs = nn.ModuleList()
        self.ground_convs.append(GCNConv(self.conv_hidden, self.conv_hidden))
        for _ in range(self.ground_k - 1):
            self.ground_convs.append(
                GCNConv(self.conv_hidden, self.conv_hidden))

        # 4-level Graph U-Net; the scalar pool_ratio of 0.5 applies at every level.
        self.unet = GraphUNet(self.conv_hidden,
                              16,
                              self.conv_hidden,
                              depth=4,
                              pool_ratios=0.5)
        self.pool = TopKPooling(self.conv_hidden, ratio=0.3)
        self.ground_sync = MyConv(self.hidden_dim + 16, self.conv_hidden)
        self.ground_sync2 = MyConv(self.hidden_dim + 16, self.conv_hidden)

        self.breadths = torch.nn.ModuleList([
            Breadth(self.conv_hidden, self.conv_hidden)
            for i in range(self.spat_k)
        ])

        self.depths = torch.nn.ModuleList([
            Depth(self.conv_hidden * 2, self.conv_hidden)
            for i in range(self.spat_k)
        ])

        self.pred_conv = TemporalConvNet(
            num_inputs=self.conv_hidden,
            num_channels=[128, 64],
            dropout=dropout,
            kernel_size=4,
        )

        tcn_hidden = self.embedding_size  # 64

        self.bn = BatchNorm(2 * self.conv_hidden)
        self.final_conv = TemporalConvNet(
            # 128 -> 64 -> 32
            num_inputs=(3 if self.use_rnn else 2) * self.conv_hidden + 16,
            # num_inputs=3 * self.conv_hidden,
            num_channels=[tcn_hidden, tcn_hidden, tcn_hidden],
            dropout=dropout,
            kernel_size=8,
        )

        self.start_conv = TemporalConvNet(
            num_inputs=self.conv_hidden,
            num_channels=[tcn_hidden, tcn_hidden, tcn_hidden],
            dropout=dropout,
            kernel_size=8,
        )

        # ReLU and dropout unit
        self.lrelu = nn.LeakyReLU()  # nn.ReLU()
        self.dropout = nn.Dropout(dropout)  # shadows the float stored as self.dropout earlier

        self.cell = nn.LSTMCell(self.hidden_dim, self.rnn_size)

        self.lin1 = nn.Linear(tcn_hidden, tcn_hidden)
        self.lin2 = nn.Linear(tcn_hidden, tcn_hidden)
        self.lin3 = nn.Linear(tcn_hidden, self.pred_seq_len)

        self.stgblock = STConvBlock(self.device,
                                    k_s=1,
                                    k_t=1,
                                    dropout=dropout,
                                    channels=(self.conv_hidden + 16,
                                              self.conv_hidden + 16,
                                              self.conv_hidden + 16))
Example #12
    def __init__(self,
                 in_dim,
                 hidden_dim,
                 out_dim,
                 dropout=0.5,
                 name='gat',
                 heads=8,
                 residual=True):
        super(GNNModelPYG, self).__init__()
        self.dropout = dropout
        self.name = name
        self.residual = None
        if residual:
            if in_dim == out_dim:
                self.residual = Identity()
            else:
                self.residual = Linear(in_dim, out_dim)

        if name == 'gat':
            self.conv1 = GATConv(in_dim,
                                 hidden_dim,
                                 heads=heads,
                                 dropout=dropout)
            self.conv2 = GATConv(hidden_dim * heads,
                                 out_dim,
                                 heads=1,
                                 concat=False,
                                 dropout=dropout)
        elif name == 'gcn':
            self.conv1 = GCNConv(in_dim,
                                 hidden_dim,
                                 cached=True,
                                 normalize=True,
                                 add_self_loops=False)
            self.conv2 = GCNConv(hidden_dim,
                                 out_dim,
                                 cached=True,
                                 normalize=True,
                                 add_self_loops=False)
        elif name == 'cheb':
            self.conv1 = ChebConv(in_dim, hidden_dim, K=2)
            self.conv2 = ChebConv(hidden_dim, out_dim, K=2)
        elif name == 'spline':
            self.conv1 = SplineConv(in_dim, hidden_dim, dim=1, kernel_size=2)
            self.conv2 = SplineConv(hidden_dim, out_dim, dim=1, kernel_size=2)
        elif name == 'gin':
            self.conv1 = GINConv(
                Sequential(Linear(in_dim, hidden_dim), ReLU(),
                           Linear(hidden_dim, hidden_dim)))
            self.conv2 = GINConv(
                Sequential(Linear(hidden_dim, hidden_dim), ReLU(),
                           Linear(hidden_dim, out_dim)))
        elif name == 'unet':
            self.conv1 = GraphUNet(in_dim, hidden_dim, out_dim, depth=3)
        elif name == 'agnn':
            self.lin1 = Linear(in_dim, hidden_dim)
            self.conv1 = AGNNConv(requires_grad=False)
            self.conv2 = AGNNConv(requires_grad=True)
            self.lin2 = Linear(hidden_dim, out_dim)
        else:
            raise NotImplementedError(
                "Unknown model name. Choose from gat, gcn, cheb, spline, gin, "
                "unet, agnn.")
Example #13
    def __init__(self, num_vocab, max_seq_len, node_encoder, emb_dim,
                 num_edge_feats, num_layers,  # out_dim=1024, hidden_dim=100, num_classes=2,
                 k=30):
        super(UNet, self).__init__(num_vocab, max_seq_len, node_encoder,
                                   emb_dim, num_edge_feats, 4, k=k,
                                   init_gnn=False)

        self.unet = GraphUNet(emb_dim + num_edge_feats, emb_dim,
                              self.total_latent_dim, depth=4,
                              pool_ratios=[0.9, 0.7, 0.6, 0.5])
Example #14
    def __init__(self):
        super(Net, self).__init__()
        pool_ratios = [0.75, 0.5]
        self.unet = GraphUNet(3, 200, 2, depth=3, pool_ratios=pool_ratios)
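
A quick usage sketch for this last configuration (the input tensors below are made up for illustration):

import torch

model = Net()
x = torch.randn(100, 3)                       # 100 nodes, 3 features each
edge_index = torch.randint(0, 100, (2, 300))  # random connectivity, illustration only
out = model.unet(x, edge_index)               # -> shape [100, 2]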