Example #1
    def __init__(self, num_layers, num_mlp_layers, feature_dim, hidden_dim, num_classes,
                 final_dropout, graph_pooling_type, device, embedded_dim):
        super(gcnn_new, self).__init__()
        self.final_dropout = final_dropout
        self.device = device
        self.num_layers = num_layers
        self.hidden_dim = hidden_dim
        self.graph_pooling_type = graph_pooling_type
        self.feature_dim = feature_dim
        self.embedded_dim = embedded_dim
        self.mlps = torch.nn.ModuleList()
        ###List of batchnorms applied to the output of MLP (input of the final prediction linear layer)
        self.batch_norms = torch.nn.ModuleList()
        for layer in range(self.num_layers - 1):
            if layer == 0:
                self.mlps.append(MLP(num_mlp_layers, feature_dim, hidden_dim, hidden_dim))
            else:
                self.mlps.append(MLP(num_mlp_layers, hidden_dim, hidden_dim, hidden_dim))
        self.linears_prediction = torch.nn.ModuleList()
        for layer in range(num_layers):
            if layer == 0:
                self.linears_prediction.append(nn.Linear(feature_dim, num_classes))
            else:
                self.linears_prediction.append(nn.Linear(hidden_dim, num_classes))
        self.conv_kernel = torch.nn.ModuleList()
        for layer in range(self.num_layers - 1):
            if layer == 0:
                self.conv_kernel.append(MLP(num_mlp_layers, embedded_dim, hidden_dim, feature_dim))
                self.batch_norms.append(nn.BatchNorm1d(feature_dim))
            else:
                self.conv_kernel.append(MLP(num_mlp_layers, embedded_dim, hidden_dim, hidden_dim))
                self.batch_norms.append(nn.BatchNorm1d(hidden_dim))
Example #2
    def __init__(self, layers, in_features, hidden_features, out_features, prop_depth, dropout=0.0, model_name='DE-GNN'):
        super(GNNModel, self).__init__()
        # Store the layer count under its own name so the nn.ModuleList
        # assigned to self.layers below does not overwrite it.
        self.num_layers, self.in_features, self.hidden_features, self.out_features, self.model_name = layers, in_features, hidden_features, out_features, model_name
        Layer = self.get_layer_class()
        self.act = nn.ReLU()
        self.dropout = nn.Dropout(p=dropout)
        self.layers = nn.ModuleList()
        if self.model_name == 'DE-GNN':
            self.layers.append(Layer(in_channels=in_features, out_channels=hidden_features, K=prop_depth))
        elif self.model_name == 'GIN':
            self.layers.append(
                Layer(MLP(num_layers=2, input_dim=in_features, hidden_dim=hidden_features, output_dim=hidden_features)))
        else:
            self.layers.append(Layer(in_channels=in_features, out_channels=hidden_features))
        if layers > 1:
            for i in range(layers - 1):
                if self.model_name == 'DE-GNN':
                    self.layers.append(Layer(in_channels=hidden_features, out_channels=hidden_features, K=prop_depth))
                elif self.model_name == 'GIN':
                    self.layers.append(Layer(MLP(num_layers=2, input_dim=hidden_features, hidden_dim=hidden_features,
                                                 output_dim=hidden_features)))
                elif self.model_name == 'GAT':
                    self.layers.append(Layer(in_channels=hidden_features, out_channels=hidden_features, heads=8))
                else:
                    # for GCN and GraphSAGE
                    self.layers.append(Layer(in_channels=hidden_features, out_channels=hidden_features))
        self.layer_norms = nn.ModuleList([nn.LayerNorm(hidden_features) for i in range(layers)])
        self.merger = nn.Linear(3 * hidden_features, hidden_features)
        self.feed_forward = FeedForwardNetwork(hidden_features, out_features)
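The constructor delegates the layer type to get_layer_class(), which is not shown. As a rough sketch only, assuming the layers come from PyTorch Geometric and guessing TAGConv for the K-hop 'DE-GNN' case (the project may define its own layer class), the dispatch could look like:

from torch_geometric.nn import GATConv, GCNConv, GINConv, SAGEConv, TAGConv

def get_layer_class(self):
    # TAGConv accepts the same K (propagation depth) keyword used above;
    # the other names map onto standard PyG convolution layers.
    layer_classes = {'DE-GNN': TAGConv, 'GIN': GINConv, 'GCN': GCNConv,
                     'GraphSAGE': SAGEConv, 'GAT': GATConv}
    return layer_classes[self.model_name]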
Example #3
def _create_model(model_name):
    if model_name == "vgg":
        model = cifar10vgg(n_reps=3,
                           train=False,
                           weight_file="data/cifar10vgg.h5")
    elif model_name == "vgg19_89acc":
        model = general_vgg(
            n_reps=4,
            train=False,
            weight_file="cifar10_vgg19weights_init130-50-40_5e-3lr_30.h5")
    elif model_name == "vgg19_highestacc":
        model = general_vgg(
            n_reps=4,
            train=False,
            weight_file="cifar10_vgg19weights/cifar10_vgg19weights_init130-50-40-5e-3lr30-30_250.h5")
    elif model_name == "mlp5":
        model = MLP(train=False,
                    num_layers=5,
                    hidden_dim=1000,
                    weight_file="models/weights/mlp_l5_h1000.h5")
    elif model_name == "mlp8":
        model = MLP(train=False,
                    num_layers=8,
                    hidden_dim=1000,
                    weight_file="models/weights/mlp_l8_h1000.h5")
    elif model_name == "mlp12":
        model = MLP(train=False,
                    num_layers=12,
                    hidden_dim=1000,
                    weight_file="data/mlp.h5")
    else:
        # Fail fast on unknown names instead of hitting a NameError below.
        raise ValueError(f"unknown model name: {model_name!r}")

    return model
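An alternative to a long if/elif chain is a dict-based registry, which keeps each configuration in one place and makes the set of supported names explicit. A minimal sketch reusing two of the constructors shown above (the registry name and helper function are hypothetical):

# Hypothetical registry sketch; constructors and weight paths are the ones
# from the listing above.
_MODELS = {
    "mlp5": lambda: MLP(train=False, num_layers=5, hidden_dim=1000,
                        weight_file="models/weights/mlp_l5_h1000.h5"),
    "mlp8": lambda: MLP(train=False, num_layers=8, hidden_dim=1000,
                        weight_file="models/weights/mlp_l8_h1000.h5"),
}

def _create_model_from_registry(model_name):
    try:
        return _MODELS[model_name]()
    except KeyError:
        raise ValueError(f"unknown model name: {model_name!r}")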
Example #4
    def __init__(self, state_dim: int, action_space, hidden_dims: List[int],
                 state_normalizer: Optional[nn.Module], use_limited_entropy=False, use_tanh_squash=False,
                 use_state_dependent_std=False, **kwargs):
        super(Actor, self).__init__()
        self.state_dim = state_dim
        self.action_space = action_space
        self.hidden_dims = hidden_dims
        self.use_limited_entropy = use_limited_entropy
        self.use_tanh_squash = use_tanh_squash

        if isinstance(action_space, Box) or isinstance(action_space, MultiBinary):
            self.action_dim = action_space.shape[0]
        else:
            assert isinstance(action_space, Discrete)
            self.action_dim = action_space.n

        mlp_kwargs = kwargs.copy()
        mlp_kwargs['activation'] = kwargs.get('activation', 'relu')
        # The final activation reuses the 'activation' setting, so hidden and
        # last layers always share the same nonlinearity.
        mlp_kwargs['last_activation'] = kwargs.get('activation', 'relu')

        self.actor_feature = MLP(state_dim, hidden_dims[-1], hidden_dims[:-1], **mlp_kwargs)

        self.state_normalizer = state_normalizer or nn.Identity()

        self.actor_layer = TanhGaussainActorLayer(hidden_dims[-1], self.action_dim,
                                                  use_state_dependent_std)

        def init_(m):
            init(m, fanin_init, lambda x: nn.init.constant_(x, 0))

        self.actor_feature.init(init_, init_)
Example #5
    def __init__(self, num_layers, in_features, hidden_features, out_features, prop_depth, dropout=0.0, model_name='DE-GNN', type_norm='group'):
        super(GNNModel, self).__init__()
        self.num_layers, self.in_features, self.hidden_features, self.out_features, self.model_name = num_layers, in_features, hidden_features, out_features, model_name
        Layer = self.get_layer_class()
        self.act = nn.ReLU()
        self.dropout = nn.Dropout(p=dropout)
        self.layers = nn.ModuleList()
        self.type_norm = type_norm
        if self.model_name == 'DE-GNN':
            self.layers.append(Layer(in_channels=in_features, out_channels=hidden_features, K=prop_depth))
        elif self.model_name == 'GIN':
            self.layers.append(
                Layer(MLP(num_layers=2, input_dim=in_features, hidden_dim=hidden_features, output_dim=hidden_features)))
        else:
            self.layers.append(Layer(in_channels=in_features, out_channels=hidden_features))
        if self.num_layers > 1:
            for i in range(self.num_layers - 1):
                if self.model_name == 'DE-GNN':
                    self.layers.append(Layer(in_channels=hidden_features, out_channels=hidden_features, K=prop_depth))
                elif self.model_name == 'GIN':
                    self.layers.append(Layer(MLP(num_layers=2, input_dim=hidden_features, hidden_dim=hidden_features,
                                                 output_dim=hidden_features)))
                elif self.model_name == 'GAT':
                    self.layers.append(Layer(in_channels=hidden_features, out_channels=hidden_features, heads=8))
                else:
                    # for GCN and GraphSAGE
                    self.layers.append(Layer(in_channels=hidden_features, out_channels=hidden_features))
        # Build the normalization layers: group normalization with skip
        # connections, or plain LayerNorm.
        if self.type_norm == 'group':
            self.layer_norms = nn.ModuleList([batch_norm(hidden_features, 'group', skip_connect=True) for i in range(self.num_layers)])
        else:
            self.layer_norms = nn.ModuleList([nn.LayerNorm(hidden_features) for i in range(self.num_layers)])
        self.merger = nn.Linear(3 * hidden_features, hidden_features)
        self.feed_forward = FeedForwardNetwork(hidden_features, out_features)
Example #6
    def __init__(self,
                 edge_in_dim=None,
                 node_in_dim=None,
                 edge_out_dim=None,
                 node_out_dim=None,
                 node_fc_dims=None,
                 edge_fc_dims=None,
                 dropout_p=None,
                 use_batchnorm=None):
        super(MLPGraphIndependent, self).__init__()

        if node_in_dim is not None:
            self.node_mlp = MLP(input_dim=node_in_dim,
                                fc_dims=list(node_fc_dims) + [node_out_dim],
                                dropout_p=dropout_p,
                                use_batchnorm=use_batchnorm)
        else:
            self.node_mlp = None

        if edge_in_dim is not None:
            self.edge_mlp = MLP(input_dim=edge_in_dim,
                                fc_dims=list(edge_fc_dims) + [edge_out_dim],
                                dropout_p=dropout_p,
                                use_batchnorm=use_batchnorm)
        else:
            self.edge_mlp = None
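Since all arguments default to None, either MLP can be switched off independently. A hypothetical usage sketch with made-up dimensions, assuming the MLP class shown above:

# Nodes get a 32 -> 64 -> 16 MLP; edges stay untransformed because
# edge_in_dim is omitted.
encoder = MLPGraphIndependent(node_in_dim=32,
                              node_out_dim=16,
                              node_fc_dims=[64],
                              dropout_p=0.1,
                              use_batchnorm=True)
assert encoder.edge_mlp is None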
Example #7
File: spgat.py  Project: HaoZhongkai/gnn
    def _build_model(self):
        self.gconv = nn.ModuleList()
        self.classifier = MLP(self.num_mlp_layers, self.hidden_dim, int(self.hidden_dim / 2), self.output_dim)
        self.node_mlp = MLP(2, self.hidden_dim, self.hidden_dim, self.hidden_dim)

        self.gconv.append(AGraphATLayer(self.input_dim, self.hidden_dim, self.embeddim))
        for layer in range(self.num_layers - 1):
            self.gconv.append(AGraphATLayer(self.hidden_dim, self.hidden_dim, self.embeddim, self.shortcut))
        return
Example #8
    def __init__(self,
                 out_dim,
                 v_hdim,
                 cnn_fdim,
                 no_cnn=False,
                 frame_shape=(3, 64, 64),
                 mlp_dim=(300, 200),
                 cnn_type='resnet',
                 v_net_type='lstm',
                 v_net_param=None,
                 cnn_rs=True,
                 causal=False,
                 device=None):
        super().__init__()
        self.out_dim = out_dim
        self.cnn_fdim = cnn_fdim
        self.v_hdim = v_hdim
        self.no_cnn = no_cnn
        self.cnn_type = cnn_type
        self.frame_shape = frame_shape
        self.device = device

        if no_cnn:
            self.cnn = None
        else:
            self.frame_shape = (1, 32, 32, 64)  # only for ResNet-based models
            if v_net_param is None:
                v_net_param = {}
            spec = v_net_param.get('spec', 'resnet18')
            self.cnn = P2PsfNet(cnn_fdim,
                                device=self.device,
                                running_stats=cnn_rs,
                                spec=spec)
        self.v_net_type = v_net_type

        if v_net_type == 'lstm':
            self.v_net = RNN(cnn_fdim, v_hdim, v_net_type, bi_dir=not causal)
        elif v_net_type == 'tcn':
            if v_net_param is None:
                v_net_param = {}
            tcn_size = v_net_param.get('size', [64, 128])
            dropout = v_net_param.get('dropout', 0.2)
            kernel_size = v_net_param.get('kernel_size', 3)
            assert tcn_size[-1] == v_hdim
            self.v_net = TemporalConvNet(cnn_fdim,
                                         tcn_size,
                                         kernel_size=kernel_size,
                                         dropout=dropout,
                                         causal=causal)
        if self.v_net_type == 'no_lstm':
            self.mlp = MLP(self.cnn_fdim, mlp_dim, 'relu')
        else:
            self.mlp = MLP(v_hdim, mlp_dim, 'relu')
        self.linear = nn.Linear(self.mlp.out_dim, out_dim)
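A detail in the no_lstm branch: strings must be compared with ==, not is. The is operator tests object identity, which only appears to work for literals because of interning (CPython even emits a SyntaxWarning for is against a literal since Python 3.8). A quick illustration:

a = "no_lstm"
b = "".join(["no_", "lstm"])  # same value, built at runtime
print(a == b)  # True: equal values
print(a is b)  # typically False in CPython: two distinct objects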
Example #9
    def __init__(self, num_layers, num_mlp_layers, input_dim, hidden_dim,
                 output_dim, final_dropout, learn_eps, graph_pooling_type,
                 neighbor_pooling_type, device):
        '''
            num_layers: number of layers in the neural networks (INCLUDING the input layer)
            num_mlp_layers: number of layers in mlps (EXCLUDING the input layer)
            input_dim: dimensionality of input features
            hidden_dim: dimensionality of hidden units at ALL layers
            output_dim: number of classes for prediction
            final_dropout: dropout ratio on the final linear layer
            learn_eps: If True, learn epsilon to distinguish center nodes from neighboring nodes. If False, aggregate neighbors and center nodes altogether.
            neighbor_pooling_type: how to aggregate neighbors (sum, average, or max)
            graph_pooling_type: how to pool all the nodes in a graph (sum or average)
            device: which device to use
        '''

        super(GraphCNN, self).__init__()

        self.final_dropout = final_dropout
        self.device = device
        self.num_layers = num_layers
        self.graph_pooling_type = graph_pooling_type
        self.neighbor_pooling_type = neighbor_pooling_type
        self.learn_eps = learn_eps
        self.eps = nn.Parameter(torch.zeros(self.num_layers - 1))

        ###List of MLPs
        self.mlps = torch.nn.ModuleList()

        ###List of batchnorms applied to the output of MLP (input of the final prediction linear layer)
        self.batch_norms = torch.nn.ModuleList()

        for layer in range(self.num_layers - 1):
            if layer == 0:
                self.mlps.append(
                    MLP(num_mlp_layers, input_dim, hidden_dim, hidden_dim))
            else:
                self.mlps.append(
                    MLP(num_mlp_layers, hidden_dim, hidden_dim, hidden_dim))

            self.batch_norms.append(nn.BatchNorm1d(hidden_dim))

        # Linear functions that map the hidden representation at different layers into a prediction score
        self.linears_prediction = torch.nn.ModuleList()
        for layer in range(num_layers):
            if layer == 0:
                self.linears_prediction.append(nn.Linear(
                    input_dim, output_dim))
            else:
                self.linears_prediction.append(
                    nn.Linear(hidden_dim, output_dim))
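A hedged instantiation sketch with illustrative GIN-style hyperparameters (forward() and the MLP class are assumed to follow this reference-style implementation). Note that num_layers counts the input layer, so five layers yield four MLPs but five per-layer prediction heads:

model = GraphCNN(num_layers=5, num_mlp_layers=2, input_dim=7, hidden_dim=64,
                 output_dim=2, final_dropout=0.5, learn_eps=True,
                 graph_pooling_type="sum", neighbor_pooling_type="sum",
                 device=torch.device("cpu"))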
Example #10
    def __init__(self, device, type_vocab, type_id_dict, word_vec_dim,
                 lstm_dim, mlp_hidden_dim, type_embed_dim):
        super(SRLFET, self).__init__()

        # TODO: move this to another class
        self.type_vocab, self.type_id_dict = type_vocab, type_id_dict
        self.l1_type_indices, self.l1_type_vec, self.child_type_vecs = fetutils.build_hierarchy_vecs(
            self.type_vocab, self.type_id_dict)
        self.n_types = len(self.type_vocab)

        self.device = device
        self.word_vec_dim = word_vec_dim
        self.lstm_dim = lstm_dim

        self.lstm1 = nn.LSTM(input_size=self.word_vec_dim,
                             hidden_size=self.lstm_dim,
                             bidirectional=False)
        self.lstm_hidden1 = None
        self.lstm2 = nn.LSTM(input_size=self.word_vec_dim,
                             hidden_size=self.lstm_dim,
                             bidirectional=False)
        self.lstm_hidden2 = None

        self.type_embed_dim = type_embed_dim
        self.type_embeddings = torch.tensor(np.random.normal(
            scale=0.01,
            size=(type_embed_dim, self.n_types)).astype(np.float32),
                                            device=self.device,
                                            requires_grad=True)
        self.type_embeddings = nn.Parameter(self.type_embeddings)

        self.mlp = MLP(2, 2 * self.word_vec_dim + 2 * self.lstm_dim,
                       self.type_embed_dim, mlp_hidden_dim)
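The two-step construction above (a requires_grad tensor wrapped in nn.Parameter) can be collapsed: nn.Parameter sets requires_grad=True by itself, and torch.randn avoids the NumPy round-trip. An equivalent sketch:

self.type_embeddings = nn.Parameter(
    0.01 * torch.randn(self.type_embed_dim, self.n_types, device=self.device))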
Example #11
    def __init__(self, model, output_size, classif, end_classif=True):
        super().__init__()
        self.net = model
        self.c_1 = MLP(**classif)
        self.end_classif = end_classif
        if self.end_classif:
            self.c_2 = nn.Linear(output_size, output_size)
Example #12
def DefaultCifar10CNN(device):
    cnn = CNN(in_features=(32, 32, 3),
              out_features=10,
              conv_filters=[32, 16],
              conv_kernel_size=[5, 5],
              conv_strides=[1, 1],
              conv_pad=[0, 0],
              max_pool_kernels=[(2, 2), (2, 2)],
              max_pool_strides=[2, 2],
              use_dropout=False,
              use_batch_norm=False,
              actv_func=["leakyrelu", "leakyrelu"],
              device=device)
    # Create MLP
    # Calculate the input shape
    s = cnn.GetCurShape()
    in_features = s[0] * s[1] * s[2]

    mlp = MLP(in_features,
              10, [120, 84], ["leakyrelu", "leakyrelu"],
              use_batch_norm=False,
              use_dropout=False,
              use_softmax=False,
              device=device)

    # mlp = DefaultCifar10MLP(device=device, in_features=in_features)

    cnn.AddMLP(mlp)

    return cnn
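A hypothetical smoke test for the factory; it assumes the project's CNN behaves as a standard nn.Module over NCHW batches, which is not verifiable from this snippet alone:

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
cnn = DefaultCifar10CNN(device)
logits = cnn(torch.randn(8, 3, 32, 32, device=device))  # a batch of 8 CIFAR-10 images
print(logits.shape)  # expected: torch.Size([8, 10])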
Example #13
    def __init__(self, in_features, mlp_features):
        """
        SetPartitionTri model.
        """
        super().__init__()
        cfg = dict(mlp_with_relu=False)
        self.mlp = MLP(in_features=in_features, feats=mlp_features, cfg=cfg)
        self.tensor_1 = torch.tensor(1., device='cuda')
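Hard-coding device='cuda' pins the constant to the GPU even when the module itself is moved elsewhere. A device-agnostic sketch: registering the constant as a buffer lets it follow model.to(device) and be saved in the state_dict:

# inside __init__, instead of a plain attribute pinned to 'cuda':
self.register_buffer('tensor_1', torch.tensor(1.))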
Example #14
File: rubi.py  Project: antolu/RUBi
    def __init__(self, base_vqa):
        super().__init__()
        self.model = base_vqa

        dimensions = [2048, 2048, 3000]
        self.mlp = MLP(4800, dimensions)

        self.slp = nn.Linear(3000, 3000)
Example #15
def main(cfg):
    device = set_env(cfg)

    logging.info('Loading the dataset.')
    train_criterion, val_criterion = get_criterion(cfg.optimization.criterion)
    train_dataloader, val_dataloader = get_dataloader(cfg)

    model = MLP(**cfg.network).to(device)
    logging.info(f'Constructing model on the {device}:{cfg.CUDA_DEVICE}.')
    logging.info(model)

    # Set total steps for onecycleLR and cosineLR
    cfg.optimization.total_steps = len(train_dataloader) * cfg.optimization.epoch
    cfg.optimization.onecycle_scheduler.total_steps = \
        cfg.optimization.cosine_scheduler.T_max = cfg.optimization.total_steps

    optimizer, scheduler = get_optimization(cfg, model)

    best_loss = float("inf")
    for epoch in range(cfg.optimization.epoch):
        train_epoch(model,
                    train_dataloader,
                    train_criterion,
                    optimizer,
                    scheduler,
                    device,
                    epoch,
                    cfg)
        if cfg.optimization.scheduler in ['exp', 'step']:
            scheduler.step()

        val_loss = valid_epoch(model,
                               val_dataloader,
                               val_criterion,
                               device,
                               epoch,
                               cfg)
        if val_loss < best_loss:
            best_loss = val_loss
            torch.save(model.state_dict(), '{}/best_model.pth'.format(cfg.log_dir))
            logging.info(f'New Main Loss {best_loss}')
    torch.save(model.state_dict(), '{}/last_model.pth'.format(cfg.log_dir))

    logging.info('Best Main Loss {}'.format(best_loss))
    logging.info(cfg)
    pickle.dump(cfg, open('{}/config.pkl'.format(cfg.log_dir), 'wb'))
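The total_steps bookkeeping above exists because one-cycle and cosine schedules need the full horizon up front. A minimal stand-alone sketch with the stock PyTorch schedulers (the model and step counts are placeholders):

import torch

net = torch.nn.Linear(4, 2)
opt = torch.optim.SGD(net.parameters(), lr=0.1)
total_steps = 100 * 10  # len(train_dataloader) * number of epochs
sched = torch.optim.lr_scheduler.OneCycleLR(opt, max_lr=0.1, total_steps=total_steps)
for _ in range(total_steps):
    opt.step()
    sched.step()  # stepped per batch, unlike the per-epoch 'exp'/'step' schedulers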
Example #16
File: spgat.py  Project: HaoZhongkai/gnn
    def _build_model(self):
        self.gconv = nn.ModuleList()
        self.classifier = MLP(self.num_mlp_layers, self.hidden_dim, int(self.hidden_dim/2), self.output_dim)
        self.embedding_layer = nn.Linear(self.input_dim, self.hidden_dim, bias=True)  # embed the raw input into the hidden feature space

        for layer in range(self.num_layers):
            self.gconv.append(ASPGATLayer(self.hidden_dim, self.hidden_dim, self.embeddim, self.k_order))

        return