Example #1
    def __init__(self, regression=False):
        super(Classifier, self).__init__()
        self.regression = regression
        if cmd_args.gm == 'DGCNN':
            model = DGCNN
        else:
            print('unknown gm %s' % cmd_args.gm)
            sys.exit()

        if cmd_args.gm == 'DGCNN':
            self.gnn = model(latent_dim=cmd_args.latent_dim,
                             output_dim=cmd_args.out_dim,
                         num_node_feats=cmd_args.feat_dim + cmd_args.attr_dim,
                             num_edge_feats=cmd_args.edge_feat_dim,
                             k=cmd_args.sortpooling_k,
                             conv1d_activation=cmd_args.conv1d_activation)
        out_dim = cmd_args.out_dim
        if out_dim == 0:
            if cmd_args.gm == 'DGCNN':
                out_dim = self.gnn.dense_dim
            else:
                out_dim = cmd_args.latent_dim
        self.mlp = MLPClassifier(input_size=out_dim,
                                 hidden_size=cmd_args.hidden,
                                 num_class=cmd_args.num_class,
                                 with_dropout=cmd_args.dropout)
        if regression:
            self.mlp = MLPRegression(input_size=out_dim,
                                     hidden_size=cmd_args.hidden,
                                     with_dropout=cmd_args.dropout)
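All of these examples follow the same pattern: a graph embedding module (DGCNN, GXN, GUNet, ...) produces a fixed-size graph vector, and an MLP head maps it to class scores, with out_dim == 0 meaning "fall back to the embedding's own dense output size". Below is a minimal runnable sketch of that pattern; ToyEmbedding and ToyClassifier are illustrative stand-ins, not the projects' actual classes.

# Minimal sketch of the shared embedding -> MLP pattern (names are stand-ins).
import torch
import torch.nn as nn

class ToyEmbedding(nn.Module):
    """Stand-in for DGCNN/GXN/GUNet: maps node features to a graph vector."""
    def __init__(self, num_node_feats, dense_dim=97):
        super().__init__()
        self.dense_dim = dense_dim          # analogous to self.gnn.dense_dim above
        self.lin = nn.Linear(num_node_feats, dense_dim)

    def forward(self, node_feats):
        # mean-pool per-node embeddings into a single graph embedding
        return self.lin(node_feats).mean(dim=0, keepdim=True)

class ToyClassifier(nn.Module):
    def __init__(self, num_node_feats, num_class, out_dim=0):
        super().__init__()
        self.gnn = ToyEmbedding(num_node_feats)
        if out_dim == 0:                    # 0 means "use the embedding's own size"
            out_dim = self.gnn.dense_dim
        self.mlp = nn.Linear(out_dim, num_class)

    def forward(self, node_feats):
        return self.mlp(self.gnn(node_feats))

model = ToyClassifier(num_node_feats=16, num_class=2)
logits = model(torch.randn(10, 16))         # 10 nodes, 16 features each
print(logits.shape)                         # torch.Size([1, 2])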
Example #2
File: main.py Project: qqqlz/GXN
    def __init__(self):
        super(Classifier, self).__init__()
        model = GXN

        print("latent dim is ", cmd_args.latent_dim)

        self.s2v = model(latent_dim=cmd_args.latent_dim,
                         output_dim=cmd_args.out_dim,
                         num_node_feats=cmd_args.feat_dim + cmd_args.attr_dim,
                         num_edge_feats=0,
                         k=cmd_args.sortpooling_k,
                         ks=[cmd_args.k1, cmd_args.k2],
                         cross_weight=cmd_args.cross_weight,
                         fuse_weight=cmd_args.fuse_weight,
                         R=cmd_args.Rhop)

        print("num_node_feats: ", cmd_args.feat_dim + cmd_args.attr_dim)
        out_dim = cmd_args.out_dim
        if out_dim == 0:
            out_dim = self.s2v.dense_dim

        self.mlp = MLPClassifier(input_size=out_dim,
                                 hidden_size=cmd_args.hidden,
                                 num_class=cmd_args.num_class,
                                 with_dropout=cmd_args.dropout)
Example #3
    def __init__(self):
        super(Classifier, self).__init__()
        if cmd_args.gm == 'mean_field':
            model = EmbedMeanField
        elif cmd_args.gm == 'loopy_bp':
            model = EmbedLoopyBP
        elif cmd_args.gm == 'DGCNN':
            model = DGCNN
        else:
            print('unknown gm %s' % cmd_args.gm)
            sys.exit()

        if cmd_args.gm == 'DGCNN':
            self.s2v = model(latent_dim=cmd_args.latent_dim,
                             output_dim=cmd_args.out_dim,
                             num_node_feats=cmd_args.feat_dim + cmd_args.attr_dim,
                             num_edge_feats=0,
                             k=cmd_args.sortpooling_k)
        else:
            self.s2v = model(latent_dim=cmd_args.latent_dim,
                             output_dim=cmd_args.out_dim,
                             num_node_feats=cmd_args.feat_dim,
                             num_edge_feats=0,
                             max_lv=cmd_args.max_lv)
        out_dim = cmd_args.out_dim
        if out_dim == 0:
            if cmd_args.gm == 'DGCNN':
                out_dim = self.s2v.dense_dim
            else:
                out_dim = cmd_args.latent_dim
        self.mlp = MLPClassifier(input_size=out_dim,
                                 hidden_size=cmd_args.hidden,
                                 num_class=cmd_args.num_class,
                                 with_dropout=cmd_args.dropout)
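The cmd_args used throughout these snippets is a module-level argparse namespace (in pytorch_DGCNN it is defined in util.py). The sketch below shows one plausible way such a namespace is built; the flag names, types, and defaults mirror the attributes used above but are assumptions, not the verbatim CLI of any of these projects.

# Hedged sketch of a cmd_args-style config namespace (flags are assumptions).
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-gm', default='DGCNN', help='mean_field | loopy_bp | DGCNN')
parser.add_argument('-latent_dim', type=int, default=64, help='graph-conv layer size')
parser.add_argument('-out_dim', type=int, default=0, help='0 = use the embedding dense_dim')
parser.add_argument('-feat_dim', type=int, default=0, help='node tag (one-hot) dimension')
parser.add_argument('-attr_dim', type=int, default=0, help='continuous node attribute dimension')
parser.add_argument('-sortpooling_k', type=int, default=30, help='k for SortPooling')
parser.add_argument('-hidden', type=int, default=128, help='MLP hidden size')
parser.add_argument('-num_class', type=int, default=2)
parser.add_argument('-dropout', action='store_true', help='enable dropout in the MLP head')
parser.add_argument('-max_lv', type=int, default=4, help='rounds for mean_field/loopy_bp')

cmd_args, _ = parser.parse_known_args()
print(cmd_args.gm, cmd_args.out_dim)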
Example #4
    def __init__(self, num_node_feats, num_class, num_edge_feats=0, regression=False, with_dropout=False):
        super(DGCNN, self).__init__()
        self.regression = regression
        self.gnn = DGCNNEmbedding(output_dim=1024,
                                  num_node_feats=num_node_feats)
        self.mlp = MLPClassifier(input_size=1024, hidden_size=100, num_class=num_class, with_dropout=with_dropout)
        if regression:
            self.mlp = MLPRegression(input_size=1024, hidden_size=100, with_dropout=with_dropout)
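Unlike the cmd_args-driven variants, this one is self-configuring: the embedding width (1024) and MLP hidden size (100) are hard-coded in the constructor, so a caller only supplies num_node_feats and num_class, and passes regression=True to swap the MLPClassifier head for MLPRegression.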
Example #5
    def __init__(self):
        super(Classifier, self).__init__()
        model = GUNet

        self.s2v = model(latent_dim=cmd_args.latent_dim,
                         output_dim=cmd_args.out_dim,
                         num_node_feats=cmd_args.feat_dim + cmd_args.attr_dim,
                         num_edge_feats=0,
                         k=cmd_args.sortpooling_k)
        out_dim = cmd_args.out_dim
        if out_dim == 0:
            out_dim = self.s2v.dense_dim
        self.mlp = MLPClassifier(input_size=out_dim,
                                 hidden_size=cmd_args.hidden,
                                 num_class=cmd_args.num_class,
                                 with_dropout=cmd_args.dropout)
Example #6
    def __init__(self):
        super(Classifier, self).__init__()
        nodeFeatDim = gHP['featureDim'] + gHP['nodeTagDim']
        if gHP['poolingType'] == 'adaptive':
            self.s2v = DGCNN(latentDims=gHP['graphConvSize'],
                             outputDim=gHP['s2vOutDim'],
                             numNodeFeats=nodeFeatDim,
                             k=gHP['poolingK'],
                             conv2dChannel=gHP['conv2dChannels'],
                             poolingType=gHP['poolingType'])
            gHP['vggInputDim'] = (gHP['poolingK'], self.s2v.totalLatentDim,
                                  gHP['conv2dChannels'])
            self.mlp = getGraphVggBn(inputDims=gHP['vggInputDim'],
                                     hidden=gHP['mlpHidden'],
                                     numClasses=gHP['numClasses'],
                                     dropOutRate=gHP['dropOutRate'])
        else:
            self.s2v = DGCNN(latentDims=gHP['graphConvSize'],
                             outputDim=gHP['s2vOutDim'],
                             numNodeFeats=nodeFeatDim,
                             k=gHP['poolingK'],
                             poolingType='sort',
                             endingLayers=gHP['remLayers'],
                             conv1dChannels=gHP['convChannels'],
                             conv1dKernSz=gHP['convKernSizes'],
                             conv1dMaxPl=gHP['convMaxPool'])
            if gHP['s2vOutDim'] == 0:
                gHP['s2vOutDim'] = self.s2v.denseDim

            if gHP['mlpType'] == 'rap':
                self.mlp = RecallAtPrecision(input_size=gHP['s2vOutDim'],
                                             hidden_size=gHP['mlpHidden'],
                                             alpha=0.6,
                                             dropout=gHP['dropOutRate'])
            elif gHP['mlpType'] == 'logistic_reg':
                self.mlp = LogisticRegression(input_size=gHP['s2vOutDim'],
                                              num_labels=gHP['numClasses'])
            else:
                self.mlp = MLPClassifier(input_size=gHP['s2vOutDim'],
                                         hidden_size=gHP['mlpHidden'],
                                         num_class=gHP['numClasses'],
                                         dropout=gHP['dropOutRate'])
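The sortpooling_k/poolingK argument threading through these constructors refers to DGCNN's SortPooling layer (Zhang et al., AAAI 2018): nodes are sorted by their last feature channel, then truncated or zero-padded to exactly k rows, so graphs of any size yield a fixed-size tensor for the downstream 1-D convolutions. Below is a minimal sketch of that step; the function name and signature are illustrative, not any of these projects' code.

# Hedged sketch of SortPooling: fixed-size output for variable-size graphs.
import torch

def sort_pool(node_feats: torch.Tensor, k: int) -> torch.Tensor:
    # node_feats: (num_nodes, channels); sort descending on the last channel
    order = node_feats[:, -1].argsort(descending=True)
    pooled = node_feats[order]
    if pooled.size(0) >= k:
        return pooled[:k]                       # truncate to the top-k nodes
    pad = pooled.new_zeros(k - pooled.size(0), pooled.size(1))
    return torch.cat([pooled, pad], dim=0)      # zero-pad small graphs

x = torch.randn(7, 4)                           # 7 nodes, 4 channels
print(sort_pool(x, k=10).shape)                 # torch.Size([10, 4])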
Example #7
File: main.py Project: zjunet/E-Net
    def __init__(self, regression=False):
        super(Classifier, self).__init__()

        self.regression = regression
        model = Enet
        self.gnn = model(latent_dim=cmd_args.latent_dim,
                         output_dim=cmd_args.out_dim,
                         num_node_feats=cmd_args.feat_dim + cmd_args.attr_dim,
                         num_edge_feats=cmd_args.edge_feat_dim,
                         total_num_nodes=cmd_args.total_num_nodes,
                         total_num_tag=cmd_args.feat_dim,
                         k=cmd_args.sortpooling_k,
                         conv1d_activation=cmd_args.conv1d_activation,
                         alpha=0.1,
                         sumsort=cmd_args.sumsort,
                         noise_matrix=cmd_args.noise_matrix,
                         reg_smooth=cmd_args.reg_smooth,
                         smooth_coef=cmd_args.smooth_coef,
                         trainable_noise=cmd_args.trainable_noise,
                         use_sig=cmd_args.use_sig,
                         use_soft=cmd_args.use_soft,
                         noise_bias=cmd_args.noise_bias,
                         noise_init=cmd_args.noise_init)
        out_dim = cmd_args.out_dim
        if out_dim == 0:
            out_dim = self.gnn.dense_dim
        if cmd_args.nodefeat_lp:
            out_dim += (2 * cmd_args.attr_dim)
        self.mlp = MLPClassifier(input_size=out_dim,
                                 hidden_size=cmd_args.hidden,
                                 num_class=cmd_args.num_class,
                                 with_dropout=cmd_args.dropout)
        if regression:
            self.mlp = MLPRegression(input_size=out_dim,
                                     hidden_size=cmd_args.hidden,
                                     with_dropout=cmd_args.dropout)
Example #8
    def __init__(self):
        super(Classifier, self).__init__()

        self.rank_loss = args.rank_loss
        self.model = args.model
        self.eps = args.eps
        if args.pool == 'mean':
            self.pool = self.mean_pool
        elif args.pool == 'max':
            self.pool = self.max_pool

        if self.model == 'gcn':
            self.num_layers = args.gcn_layers
            self.gcns = nn.ModuleList()
            x_size = args.input_dim
            for _ in range(self.num_layers):
                self.gcns.append(
                    GCNBlock(x_size, args.hidden_dim, args.bn, args.gcn_res,
                             args.gcn_norm, args.dropout, args.relu))
                x_size = args.hidden_dim
            self.mlp = MLPClassifier(args.hidden_dim, args.mlp_hidden,
                                     args.num_class, args.mlp_layers,
                                     args.dropout)

        else:
            self.margin = args.margin
            self.agcn_res = args.agcn_res
            self.single_loss = args.single_loss
            self.num_layers = args.num_layers
            if args.arch == 1:
                assert args.gcn_layers % self.num_layers == 0
                gcn_layer_list = [args.gcn_layers // self.num_layers] * self.num_layers
            elif args.arch == 2:
                gcn_layer_list = [args.gcn_layers] + [1] * (self.num_layers - 1)

            self.agcns = nn.ModuleList()
            x_size = args.input_dim

            for i in range(args.num_layers):
                self.agcns.append(
                    AGCNBlock(args, x_size, args.hidden_dim, gcn_layer_list[i],
                              args.dropout, args.relu))
                x_size = self.agcns[-1].pass_dim
                if args.model == 'diffpool':
                    args.diffpool_k = int(
                        math.ceil(args.diffpool_k * args.percent))
            self.mlps = nn.ModuleList()
            if not args.concat:
                for i in range(args.num_layers):
                    self.mlps.append(
                        MLPClassifier(input_size=args.hidden_dim,
                                      hidden_size=args.mlp_hidden,
                                      num_class=args.num_class,
                                      num_layers=args.mlp_layers,
                                      dropout=args.dropout))
            else:
                self.mlps = MLPClassifier(input_size=args.hidden_dim * self.num_layers,
                                          hidden_size=args.mlp_hidden,
                                          num_class=args.num_class,
                                          num_layers=args.mlp_layers,
                                          dropout=args.dropout)
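A hedged sketch of what self.mean_pool and self.max_pool above plausibly do: collapse the node dimension of a padded (batch, max_nodes, hidden) tensor, using a mask so padding nodes do not distort the statistics. The signatures below are assumptions, not this project's actual code.

# Hedged sketch of masked mean/max graph pooling (signatures are assumptions).
import torch

def mean_pool(x: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
    # x: (batch, max_nodes, hidden); mask: (batch, max_nodes), 1 = real node
    summed = (x * mask.unsqueeze(-1)).sum(dim=1)
    counts = mask.sum(dim=1, keepdim=True).clamp(min=1)
    return summed / counts

def max_pool(x: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
    # fill padded positions with -inf so they never win the max
    neg_inf = torch.finfo(x.dtype).min
    masked = x.masked_fill(mask.unsqueeze(-1) == 0, neg_inf)
    return masked.max(dim=1).values

x = torch.randn(2, 5, 8)
mask = torch.tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]], dtype=torch.float)
print(mean_pool(x, mask).shape, max_pool(x, mask).shape)  # (2, 8) (2, 8)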