def __init__(self, regression=False):
    """Build the graph classifier: a DGCNN embedding followed by an MLP head.

    Args:
        regression: when True, use an MLPRegression head (scalar output)
            instead of the MLPClassifier head.

    Side effects:
        Prints an error and terminates the process via sys.exit() when
        cmd_args.gm names an unsupported graph model.
    """
    super(Classifier, self).__init__()
    self.regression = regression

    # Only DGCNN is supported; bail out early so everything below can
    # assume gm == 'DGCNN' (the original re-checked this twice and carried
    # an unreachable latent_dim fallback).
    if cmd_args.gm != 'DGCNN':
        print('unknown gm %s' % cmd_args.gm)
        sys.exit()

    self.gnn = DGCNN(latent_dim=cmd_args.latent_dim,
                     output_dim=cmd_args.out_dim,
                     num_node_feats=cmd_args.feat_dim + cmd_args.attr_dim,
                     num_edge_feats=cmd_args.edge_feat_dim,
                     k=cmd_args.sortpooling_k,
                     conv1d_activation=cmd_args.conv1d_activation)

    # out_dim == 0 means "use the embedding's own dense output size".
    out_dim = cmd_args.out_dim
    if out_dim == 0:
        out_dim = self.gnn.dense_dim

    # Build exactly one head (the original constructed a classifier and
    # then discarded it whenever regression was requested).
    if regression:
        self.mlp = MLPRegression(input_size=out_dim,
                                 hidden_size=cmd_args.hidden,
                                 with_dropout=cmd_args.dropout)
    else:
        self.mlp = MLPClassifier(input_size=out_dim,
                                 hidden_size=cmd_args.hidden,
                                 num_class=cmd_args.num_class,
                                 with_dropout=cmd_args.dropout)
def __init__(self, num_node_feats, num_class, num_edge_feats=0,
             regression=False, with_dropout=False):
    """Wrap a DGCNN embedding network with an MLP prediction head.

    Args:
        num_node_feats: dimensionality of per-node input features.
        num_class: number of output classes for the classification head.
        num_edge_feats: accepted for interface compatibility; not used here.
        regression: when True, the head is swapped for an MLPRegression.
        with_dropout: forwarded to the MLP head.
    """
    super(DGCNN, self).__init__()
    self.regression = regression

    # Fixed sizes used by both the embedding output and the head input.
    embed_dim = 1024
    hidden_dim = 100

    self.gnn = DGCNNEmbedding(output_dim=embed_dim,
                              num_node_feats=num_node_feats)
    self.mlp = MLPClassifier(input_size=embed_dim,
                             hidden_size=hidden_dim,
                             num_class=num_class,
                             with_dropout=with_dropout)
    if regression:
        # Replace the classification head with a regression head.
        self.mlp = MLPRegression(input_size=embed_dim,
                                 hidden_size=hidden_dim,
                                 with_dropout=with_dropout)
def __init__(self, regression=False):
    """Build a classifier around the Enet graph embedding.

    Args:
        regression: when True, the head is swapped for an MLPRegression.

    All other hyperparameters are read from the module-level cmd_args.
    """
    super(Classifier, self).__init__()
    self.regression = regression

    # Collect the (many) Enet hyperparameters in one place before the call.
    gnn_kwargs = dict(
        latent_dim=cmd_args.latent_dim,
        output_dim=cmd_args.out_dim,
        num_node_feats=cmd_args.feat_dim + cmd_args.attr_dim,
        num_edge_feats=cmd_args.edge_feat_dim,
        total_num_nodes=cmd_args.total_num_nodes,
        total_num_tag=cmd_args.feat_dim,
        k=cmd_args.sortpooling_k,
        conv1d_activation=cmd_args.conv1d_activation,
        alpha=0.1,
        sumsort=cmd_args.sumsort,
        noise_matrix=cmd_args.noise_matrix,
        reg_smooth=cmd_args.reg_smooth,
        smooth_coef=cmd_args.smooth_coef,
        trainable_noise=cmd_args.trainable_noise,
        use_sig=cmd_args.use_sig,
        use_soft=cmd_args.use_soft,
        noise_bias=cmd_args.noise_bias,
        noise_init=cmd_args.noise_init,
    )
    self.gnn = Enet(**gnn_kwargs)

    # out_dim == 0 means "use the embedding's own dense output size".
    out_dim = cmd_args.out_dim
    if out_dim == 0:
        out_dim = self.gnn.dense_dim

    # Link-prediction node features widen the head's input.
    if cmd_args.nodefeat_lp:
        out_dim += 2 * cmd_args.attr_dim

    self.mlp = MLPClassifier(input_size=out_dim,
                             hidden_size=cmd_args.hidden,
                             num_class=cmd_args.num_class,
                             with_dropout=cmd_args.dropout)
    if regression:
        # Replace the classification head with a regression head.
        self.mlp = MLPRegression(input_size=out_dim,
                                 hidden_size=cmd_args.hidden,
                                 with_dropout=cmd_args.dropout)