Example #1
    def __init__(self, args, loadable_state_dict=None):
        super().__init__()
        self.args = args
        n_classes = args.meta['n_classes']
        s = args.meta['s']
        adj = quadrant_adjacency(s)
        nout = len(adj)
        cout = 4
        net = []
        net.append(
            fgl.make_weight_normed_FGL(
                1,
                s * s,
                cout,
                nout,
                adj,
                op_order="213",
                reduction="sum",
                optimization="tree",
            ))
        self.net = nn.Sequential(*net)
        self.fc = nn.Sequential(nn.Linear(nout * cout, n_classes))
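
The snippet includes only the constructor. A minimal forward-pass sketch, assuming each FGL maps a dense (batch, channels_in, nodes_in) tensor to (batch, channels_out, nodes_out) — the fgl API is inferred from the constructor arguments above, so treat the wiring as a guess:

    def forward(self, x):
        # Hypothetical wiring -- forward is not part of the snippet.
        # x is assumed to be (batch, 1, s * s): one channel per grid node.
        z = self.net(x)                # quadrant FGL -> (batch, cout, nout)
        z = z.view(z.size(0), -1)      # flatten to (batch, nout * cout)
        return self.fc(z)              # -> (batch, n_classes)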
Example #2
    def __init__(self,
                 args,
                 loadable_state_dict=None,
                 z_size=32,
                 dropout_rate=0.5):
        super().__init__()
        self.args = args
        meta = self.args.meta
        wtree = args.wtree
        self.z_size = z_size
        in_features = constants.original_masked_nnz

        self.node_sizes = [in_features, 1024, 256]
        self.channel_sizes = [1, 10, 64]  # That mapping should be fairly fast
        # self.channel_sizes = [1, 2, 4, 8]  # That mapping should be fairly fast
        adj_list = []

        cur_level = wtree.get_leaves()
        for next_count in self.node_sizes[1:]:
            cur_level, _, adj = wtree.get_level_and_adjacency(
                next_count, cur_level)
            # cur_level, _, adj = ward_tree.go_up_to_reduce(cur_level, next_count)
            adj_list.append(adj)
        # adj_list contains adjacency lists from ~200k -> ... -> 256
        # we need to transpose each one and then reverse the list
        self.n_layers = len(self.channel_sizes) - 1
        OP_ORDER = args.op_order  # "132"
        REDUCTION = args.reduction
        OPTIMIZATION = args.optimization
        self.downsample0 = fgl.make_weight_normed_FGL(
            int(self.channel_sizes[0]),
            int(self.node_sizes[0]),
            int(self.channel_sizes[1]),
            int(self.node_sizes[1]),
            adj_list[0],
            op_order=OP_ORDER,
            reduction=REDUCTION,
            optimization=OPTIMIZATION,
        )
        self.activation0 = nn.Sequential()
        self.downsample1 = fgl.make_weight_normed_FGL(
            int(self.channel_sizes[1]),
            int(self.node_sizes[1]),
            int(self.channel_sizes[2]),
            int(self.node_sizes[2]),
            adj_list[1],
            op_order=OP_ORDER,
            reduction=REDUCTION,
            optimization=OPTIMIZATION,
        )
        self.activation1 = nn.Sequential()
        # self.downsample2 = fgl.make_weight_normed_FGL(
        #     int(self.channel_sizes[2]),
        #     int(self.node_sizes[2]),
        #     int(self.channel_sizes[3]),
        #     int(self.node_sizes[3]),
        #     adj_list[2],
        #     op_order=OP_ORDER,
        #     reduction=REDUCTION,
        #     optimization=OPTIMIZATION,
        # )
        # self.activation2 = nn.Sequential()
        self.fc = nn.Sequential(
            weight_norm(
                nn.Linear(self.node_sizes[-1] * self.channel_sizes[-1],
                          len(meta['c2i']))), )
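
Again, forward is not shown; a plausible encoder wiring under the same shape assumptions as in Example #1:

    def forward(self, x):
        # Hypothetical wiring -- forward is not part of the snippet.
        z = self.activation0(self.downsample0(x))  # (B, 1, ~200k) -> (B, 10, 1024)
        z = self.activation1(self.downsample1(z))  # -> (B, 64, 256)
        z = z.view(z.size(0), -1)                  # flatten channels * nodes
        return self.fc(z)                          # logits over len(meta['c2i'])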
Example #3
    def __init__(self,
                 args,
                 loadable_state_dict=None,
                 z_size=128,
                 dropout_rate=0.5):
        super().__init__()
        self.args = args
        meta = self.args.meta
        wtree = args.wtree
        self.z_size = z_size
        in_features = int(constants.original_masked_nnz)

        self.node_sizes = [in_features, 1024, 256, 32]

        self.channel_sizes = [1, 32, 64,
                              128]  # That mapping should be fairly fast
        # self.channel_sizes = [1, 2, 4, 8]

        incoming_densities = {
            1024: 0.00001,
            256: 0.001,
            32: 0.001,
        }
        cur_count = self.node_sizes[0]
        adj_list = []
        for next_count in self.node_sizes[1:]:
            adj_list.append(
                utils.random_graph_adjacency_list(
                    cur_count,
                    next_count,
                    density=incoming_densities[next_count]))
            cur_count = next_count

        # adj_list contains adjacency lists from ~200k -> ... -> 32
        # we need to transpose each one and then reverse the list
        self.n_layers = len(self.channel_sizes) - 1
        OP_ORDER = args.op_order  # "132"
        REDUCTION = args.reduction
        OPTIMIZATION = args.optimization
        self.downsample0 = fgl.make_weight_normed_FGL(
            int(self.channel_sizes[0]),
            int(self.node_sizes[0]),
            int(self.channel_sizes[1]),
            int(self.node_sizes[1]),
            adj_list[0],
            op_order=OP_ORDER,
            reduction=REDUCTION,
            optimization=OPTIMIZATION,
        )
        self.activation0 = nn.Sequential()
        self.downsample1 = fgl.make_weight_normed_FGL(
            int(self.channel_sizes[1]),
            int(self.node_sizes[1]),
            int(self.channel_sizes[2]),
            int(self.node_sizes[2]),
            adj_list[1],
            op_order=OP_ORDER,
            reduction=REDUCTION,
            optimization=OPTIMIZATION,
        )
        self.activation1 = nn.Sequential()
        self.downsample2 = fgl.make_weight_normed_FGL(
            int(self.channel_sizes[2]),
            int(self.node_sizes[2]),
            int(self.channel_sizes[3]),
            int(self.node_sizes[3]),
            adj_list[2],
            op_order=OP_ORDER,
            reduction=REDUCTION,
            optimization=OPTIMIZATION,
        )
        self.activation2 = nn.Sequential()
        self.fc = nn.Sequential(
            weight_norm(
                nn.Linear(self.node_sizes[-1] * self.channel_sizes[-1],
                          len(meta['c2i']))), )
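
utils.random_graph_adjacency_list is not shown here. Given that FGL adjacency lists appear to hold one list of input-node indices per output node (len(adj) equals the output node count in Example #1), one plausible stand-in looks like this — a hypothetical sketch, not the actual utils implementation:

    import random

    def random_graph_adjacency_list(n_in, n_out, density):
        # Hypothetical stand-in for utils.random_graph_adjacency_list:
        # give each of the n_out output nodes about density * n_in
        # distinct, randomly chosen input nodes as incoming edges.
        k = max(1, int(density * n_in))
        return [random.sample(range(n_in), k) for _ in range(n_out)]

With density=0.00001 over ~200k inputs, each of the 1024 first-layer outputs would receive roughly 2 incoming edges.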
Example #4
    def __init__(self,
                 args,
                 loadable_state_dict=None,
                 z_size=128,
                 dropout_rate=0.5):
        super().__init__()
        self.args = args
        meta = self.args.meta
        wtree = args.wtree
        self.z_size = z_size
        in_features = constants.original_masked_nnz

        self.node_sizes = [
            in_features, z_size * 512, z_size * 128, z_size * 128, z_size
        ]
        self.channel_sizes = [1, 32, 64, 64,
                              128]  # That mapping should be fairly fast

        adj_list = []
        cur_level = wtree.get_leaves()
        cur_level, _, adj = wtree.get_level_and_adjacency(
            z_size * 512, cur_level)
        adj_list.append(adj)
        cur_level, _, adj = wtree.get_level_and_adjacency(
            z_size * 128, cur_level)
        adj_list.append(adj)
        adj_list.append(wtree.get_self_adj(cur_level))
        cur_level, _, adj = wtree.get_level_and_adjacency(z_size, cur_level)
        adj_list.append(adj)
        # adj_list contains adj list from ~200k->...->128
        # we need to transpose each one and then reverse the list
        self.n_layers = len(self.channel_sizes) - 1
        OP_ORDER = args.op_order  # "132"
        REDUCTION = args.reduction
        OPTIMIZATION = args.optimization
        self.downsample0 = fgl.make_weight_normed_FGL(
            int(self.channel_sizes[0]),
            int(self.node_sizes[0]),
            int(self.channel_sizes[1]),
            int(self.node_sizes[1]),
            adj_list[0],
            op_order=OP_ORDER,
            reduction=REDUCTION,
            optimization=OPTIMIZATION,
        )
        self.activation0 = nn.Sequential()
        self.downsample1 = fgl.make_weight_normed_FGL(
            int(self.channel_sizes[1]),
            int(self.node_sizes[1]),
            int(self.channel_sizes[2]),
            int(self.node_sizes[2]),
            adj_list[1],
            op_order=OP_ORDER,
            reduction=REDUCTION,
            optimization=OPTIMIZATION,
        )
        self.activation1 = nn.Sequential()
        self.downsample2 = fgl.make_weight_normed_FGL(
            int(self.channel_sizes[2]),
            int(self.node_sizes[2]),
            int(self.channel_sizes[3]),
            int(self.node_sizes[3]),
            adj_list[2],
            op_order=OP_ORDER,
            reduction=REDUCTION,
            optimization="packed0.3",
        )
        self.activation2 = nn.Sequential()
        self.downsample3 = fgl.make_weight_normed_FGL(
            int(self.channel_sizes[3]),
            int(self.node_sizes[3]),
            int(self.channel_sizes[4]),
            int(self.node_sizes[4]),
            adj_list[3],
            op_order=OP_ORDER,
            reduction=REDUCTION,
            optimization=OPTIMIZATION,
        )
        self.activation3 = nn.Sequential()
        self.fc = nn.Sequential(
            weight_norm(
                nn.Linear(self.node_sizes[-1] * self.channel_sizes[-1],
                          len(meta['c2i']))), )
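
A plausible four-stage forward for this variant. Note that adj_list[2] comes from wtree.get_self_adj, so downsample2 keeps both the channel count (64) and the node count (z_size * 128), remixing nodes over the self-adjacency with the "packed0.3" optimization:

    def forward(self, x):
        # Hypothetical wiring -- forward is not part of the snippet.
        z = self.activation0(self.downsample0(x))  # -> (B, 32, z_size * 512)
        z = self.activation1(self.downsample1(z))  # -> (B, 64, z_size * 128)
        z = self.activation2(self.downsample2(z))  # self-adjacency: shape unchanged
        z = self.activation3(self.downsample3(z))  # -> (B, 128, z_size)
        return self.fc(z.view(z.size(0), -1))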
Example #5
    def __init__(self,
                 args,
                 loadable_state_dict=None,
                 z_size=128,
                 dropout_rate=0.5):
        super().__init__()
        self.args = args
        meta = self.args.meta
        wtree = args.wtree
        self.z_size = z_size
        out_features = constants.original_masked_nnz

        # self.node_sizes = [out_features, z_size * 512, z_size, 12]  # , z_size * 512, z_size * 128, z_size]
        # self.channel_sizes = [1, z_size // 16, z_size // 4, z_size]  # , 32, 64, 128]  # That mapping should be fairly fast

        self.node_sizes = [out_features, 512, 32]
        self.channel_sizes = [1, 32, 128]  # That mapping should be fairly fast

        cur_level = wtree.get_leaves()
        adj_list = []
        for next_count in self.node_sizes[1:]:
            cur_count = len(cur_level)
            cur_level, _, adj = wtree.get_level_and_adjacency(next_count,
                                                              cur_level,
                                                              n_regions=2)
            adj_list.append(
                utils.transpose_adj_list(next_count, cur_count, adj))

        self.n_layers = len(self.channel_sizes) - 1
        OP_ORDER = "213"  # args.op_order
        REDUCTION = "sum"  # args.reduction
        OPTIMIZATION = "packed1.0"  # args.optimization
        self.upsample0 = fgl.make_weight_normed_FGL(
            int(self.channel_sizes[-1]),
            int(self.node_sizes[-1]),
            int(self.channel_sizes[-2]),
            int(self.node_sizes[-2]),
            adj_list[-1],
            op_order=OP_ORDER,
            reduction=REDUCTION,
            optimization=OPTIMIZATION,
        )
        self.upsample1 = fgl.make_weight_normed_FGL(
            int(self.channel_sizes[-2]),
            int(self.node_sizes[-2]),
            int(self.channel_sizes[-3]),
            int(self.node_sizes[-3]),
            adj_list[-2],
            op_order=OP_ORDER,
            reduction=REDUCTION,
            optimization=OPTIMIZATION,
        )
        # self.upsample2 = fgl.make_weight_normed_FGL(
        #     int(self.channel_sizes[-3]),
        #     int(self.node_sizes[-3]),
        #     int(self.channel_sizes[-4]),
        #     int(self.node_sizes[-4]),
        #     adj_list[-3],
        #     op_order=OP_ORDER,
        #     reduction=REDUCTION,
        #     optimization=OPTIMIZATION,
        # )
        self.linear = nn.Linear(6 * 128,
                                self.channel_sizes[-1] * self.node_sizes[-1])
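
This variant is a decoder: the adjacency lists are transposed with utils.transpose_adj_list so edges point from coarse to fine nodes, and the upsample layers run from the latent back toward voxel space. A hypothetical forward, assuming the (B, 6 * 128) latent matches the encoder output in Example #7:

    def forward(self, z):
        # Hypothetical wiring -- forward is not part of the snippet.
        x = self.linear(z)                         # (B, 6 * 128) -> (B, 128 * 32)
        x = x.view(x.size(0), self.channel_sizes[-1],
                   self.node_sizes[-1])            # -> (B, 128, 32)
        x = self.upsample0(x)                      # -> (B, 32, 512)
        return self.upsample1(x)                   # -> (B, 1, out_features)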
Example #6
    def __init__(self, args, loadable_state_dict=None, z_size=32, dropout_rate=0.5):
        super().__init__()
        self.args = args
        meta = self.args.meta
        wtree = args.wtree
        self.z_size = z_size
        in_features = constants.original_masked_nnz

        self.node_sizes = [in_features, 4096, 1024, 64]
        self.channel_sizes = [1, 32, 64, 128]  # That mapping should be fairly fast
        adj_list = []

        cur_level = wtree.get_leaves()
        for next_count in self.node_sizes[1:]:
            cur_level, _, adj = wtree.get_level_and_adjacency(next_count, cur_level)
            adj_list.append(adj)
        # adj_list contains adjacency lists from ~200k -> ... -> 64
        # we need to transpose each one and then reverse the list
        self.n_layers = len(self.channel_sizes) - 1
        OP_ORDER = args.op_order  # "132"
        REDUCTION = args.reduction
        OPTIMIZATION = args.optimization
        self.downsample0 = fgl.make_weight_normed_FGL(
            int(self.channel_sizes[0]),
            int(self.node_sizes[0]),
            int(self.channel_sizes[1]),
            int(self.node_sizes[1]),
            adj_list[0],
            op_order=OP_ORDER,
            reduction=REDUCTION,
            optimization=OPTIMIZATION,
        )
        self.activation0 = nn.Sequential()
        self.downsample1 = fgl.make_weight_normed_FGL(
            int(self.channel_sizes[1]),
            int(self.node_sizes[1]),
            int(self.channel_sizes[2]),
            int(self.node_sizes[2]),
            adj_list[1],
            op_order=OP_ORDER,
            reduction=REDUCTION,
            optimization=OPTIMIZATION,
        )
        self.activation1 = nn.Sequential()
        self.downsample2 = fgl.make_weight_normed_FGL(
            int(self.channel_sizes[2]),
            int(self.node_sizes[2]),
            int(self.channel_sizes[3]),
            int(self.node_sizes[3]),
            adj_list[2],
            op_order=OP_ORDER,
            reduction=REDUCTION,
            optimization=OPTIMIZATION,
        )
        self.activation2 = nn.Dropout() if args.dropout else nn.Sequential()

        fcs = []
        for i, study in enumerate(args.studies):
            si = args.meta['s2i'][study]
            assert i == si
            nclasses = len(args.meta['si2ci'][si])
            fcs.append(
                weight_norm(nn.Linear(self.node_sizes[-1] * self.channel_sizes[-1], nclasses))
            )
        self.fcs = nn.ModuleList(fcs)
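
A plausible multi-task forward: a shared FGL trunk followed by one weight-normed linear head per study, in the order of args.studies:

    def forward(self, x):
        # Hypothetical wiring -- forward is not part of the snippet.
        z = self.activation0(self.downsample0(x))  # -> (B, 32, 4096)
        z = self.activation1(self.downsample1(z))  # -> (B, 64, 1024)
        z = self.activation2(self.downsample2(z))  # -> (B, 128, 64), dropout if enabled
        z = z.view(z.size(0), -1)
        return [fc(z) for fc in self.fcs]          # one logit tensor per study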
Example #7
    def __init__(self,
                 args,
                 loadable_state_dict=None,
                 z_size=128,
                 dropout_rate=0.5):
        super().__init__()
        self.args = args
        meta = self.args.meta
        wtree = args.wtree
        self.z_size = z_size
        in_features = constants.original_masked_nnz

        # Two-stage downsampling.
        # This setup ensures that ConvEncoder and the FGL encoder have the same output size.
        self.node_sizes = [in_features, 512, 32]
        self.channel_sizes = [1, 32, 128]  # That mapping should be fairly fast

        # self.node_sizes = [in_features, z_size * 512, z_size, 12]  # , z_size * 512, z_size * 128, z_size]
        # self.channel_sizes = [1, z_size // 16, z_size // 4, z_size]  # , 32, 64, 128]  # That mapping should be fairly fast

        cur_level = wtree.get_leaves()
        adj_list = []
        for next_count in self.node_sizes[1:]:
            cur_level, _, adj = wtree.get_level_and_adjacency(next_count,
                                                              cur_level,
                                                              n_regions=2)
            adj_list.append(adj)

        self.n_layers = len(self.channel_sizes) - 1
        OP_ORDER = "132"  # args.op_order
        REDUCTION = "sum"  # args.reduction
        OPTIMIZATION = "tree"  # args.optimization
        self.downsample0 = fgl.make_weight_normed_FGL(
            int(self.channel_sizes[0]),
            int(self.node_sizes[0]),
            int(self.channel_sizes[1]),
            int(self.node_sizes[1]),
            adj_list[0],
            op_order=OP_ORDER,
            reduction=REDUCTION,
            optimization=OPTIMIZATION,
        )
        self.downsample1 = fgl.make_weight_normed_FGL(
            int(self.channel_sizes[1]),
            int(self.node_sizes[1]),
            int(self.channel_sizes[2]),
            int(self.node_sizes[2]),
            adj_list[1],
            op_order=OP_ORDER,
            reduction=REDUCTION,
            optimization=OPTIMIZATION,
        )
        # self.downsample2 = fgl.make_weight_normed_FGL(
        #     int(self.channel_sizes[2]),
        #     int(self.node_sizes[2]),
        #     int(self.channel_sizes[3]),
        #     int(self.node_sizes[3]),
        #     adj_list[2],
        #     op_order=OP_ORDER,
        #     reduction=REDUCTION,
        #     optimization=OPTIMIZATION,
        # )
        self.linear = nn.Linear(self.channel_sizes[-1] * self.node_sizes[-1],
                                6 * 128)
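
And a guessed forward for this encoder; the final linear projects to 6 * 128, the latent size consumed by the decoder in Example #5:

    def forward(self, x):
        # Hypothetical wiring -- forward is not part of the snippet.
        z = self.downsample0(x)                     # (B, 1, ~200k) -> (B, 32, 512)
        z = self.downsample1(z)                     # -> (B, 128, 32)
        return self.linear(z.view(z.size(0), -1))   # -> (B, 6 * 128)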