예제 #1
0
 def __init__(self, args, loadable_state_dict=None):
     """Single-FGL classifier over a region-adjacency graph.

     Args:
         args: config namespace; reads ``args.meta['n_classes']``,
             ``args.meta['s']`` (image side length) and
             ``args.meta['regions']``.
         loadable_state_dict: optional state dict to restore weights from.
     """
     super().__init__()
     self.args = args
     n_classes = args.meta['n_classes']
     s = args.meta['s']
     regions = args.meta['regions']
     # regions_adjacency returns the adjacency structure plus a
     # region-id -> index mapping.
     self.adj, self.rid2idx = regions_adjacency(regions)
     n_regions = len(self.adj)
     region_channels = 4
     fgl_layer = fgl.FGL(
         1,
         s * s,
         region_channels,
         n_regions,
         self.adj,
         op_order="213",
         reduction="sum",
         optimization="packed1.0",
     )
     self.net = nn.Sequential(fgl_layer)
     self.fc = nn.Sequential(nn.Linear(n_regions * region_channels, n_classes))
     if loadable_state_dict is not None:
         self.load_state_dict(loadable_state_dict)
예제 #2
0
파일: clf.py 프로젝트: HarounH/brain_
 def __init__(self, cs, As, *args, **kwargs):
     """Stack one (FGL, Tanh) stage per adjacency in ``As``.

     Args:
         cs: channel sizes; stage ``i`` maps ``cs[i]`` -> ``cs[i + 1]``.
         As: adjacency matrices, one per stage; ``As[-1].shape[0]`` is
             the node count of the final stage's output.
     """
     super(FGLNet, self).__init__()
     self.cs = cs
     self.As = As
     stages = []
     for i, A in enumerate(As):
         stages += [fgl.FGL(cs[i], cs[i + 1], A), nn.Tanh()]
     self.net = nn.Sequential(*stages)
     # Final readout: flatten last stage (nodes * channels) down to 8.
     self.linear = nn.Sequential(
         nn.Linear(As[-1].shape[0] * cs[-1], 8),
         nn.Tanh(),
     )
예제 #3
0
 def __init__(self, in_c, out_c, As, use_bias=True):
     """Multi-FGL: one parallel FGL branch per adjacency in ``As``.

     Args:
         in_c: input channels per branch.
         out_c: output channels per branch.
         As: adjacency matrices; all assumed to share the shape of
             ``As[0]`` (n_out, n_in).
         use_bias: forwarded to each ``fgl.FGL``.
     """
     super(MFGL, self).__init__()
     # assert(len(As) > 0)
     self.in_c = in_c
     self.out_c = out_c
     # Node counts are taken from the first adjacency.
     self.n_out, self.n_in = As[0].shape
     self.As = As
     self.n = len(As)
     self.nets = nn.ModuleList(
         [fgl.FGL(in_c, out_c, adj, use_bias) for adj in As])
예제 #4
0
 def __init__(self, in_c, out_c, A, n=4, dA=0.05, use_bias=True):
     """Multi-FGL over ``n`` randomly perturbed copies of one adjacency.

     Each branch uses ``A`` plus a random sparse perturbation of
     density ``dA``.

     Args:
         in_c: input channels per branch.
         out_c: output channels per branch.
         A: base adjacency of shape (n_out, n_in).
         n: number of perturbed branches.
         dA: density of the random sparse perturbation.
         use_bias: forwarded to each ``fgl.FGL``.
     """
     super(RandomMFGL, self).__init__()
     # assert(len(As) > 0)
     self.in_c = in_c
     self.out_c = out_c
     self.n_out, self.n_in = A.shape
     perturbed = []
     for _ in range(n):
         # sp.rand(rows, cols, density) -> random sparse perturbation;
         # scsp2tsp presumably converts scipy-sparse to torch-sparse.
         noise = utils.scsp2tsp(sp.rand(*A.shape, dA).tocoo())
         perturbed.append(A + noise)
     self.As = perturbed
     self.nets = nn.ModuleList(
         [fgl.FGL(in_c, out_c, adj, use_bias) for adj in perturbed])
예제 #5
0
 def __init__(self, args, complex=False, loadable_state_dict=None):
     """Single-FGL classifier over a wedge-adjacency graph.

     Args:
         args: config namespace; reads ``args.meta['n_classes']``,
             ``args.meta['s']`` and ``args.meta['r']``.
         complex: forwarded as ``diagonally_opposite`` to
             ``wedge_adjacency``.  (Shadows the builtin; kept for
             caller compatibility.)
         loadable_state_dict: optional state dict to restore weights from.
     """
     super().__init__()
     self.args = args
     n_classes = args.meta['n_classes']
     s = args.meta['s']
     r = args.meta['r']
     net = []
     adj = wedge_adjacency(s, r, diagonally_opposite=complex)
     nout = len(adj)
     cout = 4
     net.append(
         fgl.FGL(
             1,
             s * s,
             cout,
             nout,
             adj,
             op_order="213",
             reduction="sum",
             optimization="tree",
         ))
     self.net = nn.Sequential(*net)
     self.fc = nn.Sequential(nn.Linear(nout * cout, n_classes))
     # BUG FIX: loadable_state_dict was accepted but never applied;
     # sibling constructors in this project restore it like this.
     if loadable_state_dict is not None:
         self.load_state_dict(loadable_state_dict)
예제 #6
0
    def __init__(self,
                 args,
                 loadable_state_dict=None,
                 z_size=128,
                 content_channels=16,
                 dropout_rate=0.5):
        """Hierarchical generator: label embeddings + per-level FCs, then
        five FGL upsampling stages following the ward-tree hierarchy.

        Args:
            args: config namespace; reads ``args.meta`` (``s2i``/``t2i``/
                ``c2i`` label maps) and ``args.wtree`` (ward tree).
            loadable_state_dict: optional state dict to restore weights from.
            z_size: latent size; also scales the node counts per level.
            content_channels: embedding width for study/task/contrast labels.
            dropout_rate: dropout used in the FC heads.
        """
        super(GeneratorHierarchical0, self).__init__()
        self.args = args
        meta = self.args.meta
        wtree = args.wtree
        self.content_channels = content_channels
        self.z_size = z_size

        #############
        # Linear layers
        #############

        self.study_embedding = weight_norm(
            nn.Embedding(len(meta['s2i']), content_channels))
        self.task_embedding = weight_norm(
            nn.Embedding(len(meta['t2i']), content_channels))
        self.contrast_embedding = weight_norm(
            nn.Embedding(len(meta['c2i']), content_channels))

        self.zfc = nn.Sequential(nn.Dropout(p=dropout_rate), )

        self.fcs = nn.ModuleList([
            nn.Sequential(
                nn.Linear(content_channels, content_channels),
                nn.Dropout(dropout_rate),
            ),
            nn.Sequential(
                nn.Linear(2 * content_channels, content_channels),
                nn.Dropout(dropout_rate),
            ),
            nn.Sequential(
                nn.Linear(3 * content_channels, content_channels),
                nn.Dropout(dropout_rate),
            ),
            nn.Sequential(
                nn.Linear(3 * content_channels, content_channels),
                nn.Dropout(dropout_rate),
            ),
            nn.Sequential(
                nn.Linear(3 * content_channels, content_channels),
                nn.Dropout(dropout_rate),
            ),
        ])

        # Node counts per level, coarse (index -1) to fine (index 0).
        self.node_sizes = [
            constants.masked_nnz, z_size * 256, z_size * 64, z_size * 16,
            z_size * 4, z_size
        ]
        self.channel_sizes = [
            1, content_channels + (z_size // 16),
            content_channels + (z_size // 8), content_channels + (z_size // 4),
            content_channels + (z_size // 2), content_channels + z_size
        ]

        adj_list = []
        cur_level = wtree.get_leaves()
        # BUG FIX: was `self.nodes_sizes` (AttributeError; the attribute is
        # named `node_sizes` above).
        for next_count in self.node_sizes[1:]:
            cur_level, _, adj = ward_tree.go_up_to_reduce(
                cur_level, next_count)
            adj_list.append(adj)
        # adj_list contains adj list from 67615->32768...->128
        # we need to transpose each one and them reverse the list
        adj_list = [
            utils.transpose_adj_list(self.node_sizes[i],
                                     self.node_sizes[i + 1], al)
            for i, al in enumerate(adj_list)
        ]
        adj_list = adj_list[::-1]

        # Upsample stage k maps level -(k+1) -> level -(k+2); the last
        # stage lands on the finest level (index 0).
        self.upsample0 = fgl.FGL(self.channel_sizes[-1], self.node_sizes[-1],
                                 self.channel_sizes[-2], self.node_sizes[-2],
                                 adj_list[0])
        self.upsample1 = fgl.FGL(self.channel_sizes[-2], self.node_sizes[-2],
                                 self.channel_sizes[-3], self.node_sizes[-3],
                                 adj_list[1])
        self.upsample2 = fgl.FGL(self.channel_sizes[-3], self.node_sizes[-3],
                                 self.channel_sizes[-4], self.node_sizes[-4],
                                 adj_list[2])
        # BUG FIX: was `self.node_sizes[5]` (== z_size, the coarsest level)
        # in the next two stages, which broke the upsampling chain; the
        # pattern pairs channel_sizes[-5] with node_sizes[-5].
        self.upsample3 = fgl.FGL(self.channel_sizes[-4], self.node_sizes[-4],
                                 self.channel_sizes[-5], self.node_sizes[-5],
                                 adj_list[3])
        self.upsample4 = fgl.FGL(self.channel_sizes[-5], self.node_sizes[-5],
                                 self.channel_sizes[0], self.node_sizes[0],
                                 adj_list[4])

        self.activation0 = nn.Sequential(
            nn.LeakyReLU(0.2), nn.BatchNorm1d(self.channel_sizes[-2]))
        self.activation1 = nn.Sequential(
            nn.LeakyReLU(0.2), nn.BatchNorm1d(self.channel_sizes[-3]))
        self.activation2 = nn.Sequential(
            nn.LeakyReLU(0.2), nn.BatchNorm1d(self.channel_sizes[-4]))
        self.activation3 = nn.Sequential(
            nn.LeakyReLU(0.2), nn.BatchNorm1d(self.channel_sizes[-5]))
        self.activation4 = nn.Sequential(nn.Tanh())

        if loadable_state_dict:
            self.load_state_dict(loadable_state_dict)
예제 #7
0
    def __init__(self,
                 args,
                 wtree,
                 loadable_state_dict=None,
                 z_size=128,
                 content_channels=16,
                 dropout_rate=0.5):
        """Hierarchical FGL generator driven by a ward tree.

        Args:
            args: config namespace; reads ``args.meta`` label maps.
            wtree: WardTree object providing the leaf level.
            loadable_state_dict: optional state dict to restore weights from.
            z_size: latent size (unused directly here beyond the arch note).
            content_channels: embedding width for study/task/contrast labels.
            dropout_rate: dropout used in the FC heads.
        """
        super(FGLGeneratorHierarchical0, self).__init__()
        # wtree: WardTree object.
        # arch: N, 128 (z_size + cc) -> N, 512 (z_size // 2 + cc) -> N, 2048 (32) -> N, 8192 (16) -> N, 32768 (8) -4-> N, 67615 (1)
        self.args = args
        meta = self.args.meta

        # FCs
        self.study_embedding = weight_norm(
            nn.Embedding(len(meta['s2i']), content_channels))
        self.task_embedding = weight_norm(
            nn.Embedding(len(meta['t2i']), content_channels))
        self.contrast_embedding = weight_norm(
            nn.Embedding(len(meta['c2i']), content_channels))
        self.fcs = nn.ModuleList([
            nn.Sequential(
                nn.Linear(content_channels, content_channels),
                nn.Dropout(dropout_rate),
            ),
            nn.Sequential(
                nn.Linear(2 * content_channels, content_channels),
                nn.Dropout(dropout_rate),
            ),
            nn.Sequential(
                nn.Linear(3 * content_channels, content_channels),
                nn.Dropout(dropout_rate),
            ),
            nn.Sequential(
                nn.Linear(3 * content_channels, content_channels),
                nn.Dropout(dropout_rate),
            ),
            nn.Sequential(
                nn.Linear(3 * content_channels, content_channels),
                nn.Dropout(dropout_rate),
            ),
        ])

        self.nodes_arr = [32768, 8192, 2048, 512, 128]
        adjes = []
        cur_level = wtree.get_leaves()
        for next_count in self.nodes_arr:
            # NOTE(review): sibling snippets unpack three values from
            # go_up_to_reduce — confirm this two-value form against ward_tree.
            cur_level, adj = ward_tree.go_up_to_reduce(cur_level, next_count)
            adjes.append(adj)
            # BUG FIX: removed `cur_c = next_c` — `next_c` was undefined
            # (NameError) and `cur_c` was never read.
        adjes = adjes[::-1]

        # BUG FIX: `utisl` -> `utils` throughout (NameError at construction).
        self.upsample0 = fgl.FGL(128 + content_channels, 64,
                                 utils.scsp2tsp(adjes[0].T))
        self.activation0 = nn.Sequential(nn.LeakyReLU(0.2), )
        self.upsample1 = fgl.FGL(64 + content_channels, 32,
                                 utils.scsp2tsp(adjes[1].T))
        self.upsample2 = fgl.FGL(32 + content_channels, 16,
                                 utils.scsp2tsp(adjes[2].T))
        self.upsample3 = fgl.FGL(16 + content_channels, 8,
                                 utils.scsp2tsp(adjes[3].T))
        self.upsample4 = fgl.FGL(8 + content_channels, 1,
                                 utils.scsp2tsp(adjes[4].T))
        # BUG FIX: loadable_state_dict was accepted but never applied.
        if loadable_state_dict is not None:
            self.load_state_dict(loadable_state_dict)
예제 #8
0
    def __init__(self,
                 args,
                 loadable_state_dict=None,
                 z_size=128,
                 dropout_rate=0.5,
                 content_channels=16):
        """Hierarchical discriminator: five FGL downsampling stages plus
        per-attribute (contrast/task/study) heads and a real/fake head.

        Args:
            args: config namespace; reads ``args.wtree`` (ward tree).
            loadable_state_dict: optional state dict to restore weights from.
            z_size: latent size; scales node and channel counts per level.
            dropout_rate: accepted for API symmetry with the generator;
                not used by any layer below.
            content_channels: stored on the instance for symmetry with the
                generator.  BUG FIX: this name was referenced without being
                a parameter (NameError); added as a trailing keyword arg so
                existing positional callers are unaffected.
        """
        super(DiscriminatorHierarchical0, self).__init__()
        self.args = args
        meta = self.args.meta
        wtree = args.wtree
        self.content_channels = content_channels
        self.z_size = z_size

        # Node counts per level, fine (index 0) to coarse (index -1).
        self.node_sizes = [
            constants.masked_nnz, z_size * 256, z_size * 64, z_size * 16,
            z_size * 4, z_size
        ]
        self.channel_sizes = [
            1, z_size // 16, z_size // 8, z_size // 4, z_size // 2, z_size
        ]

        adj_list = []
        cur_level = wtree.get_leaves()
        # BUG FIX: was `self.nodes_sizes` (AttributeError; the attribute is
        # named `node_sizes` above).
        for next_count in self.node_sizes[1:]:
            cur_level, _, adj = ward_tree.go_up_to_reduce(
                cur_level, next_count)
            adj_list.append(adj)
        # adj_list contains adj list from 67615->32768...->128
        # we need to transpose each one and them reverse the list

        self.downsample0 = fgl.FGL(self.channel_sizes[0], self.node_sizes[0],
                                   self.channel_sizes[1], self.node_sizes[1],
                                   adj_list[0])
        self.downsample1 = fgl.FGL(self.channel_sizes[1], self.node_sizes[1],
                                   self.channel_sizes[2], self.node_sizes[2],
                                   adj_list[1])
        self.downsample2 = fgl.FGL(self.channel_sizes[2], self.node_sizes[2],
                                   self.channel_sizes[3], self.node_sizes[3],
                                   adj_list[2])
        self.downsample3 = fgl.FGL(self.channel_sizes[3], self.node_sizes[3],
                                   self.channel_sizes[4], self.node_sizes[4],
                                   adj_list[3])
        self.downsample4 = fgl.FGL(self.channel_sizes[4], self.node_sizes[4],
                                   self.channel_sizes[5], self.node_sizes[5],
                                   adj_list[4])

        self.activation0 = nn.Sequential(nn.LeakyReLU(0.2))
        self.activation1 = nn.Sequential(nn.LeakyReLU(0.2))
        self.activation2 = nn.Sequential(nn.LeakyReLU(0.2))
        self.activation3 = nn.Sequential(nn.LeakyReLU(0.2))
        self.activation4 = nn.Sequential(nn.LeakyReLU(0.2))

        # Attribute-specific branches reuse the last downsampling adjacencies.
        self.contrast_downsample = nn.Sequential(
            fgl.FGL(self.channel_sizes[3], self.node_sizes[3],
                    self.channel_sizes[4], self.node_sizes[4], adj_list[3]),
            nn.Sequential(nn.LeakyReLU(0.2)),
            fgl.FGL(self.channel_sizes[4], self.node_sizes[4],
                    self.channel_sizes[5], self.node_sizes[5], adj_list[4]),
            nn.Sequential(nn.LeakyReLU(0.2)),
        )
        self.task_downsample = nn.Sequential(
            fgl.FGL(self.channel_sizes[4], self.node_sizes[4],
                    self.channel_sizes[5], self.node_sizes[5], adj_list[4]),
            nn.Sequential(nn.LeakyReLU(0.2)),
        )
        self.study_downsample = nn.Sequential()
        self.contrast_fc = nn.Sequential(
            nn.Linear(self.node_sizes[-1] * self.channel_sizes[-1], 1),
            nn.Sigmoid(),
        )
        self.task_fc = nn.Sequential(
            nn.Linear(self.node_sizes[-1] * self.channel_sizes[-1], 1),
            nn.Sigmoid(),
        )
        self.study_fc = nn.Sequential(
            nn.Linear(self.node_sizes[-1] * self.channel_sizes[-1], 1),
            nn.Sigmoid(),
        )
        self.rf_fc = nn.Sequential(
            nn.Linear(self.node_sizes[-1] * self.channel_sizes[-1], 1),
            nn.Sigmoid(),
        )

        if loadable_state_dict:
            self.load_state_dict(loadable_state_dict)
예제 #9
0
파일: _fgl_clf.py 프로젝트: HarounH/brain_
    def __init__(self,
                 args,
                 loadable_state_dict=None,
                 z_size=128,
                 dropout_rate=0.5,
                 downsampled=False):
        """Hierarchical classifier (NOT IMPLEMENTED).

        The constructor raises immediately; everything below the ``raise``
        is unreachable reference code kept for a future implementation.

        Args:
            args: config namespace; reads ``args.meta`` and ``args.wtree``.
            loadable_state_dict: optional state dict to restore weights from.
            z_size: latent size; scales node and channel counts per level.
            dropout_rate: dropout used in place of activations below.
            downsampled: selects the input feature count from ``constants``.

        Raises:
            NotImplementedError: always.
        """
        raise NotImplementedError()
        super(HierarchicalClassifier0, self).__init__()
        self.args = args
        meta = self.args.meta
        wtree = args.wtree
        self.z_size = z_size

        if downsampled:
            in_features = constants.downsampled_masked_nnz
        else:
            in_features = constants.original_masked_nnz
        self.node_sizes = [
            in_features, z_size * 256, z_size * 64, z_size * 16, z_size * 4,
            z_size
        ]
        self.channel_sizes = [
            1, z_size // 16, z_size // 8, z_size // 4, z_size // 2, z_size
        ]

        adj_list = []
        cur_level = wtree.get_leaves()
        # BUG FIX: was `self.nodes_sizes` (AttributeError; the attribute is
        # named `node_sizes` above) — fixed even though currently unreachable.
        for next_count in self.node_sizes[1:]:
            cur_level, _, adj = ward_tree.go_up_to_reduce(
                cur_level, next_count)
            adj_list.append(adj)
        # adj_list contains adj list from 67615->32768...->128
        # we need to transpose each one and them reverse the list

        self.downsample0 = fgl.FGL(self.channel_sizes[0], self.node_sizes[0],
                                   self.channel_sizes[1], self.node_sizes[1],
                                   adj_list[0])
        self.downsample1 = fgl.FGL(self.channel_sizes[1], self.node_sizes[1],
                                   self.channel_sizes[2], self.node_sizes[2],
                                   adj_list[1])
        self.downsample2 = fgl.FGL(self.channel_sizes[2], self.node_sizes[2],
                                   self.channel_sizes[3], self.node_sizes[3],
                                   adj_list[2])
        self.downsample3 = fgl.FGL(self.channel_sizes[3], self.node_sizes[3],
                                   self.channel_sizes[4], self.node_sizes[4],
                                   adj_list[3])
        self.downsample4 = fgl.FGL(self.channel_sizes[4], self.node_sizes[4],
                                   self.channel_sizes[5], self.node_sizes[5],
                                   adj_list[4])

        self.activation0 = nn.Sequential(
            nn.Dropout(dropout_rate))  # nn.Sequential(nn.LeakyReLU(0.2))
        self.activation1 = nn.Sequential(
            nn.Dropout(dropout_rate))  # nn.Sequential(nn.LeakyReLU(0.2))
        self.activation2 = nn.Sequential(
            nn.Dropout(dropout_rate))  # nn.Sequential(nn.LeakyReLU(0.2))
        self.activation3 = nn.Sequential(
            nn.Dropout(dropout_rate))  # nn.Sequential(nn.LeakyReLU(0.2))
        self.activation4 = nn.Sequential(
            nn.Dropout(dropout_rate))  # nn.Sequential(nn.LeakyReLU(0.2))

        self.contrast_downsample = nn.Sequential(
            fgl.FGL(self.channel_sizes[3], self.node_sizes[3],
                    self.channel_sizes[4], self.node_sizes[4], adj_list[3]),
            nn.Sequential(
                nn.Dropout(dropout_rate)),  # nn.Sequential(nn.LeakyReLU(0.2)),
            fgl.FGL(self.channel_sizes[4], self.node_sizes[4],
                    self.channel_sizes[5], self.node_sizes[5], adj_list[4]),
            nn.Sequential(
                nn.Dropout(dropout_rate)),  # nn.Sequential(nn.LeakyReLU(0.2)),
        )
        self.task_downsample = nn.Sequential(
            fgl.FGL(self.channel_sizes[4], self.node_sizes[4],
                    self.channel_sizes[5], self.node_sizes[5], adj_list[4]),
            nn.Sequential(
                nn.Dropout(dropout_rate)),  # nn.Sequential(nn.LeakyReLU(0.2)),
        )
        self.study_downsample = nn.Sequential()
        self.contrast_fc = nn.Sequential(
            nn.Linear(self.node_sizes[-1] * self.channel_sizes[-1],
                      len(meta['c2i'])),
            nn.Sigmoid(),
        )
        self.task_fc = nn.Sequential(
            nn.Linear(self.node_sizes[-1] * self.channel_sizes[-1],
                      len(meta['t2i'])),
            nn.Sigmoid(),
        )
        self.study_fc = nn.Sequential(
            nn.Linear(self.node_sizes[-1] * self.channel_sizes[-1],
                      len(meta['s2i'])),
            nn.Sigmoid(),
        )

        if loadable_state_dict:
            self.load_state_dict(loadable_state_dict)