Example #1
    def __init__(self):
        super(BasicBlockD3, self).__init__()
        densenum = 32
        basenum = 256
        level = 1

        # Each dense layer sees the block input plus the densenum = 32
        # feature maps produced by every preceding layer.
        for i in range(24):
            setattr(self, 'bl{}'.format(i + 1),
                    BasicLayerD(inplanes=level * basenum + densenum * i))

        self.nlfT = nn.PReLU()
        self.nlfT2 = nn.PReLU()
        self.convT = hg.Conv2d(level * basenum + densenum * 24, 512, 1, 1)
        self.convT2 = hg.Conv2d(512, 512, 1, 2)
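The inplanes arithmetic encodes dense connectivity: layer i receives the block's level * basenum input channels plus the 32 feature maps produced by each of the i - 1 preceding layers. A forward pass consistent with those widths (a sketch assuming DenseNet-style concatenation; the actual method is not part of this snippet) would be:

    # Sketch (assumed): DenseNet-style forward matching the widths above
    def forward(self, x):
        for i in range(1, 25):
            out = getattr(self, 'bl{}'.format(i))(x)  # 32 new feature maps
            x = torch.cat([x, out], dim=1)            # widen by densenum
        x = self.nlfT(self.convT(x))       # kernel-1 transition conv to 512
        return self.nlfT2(self.convT2(x))  # stride-2 downsampling conv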
Example #2
    def __init__(self, input_dim, output_dim):
        """
        :param input_dim:
        :param output_dim:
        """
        super(DQN_network, self).__init__()
        ## global state

        self.hexconv_1 = hexagdly.Conv2d(in_channels=4,
                                         out_channels=16,
                                         kernel_size=5,
                                         stride=3,
                                         bias=True)
        # self.hexpool = hexagdly.MaxPool2d(kernel_size=1, stride=2)
        self.hexconv_2 = hexagdly.Conv2d(16, 64, 3, 3)  # 1, 16, 15, 18
        self.global_fc = nn.Sequential(
            nn.Linear(64 * 5 * 6, 256))  # nn.Dropout(0.5) could be appended
        ## local state
        self.local_fc = nn.Linear(input_dim, 256)
        self.fc_adv = nn.Linear(256, 64)
        self.fc_v = nn.Linear(256, 64)
        self.output_adv = nn.Linear(64, output_dim)
        self.output_v = nn.Linear(64, 1)

        ## concat_fc
        self.cat_fc = nn.Linear(64 * 5 * 6 + 3, 256)
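The paired fc_adv/fc_v branches form a dueling-DQN head: a scalar state value and per-action advantages are computed from the shared 256-unit representation and recombined into Q-values. The forward pass is omitted from this snippet; the standard combination (a sketch, where h stands for the shared hidden features and is not a name from the original) is:

        # Sketch (assumed): standard dueling combination, h = shared features
        adv = self.output_adv(self.fc_adv(h))        # (batch, output_dim)
        v = self.output_v(self.fc_v(h))              # (batch, 1)
        q = v + adv - adv.mean(dim=1, keepdim=True)  # center the advantages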
Example #3
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 downsample=None,
                 nonlinear_function=None):
        super(BasicBlock2, self).__init__()

        if nonlinear_function is None:
            nonlinear_function = nn.ReLU()

        self.conv1 = hg.Conv2d(inplanes, planes,
                               1)  # conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nonlinear_function
        self.conv2 = hg.Conv2d(planes, planes, 3, stride)
        self.bn2 = nn.BatchNorm2d(planes)
        # Note: the downsample argument is ignored; a projection shortcut
        # is always built below.
        self.stride = stride

        self.downsample = nn.Sequential(
            hg.Conv2d(inplanes,
                      planes,
                      kernel_size=1,
                      stride=stride,
                      bias=False),
            nn.BatchNorm2d(planes),
        )
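The snippet omits forward; for a residual block wired this way it would presumably read (a sketch under that assumption):

    # Sketch (assumed): residual forward with projection shortcut
    def forward(self, x):
        identity = self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return self.relu(out + identity)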
Example #4
    def __init__(self, inplanes, planes, stride=1, downsample=False):
        super(BasicBlockQ3, self).__init__()

        self.conv1 = hg.Conv2d(inplanes, planes,
                               1)  # conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu1 = nn.PReLU()

        self.conv2 = hg.Conv2d(planes, planes, 1)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu_end = nn.PReLU()

        # The downsample argument is ignored: downsampling is enabled
        # whenever the channel count changes.
        self.downsample = (inplanes != planes)
        self.stride = stride

        if self.downsample:
            self.conv3 = hg.Conv2d(planes, planes, 3, stride=2)
            self.bn3 = nn.BatchNorm2d(planes)
            self.relu2 = nn.PReLU()
            self.downsample_res = nn.Sequential(
                hg.Conv2d(inplanes,
                          planes,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                nn.BatchNorm2d(planes),
            )
Example #5
    def __init__(self, input_dim, output_dim):
        super(DQN_target_network, self).__init__()
        ## global state

        self.hexconv_1 = hexagdly.Conv2d(in_channels=4,
                                         out_channels=16,
                                         kernel_size=5,
                                         stride=3)
        # self.hexpool = hexagdly.MaxPool2d(kernel_size=1, stride=2)
        self.hexconv_2 = hexagdly.Conv2d(16, 64, 3, 3)  # 1, 16, 15, 18
        self.global_fc = nn.Linear(64 * 6 * 6, 256)  # nn.Dropout(0.5) could follow
        nn.init.xavier_normal_(self.global_fc.weight)
        ## local state
        self.local_fc = nn.Linear(input_dim, 256)
        nn.init.xavier_normal_(self.local_fc.weight)
        self.fc_adv = nn.Linear(256, 64)
        nn.init.xavier_normal_(self.fc_adv.weight)
        self.fc_v = nn.Linear(256, 64)
        nn.init.xavier_normal_(self.fc_v.weight)
        self.output_adv = nn.Linear(64, output_dim)
        nn.init.xavier_normal_(self.output_adv.weight)
        self.output_v = nn.Linear(64, 1)
        nn.init.xavier_normal_(self.output_v.weight)

        ## concat_fc
        self.cat_fc = nn.Linear(64 * 6 * 6 + 3, 256)
        nn.init.xavier_normal_(self.cat_fc.weight)
Example #6
    def __init__(self,
                 inplanes,
                 stride=1,
                 downsample=None,
                 nonlinear_function=None):
        super(BasicLayerD, self).__init__()

        self.conv1 = hg.Conv2d(inplanes, 128,
                               1)  # conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(128)
        self.relu1 = nn.PReLU()
        self.conv2 = hg.Conv2d(128, 32, 3, stride)
        self.bn2 = nn.BatchNorm2d(32)
        self.relu2 = nn.PReLU()
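This is the bottleneck unit behind the dense blocks in the other examples: a kernel-size-1 hex convolution squeezes the growing input down to 128 channels, then a kernel-size-3 hex convolution emits the 32 feature maps that the enclosing block concatenates onto its input. A forward pass consistent with that wiring (a sketch; the method is not shown) would be:

    # Sketch (assumed): bottleneck forward producing 32 new feature maps
    def forward(self, x):
        out = self.relu1(self.bn1(self.conv1(x)))
        return self.relu2(self.bn2(self.conv2(out)))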
Example #7
    def __init__(self):
        super(HexNetDeep_dense1_it, self).__init__()
        # size before x.view / batch size
        input_channel = 4

        self.cinput = hg.Conv2d(input_channel, 64, 1, 1)

        self.nlf1 = nn.PReLU()
        self.nlf2 = nn.PReLU()
        self.nlf3 = nn.PReLU()
        self.nlf4 = nn.PReLU()
        self.nlf5 = nn.PReLU()

        self.block1 = BasicBlockD1()
        self.block2 = BasicBlockD2()
        self.block3 = BasicBlockD3()

        self.endpool = nn.AvgPool2d(5)

        self.size2 = 512

        self.fc3 = nn.Linear(self.size2, self.size2)
        self.fc4 = nn.Linear(self.size2, 2)

        self.nlf3_fc = nn.PReLU()
        self.nlf4_fc = nn.PReLU()
Example #8
    def __init__(self,
                 n_conv_filters=4,
                 n_hidden=36,
                 n_hidden_layer=1,
                 n_out=1,
                 drop_out=0.0,
                 ny=13,
                 nz=13):
        super(SAMFixedConvNet, self).__init__()

        self.n_conv_filters = n_conv_filters
        # INPUT: (1 x 6 x 6)
        # Conv Output: (1 x 6 x 6)
        # Pool Output: (1 x 3 x 3)
        self.layer1 = nn.Sequential(
            hexagdly.Conv2d(1,
                            n_conv_filters,
                            kernel_size=1,
                            stride=1,
                            bias=True), nn.ReLU(),
            hexagdly.MaxPool2d(kernel_size=1, stride=2))

        # Determine pooling output size (ugh, hackish)
        dummy = torch.rand((1, 1, nz, ny))
        p = hexagdly.MaxPool2d(kernel_size=1, stride=2)
        self.n_pool_out = np.prod(p(dummy).shape) * n_conv_filters

        # Fully-connected layer(s)
        self.o = nn.Linear(self.n_pool_out, n_out, bias=True)
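Tracing a throwaway tensor through the pooling layer at construction time sidesteps working out the hexagonal stride arithmetic by hand: the product of the pooled shape, scaled by n_conv_filters, is exactly the in_features the linear output layer needs. Note that n_hidden, n_hidden_layer, and drop_out are accepted but unused in this fixed variant.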
Example #9
    def __init__(self,
                 n_conv_filters=4,
                 n_hidden_layer=1,
                 n_node_hidden=36,
                 n_node_feature=0,
                 n_out=1,
                 drop_out=0.0,
                 ny=14,
                 nz=13):
        super(SAMConvNetSimple, self).__init__()

        self.n_conv_filters = n_conv_filters

        ## Conv and pooling layer
        # INPUT: (1 x ny x nz)
        # Conv Output: (1 x ny x nz)
        # Pool Output: (1 x ny/2 x nz/2) (check!)

        self.conv1 = nn.Sequential(
            hexagdly.Conv2d(1,
                            n_conv_filters,
                            kernel_size=1,
                            stride=1,
                            bias=True), nn.ReLU(),
            hexagdly.MaxPool2d(kernel_size=1, stride=2))

        # Determine pooling output size (ugh, hackish)
        dummy = torch.rand((1, 1, nz, ny))
        self.n_pool_out = np.prod(self.conv1(dummy).detach().shape)

        # Fully-connected hidden layer(s), optional feature layer, and output layer
        self.fc = SAMNet(self.n_pool_out, n_hidden_layer, n_node_hidden,
                         n_node_feature, n_out, drop_out)
Example #10
    def __init__(self):
        super(BasicBlockD1, self).__init__()

        densenum = 32
        basenum = 64
        level = 1
        self.bl1 = BasicLayerD(inplanes=level * basenum + densenum * 0)
        self.bl2 = BasicLayerD(inplanes=level * basenum + densenum * 1)
        self.bl3 = BasicLayerD(inplanes=level * basenum + densenum * 2)
        self.bl4 = BasicLayerD(inplanes=level * basenum + densenum * 3)
        self.bl5 = BasicLayerD(inplanes=level * basenum + densenum * 4)
        self.bl6 = BasicLayerD(inplanes=level * basenum + densenum * 5)

        self.nlfT = nn.PReLU()
        self.nlfT2 = nn.PReLU()
        self.convT = hg.Conv2d(level * basenum + densenum * 6, 128, 1, 1)
        self.convT2 = hg.Conv2d(128, 128, 1, 2)
Example #11
    def __init__(self,
                 n_conv_filters=4,
                 n_hidden_layer=1,
                 n_node_hidden=36,
                 n_node_feature=0,
                 kernel_size=1,
                 n_out=1,
                 drop_out=0.0,
                 nx=12,
                 ny=11):
        super(SAMConvNet, self).__init__()

        self.n_conv_filters = n_conv_filters

        ## Conv and pooling layer
        # INPUT: (1 x ny x nx)
        # Conv Output: (1 x ny x nx)
        # Pool Output: (1 x ny/2 x nx/2) (check!)

        self.conv1 = hexagdly.Conv2d(1,
                                     n_conv_filters,
                                     kernel_size=kernel_size,
                                     stride=1,
                                     bias=True)
        self.pool = hexagdly.MaxPool2d(kernel_size=1, stride=2)
        self.conv2 = hexagdly.Conv2d(n_conv_filters,
                                     n_conv_filters,
                                     kernel_size=1,
                                     stride=1,
                                     bias=True)

        # Determine pooling output size (ugh, hackish)
        dummy = torch.rand((1, 1, ny, nx))
        o = self.conv1(dummy).detach()
        o = self.pool(o)
        o = self.conv2(o)
        o = self.pool(o)
        self.n_pool_out = np.prod(o.shape)

        # Fully-connected hidden layer(s), optional feature layer, and output layer
        self.fc = SAMNet(self.n_pool_out, n_hidden_layer, n_node_hidden,
                         n_node_feature, n_out, drop_out)
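The dummy trace above mirrors what the forward pass must do: conv, pool, conv, pool, flatten, then hand off to the fully-connected SAMNet head. Presumably (a sketch; any activations between the convolutions are not visible in this snippet):

    # Sketch (assumed): forward mirroring the dummy shape trace
    def forward(self, x):
        out = self.pool(self.conv1(x))
        out = self.pool(self.conv2(out))
        return self.fc(out.view(out.shape[0], -1))  # flatten, then FC head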
Example #12
    def _init_corrector(self):
        cnn_layers = []
        cnn_layers.append(
            hexagdly.Conv2d(in_channels=self.n_classes,
                            out_channels=32,
                            kernel_size=1,
                            stride=1,
                            bias=True))
        cnn_layers.append(
            hexagdly.Conv2d(in_channels=32,
                            out_channels=32,
                            kernel_size=1,
                            stride=1,
                            bias=True))
        if self.use_bn:
            # width must match the preceding conv's 32 output channels
            cnn_layers.append(nn.BatchNorm2d(32))
        cnn_layers.append(nn.ReLU())

        cnn_layers.append(
            hexagdly.Conv2d(in_channels=32,
                            out_channels=32,
                            kernel_size=1,
                            stride=1,
                            bias=True))
        cnn_layers.append(
            hexagdly.Conv2d(in_channels=32,
                            out_channels=32,
                            kernel_size=1,
                            stride=1,
                            bias=True))
        if self.use_bn:
            # width must match the preceding conv's 32 output channels
            cnn_layers.append(nn.BatchNorm2d(32))
        cnn_layers.append(nn.ReLU())

        cnn_layers.append(
            hexagdly.Conv2d(in_channels=32,
                            out_channels=self.n_classes,
                            kernel_size=1,
                            stride=1,
                            bias=True))
        return nn.Sequential(*cnn_layers)
Example #13
    def __init__(self):
        super(BasicBlockD2, self).__init__()

        densenum = 32
        basenum = 128
        level = 1

        # As in BasicBlockD3, each layer's input width grows by densenum.
        for i in range(12):
            setattr(self, 'bl{}'.format(i + 1),
                    BasicLayerD(inplanes=level * basenum + densenum * i))

        self.nlfT = nn.PReLU()
        self.nlfT2 = nn.PReLU()
        self.convT = hg.Conv2d(level * basenum + densenum * 12, 256, 1, 1)
        self.convT2 = hg.Conv2d(256, 256, 1, 2)
Example #14
    def get_tensors(self, in_channels, kernel_size, stride, bias):
        channel_dist = 1000
        if bias is False:
            bias_value = 0
        else:
            bias_value = 1.0

        # input tensor
        array = self.get_array()
        array = np.expand_dims(
            np.stack([j * channel_dist + array for j in range(in_channels)]),
            0)
        tensor = torch.FloatTensor(array)

        # expected output tensor
        if kernel_size == 1:
            conv2d_array = self.get_array_conv2d_size1_stride1()
            n_neighbours = self.get_n_neighbors_size1()
        elif kernel_size == 2:
            conv2d_array = self.get_array_conv2d_size2_stride1()
            n_neighbours = self.get_n_neighbors_size2()
        else:
            raise ValueError('only kernel_size 1 or 2 is supported')
        convolved_array = np.sum(
            np.stack([(channel * channel_dist) * n_neighbours + conv2d_array
                      for channel in range(in_channels)]),
            0,
        )
        if stride == 2:
            convolved_array = self.get_array_stride_2(convolved_array)
        elif stride == 3:
            convolved_array = self.get_array_stride_3(convolved_array)
        convolved_array = np.expand_dims(np.expand_dims(convolved_array, 0), 0)
        convolved_tensor = torch.FloatTensor(convolved_array) + bias_value

        # output tensor of test method
        conv2d = hex.Conv2d(in_channels, 1, kernel_size, stride, bias, True)

        return conv2d(tensor), convolved_tensor
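A test built on this helper would compare the layer's output against the hand-computed expectation, e.g. (a sketch; the exact assertion depends on the surrounding test framework):

        # Sketch: assert hexagdly's Conv2d matches the expected tensor
        out, expected = self.get_tensors(in_channels=2, kernel_size=1,
                                         stride=2, bias=False)
        assert torch.allclose(out.detach(), expected)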
Example #15
import numpy as np
import torch
import matplotlib.pyplot as plt

import hexagdly
from hexagdly_tools import plot_hextensor

# load_and_prep, SAMConvDataset, and SAMConvNet come from the project's
# own modules (imports assumed, matching the names used below).

feat_vec, energies, poly, pos_ext, patch_indices, methyl_pos, adj_mat = load_and_prep(
    'data/sam_pattern_data.dat.npz')

plt.ion()

feat = feat_vec[310]
#plot_from_feat(pos_ext, feat)

# On 6x6 grid and mirrored
myfeat = feat.reshape(6, 6).T[::-1, ::-1]
myfeat = torch.tensor(np.ascontiguousarray(myfeat)).reshape(1, 1, 6, 6)

#plot_hextensor(myfeat.reshape(1,1,6,6))

hexconv = hexagdly.Conv2d(1,
                          1,
                          kernel_size=1,
                          stride=1,
                          bias=False,
                          debug=True)
out = hexconv(myfeat).detach()

dataset = SAMConvDataset(feat_vec, poly)

net = SAMConvNet()

out = net(myfeat)