Example #1
    def build_bottom_up(self, pretrained):
        backbone = self.params['backbone']

        if backbone == "resnet50":
            model = models.resnet50(pretrained=pretrained)
        elif backbone == "resnet101":
            model = models.resnet101(pretrained=pretrained)
        else:
            raise Exception("unimplemented backbone %s" % backbone)

        # p3 ~ p5 are extracted from backbone
        p3 = nn.Sequential(model.conv1, model.bn1, model.relu, model.maxpool,
                           model.layer1, model.layer2)

        p4 = model.layer3
        p5 = model.layer4

        # build remaining layers
        in_channels = self.calc_in_channel_width(p5)
        p6 = nn.Conv2d(in_channels, 256, 3, stride=2, padding=1)

        p7 = nn.Sequential(nn.ReLU(),
                           nn.Conv2d(256, 256, 3, stride=2, padding=1))

        # register bottom up layers
        self.bottom_up_layers = nn.ModuleList((p3, p4, p5, p6, p7))
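Since each of p3 through p7 consumes the previous level's output, the registered list can be chained directly. A minimal sketch of collecting one feature map per level (the method name is hypothetical, not from the source):

    def bottom_up_features(self, x):
        # feed each level the previous level's output and keep every map
        features = []
        for layer in self.bottom_up_layers:  # p3, p4, p5, p6, p7
            x = layer(x)
            features.append(x)
        return features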
Example #2
 def __init__(self):
     super(CNN_EMNIST, self).__init__()
     self.conv1 = nn.Conv2d(1, 32, kernel_size=5)
     self.conv2 = nn.Conv2d(32, 128, kernel_size=5)
     self.conv2_drop = nn.Dropout2d()
     self.fc1 = nn.Linear(2048, 512)
     self.fc3 = nn.Linear(512, 47)
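The fc1 width of 2048 pins the expected input to 28x28 EMNIST crops: conv1 (5x5) and a 2x2 max-pool take 28 -> 24 -> 12, conv2 and a second pool take 12 -> 8 -> 4, and 128 * 4 * 4 = 2048. A matching forward pass might look like this (a sketch assuming the usual pool/dropout placement and torch.nn.functional imported as F; not from the source):

 def forward(self, x):  # x: (N, 1, 28, 28)
     x = F.relu(F.max_pool2d(self.conv1(x), 2))                   # (N, 32, 12, 12)
     x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))  # (N, 128, 4, 4)
     x = x.view(-1, 2048)                                         # 128 * 4 * 4
     x = F.relu(self.fc1(x))
     return F.log_softmax(self.fc3(x), dim=1)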
Example #3
 def _build_weights(self, dim_in, dim_out, style_dim=64):
     self.conv1 = nn.Conv2d(dim_in, dim_out, 3, 1, 1)
     self.conv2 = nn.Conv2d(dim_out, dim_out, 3, 1, 1)
     self.norm1 = AdaIN(style_dim, dim_in)
     self.norm2 = AdaIN(style_dim, dim_out)
     if self.learned_sc:
         self.conv1x1 = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False)
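Both AdaIN layers are conditioned on a style code and applied before their convolutions, which is why norm1 is built with dim_in. The matching pre-activation residual path in StarGAN v2 looks roughly like this (a sketch following the reference implementation; self.actv, self.upsample, and F come from the surrounding class and imports):

 def _residual(self, x, s):
     x = self.norm1(x, s)   # AdaIN over dim_in channels
     x = self.actv(x)
     if self.upsample:
         x = F.interpolate(x, scale_factor=2, mode='nearest')
     x = self.conv1(x)      # dim_in -> dim_out
     x = self.norm2(x, s)   # AdaIN over dim_out channels
     x = self.actv(x)
     x = self.conv2(x)
     return x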
Example #4
 def _build_weights(self, dim_in, dim_out):
     self.conv1 = nn.Conv2d(dim_in, dim_in, 3, 1, 1)
     self.conv2 = nn.Conv2d(dim_in, dim_out, 3, 1, 1)
     if self.normalize:
         # both norms act on dim_in channels: each is applied before its conv,
         # and the dim_in -> dim_out width change only happens inside conv2
         self.norm1 = nn.InstanceNorm2d(dim_in, affine=True)
         self.norm2 = nn.InstanceNorm2d(dim_in, affine=True)
     if self.learned_sc:
         self.conv1x1 = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False)
Example #5
File: test.py Project: jiefisher/Omega
 def __init__(self):
     self.conv1 = nn.Conv2d(filter_shapes=(1,6,5,5),padding=(2,2),stride=(1,1))
     self.pool1 = nn.MaxPool(ksize=(2,2),padding=(0,0),stride=(2,2))
     self.conv2 = nn.Conv2d(filter_shapes=(6,16,5,5),padding=(0,0),stride=(1,1))
     self.pool2 = nn.MaxPool(ksize=(2,2),padding=(0,0),stride=(2,2))
     self.fc1=nn.Linear((16*5*5,120))
     self.fc2=nn.Linear((120,84))
     self.fc3=nn.Linear((84,2))
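For the classic LeNet-style layout, a 28x28 input makes the 16*5*5 width of fc1 check out (the input size is an assumption; the snippet only shows the layers):

 # Shape trace, assuming a 1x28x28 input:
 # conv1 (5x5, pad 2):  1x28x28  -> 6x28x28
 # pool1 (2x2, s2):     6x28x28  -> 6x14x14
 # conv2 (5x5, no pad): 6x14x14  -> 16x10x10
 # pool2 (2x2, s2):     16x10x10 -> 16x5x5 = 16*5*5 features into fc1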
Example #6
    def __init__(self):
        # build model
        self.conv1 = nn.Conv2d(3, 8, k_size=5, stride=1, padding=0)
        self.relu1 = nn.Relu()
        self.pool1 = nn.MaxPooling(k_size=2, stride=2)

        self.conv2 = nn.Conv2d(8, 16, k_size=3, stride=1, padding=0)
        self.relu2 = nn.Relu()
        self.pool2 = nn.MaxPooling(k_size=2, stride=2)

        self.linear1 = nn.Linear(36, 128)
        self.relu3 = nn.Relu()
        self.linear2 = nn.Linear(128, 84)
        self.relu4 = nn.Relu()
        self.out = nn.Linear(84, 10)
Example #7
def test_conv2d(k_size, s, p, d):
    input = np.random.rand(64, 3, 64, 64)
    out_size = 16

    x1 = torch.Tensor(input)
    x2 = Tensor(input)
    x1.requires_grad = True

    n1 = nn.Conv2d(3, out_size, k_size, s, p, d)
    n2 = Conv2d(3, out_size, k_size, s, p, d)
    n2.weight[:] = n1.weight.data.numpy()
    n2.bias[:] = n1.bias.data.numpy()

    y1 = n1(x1)
    y2 = n2(x2)

    assert y1.shape == y2.shape
    assert np.allclose(y1.detach().numpy(), y2)

    loss = y1.sum()
    loss.backward()

    y2.backward(np.ones(y2.shape))

    assert np.allclose(x1.grad.numpy(), x2.grad)
    assert np.allclose(n1.weight.grad.numpy(), n2.weight.grad)
    assert np.allclose(n1.bias.grad.numpy(), n2.bias.grad)
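The test is fully parameterized over kernel size, stride, padding, and dilation, so it can be driven directly or through pytest.mark.parametrize; for example (hypothetical parameter values):

if __name__ == "__main__":
    test_conv2d(k_size=3, s=1, p=1, d=1)  # shape-preserving 3x3
    test_conv2d(k_size=5, s=2, p=2, d=1)  # strided 5x5, halves 64x64 to 32x32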
Example #8
    def __init__(self, in_shape):
        super(UNet128, self).__init__()
        C, H, W = in_shape
        # assert(C==3)

        # 128
        self.down3 = StackEncoder(C, 128, kernel_size=3)  # 64
        self.down4 = StackEncoder(128, 256, kernel_size=3)  # 32
        self.down5 = StackEncoder(256, 512, kernel_size=3)  # 16
        self.down6 = StackEncoder(512, 1024, kernel_size=3)  # 8

        self.center = nn.Sequential(
            ConvBnRelu2d(1024, 1024, kernel_size=3, padding=1, stride=1), )

        # 8
        # x_big_channels, x_channels, y_channels
        self.up6 = StackDecoder(1024, 1024, 512, kernel_size=3)  # 16
        self.up5 = StackDecoder(512, 512, 256, kernel_size=3)  # 32
        self.up4 = StackDecoder(256, 256, 128, kernel_size=3)  # 64
        self.up3 = StackDecoder(128, 128, 64, kernel_size=3)  # 128
        self.classify = nn.Conv2d(64,
                                  1,
                                  kernel_size=1,
                                  padding=0,
                                  stride=1,
                                  bias=True)
Example #9
    def __init__(self, in_channels, out_channels, size):
        super().__init__()

        conv = [
            nn.Conv2d(in_channels, out_channels, 1),
            nn.BatchNorm2d(out_channels)
        ]

        layer_conv = [
            nn.Conv2d(out_channels, out_channels, 3, 1, 1),
            nn.BatchNorm2d(out_channels)
        ]

        self.conv = nn.Sequential(*conv)
        self.upsample = nn.Upsample(size)
        self.layer_conv = nn.Sequential(*layer_conv)
Example #10
File: module.py Project: BORUTO-U/DLF-VINO
 def __init__(self):
     super(Module, self).__init__()
     self.conv0 = nn.Sequential([
         nn.Conv2d(2, 128, 10, 4),
         nn.DownSample2d(in_channels=128),
         nn.Conv2d(128, 128, 1),
         nn.Conv2d(128, 64, 1),
         nn.UpSample2d(in_channels=64, stride=4),
         nn.UpSample2d(in_channels=64),
         nn.ConvTranspose2d(64, 1, 10)
     ])
     self.register_module(self.conv0)
     self.conv1 = nn.Sequential([
         nn.Conv2d(2, 64, 5, padding='SAME'),
         Res(64, 3),
         Res(64, 3),
         nn.Conv2d(64, 128, 3, padding='SAME'),
         Res(128, 3),
         nn.Conv2d(128, 64, 3, padding='SAME'),
         Res(64, 3),
         Res(64, 3),
         nn.Conv2d(64, 1, 3, padding='SAME'),
         Res(1, 3)
     ])
     self.register_module(self.conv1)
Example #11

 def __init__(self, height, width, with_r, with_boundary,
              in_channels, first_one=False, *args, **kwargs):
     super(CoordConvTh, self).__init__()
     self.addcoords = AddCoordsTh(height, width, with_r, with_boundary)
     in_channels += 2
     if with_r:
         in_channels += 1
     if with_boundary and not first_one:
         in_channels += 2
     self.conv = nn.Conv2d(in_channels=in_channels, *args, **kwargs)
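A minimal sketch of the corresponding forward pass, under the assumption that AddCoordsTh appends its coordinate (and optional radius/boundary) channels at call time; the signature is an assumption, not from the source:

 def forward(self, x, heatmap=None):
     x = self.addcoords(x, heatmap)  # appends the extra channels counted above
     return self.conv(x)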
Example #12
        def __init__(self,
                     img_size=256,
                     style_dim=64,
                     max_conv_dim=512,
                     w_hpf=1):
            super().__init__()
            dim_in = 2**14 // img_size
            self.img_size = img_size
            self.from_rgb = nn.Conv2d(3, dim_in, 3, 1, 1)
            self.encode = nn.ModuleList()
            self.decode = nn.ModuleList()
            self.to_rgb = nn.Sequential(nn.InstanceNorm2d(dim_in, affine=True),
                                        nn.LeakyReLU(0.2),
                                        nn.Conv2d(dim_in, 3, 1, 1, 0))

            # down/up-sampling blocks
            repeat_num = int(np.log2(img_size)) - 4
            if w_hpf > 0:
                repeat_num += 1
            for _ in range(repeat_num):
                dim_out = min(dim_in * 2, max_conv_dim)
                self.encode.append(
                    ResBlk(dim_in, dim_out, normalize=True, downsample=True))
                self.decode.insert(0,
                                   AdainResBlk(dim_out,
                                               dim_in,
                                               style_dim,
                                               w_hpf=w_hpf,
                                               upsample=True))  # stack-like
                dim_in = dim_out

            # bottleneck blocks
            for _ in range(2):
                self.encode.append(ResBlk(dim_out, dim_out, normalize=True))
                self.decode.insert(
                    0, AdainResBlk(dim_out, dim_out, style_dim, w_hpf=w_hpf))

            if w_hpf > 0:
                device = torch.device(
                    'cuda' if torch.cuda.is_available() else 'cpu')
                self.hpf = HighPass(w_hpf, device)
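For the defaults (img_size=256, max_conv_dim=512, w_hpf=1), dim_in starts at 2**14 // 256 = 64 and repeat_num is 8 - 4 + 1 = 5; a standalone check of the resulting encoder channel schedule:

dim_in, dims = 64, [64]
for _ in range(5):                 # repeat_num for the defaults
    dim_in = min(dim_in * 2, 512)  # doubled, capped at max_conv_dim
    dims.append(dim_in)
print(dims)  # [64, 128, 256, 512, 512, 512]; the decoder mirrors this in reverse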
Example #13

  def __init__(self):
      super(DiscriminatorNet, self).__init__()

      self.net = nn.Sequential(OrderedDict([
          ('merge', nn.Conv2d(4, 3, kernel_size=1, stride=1, padding=0)),
          ('conv1', nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1)),
          ('relu1', nn.ReLU()),
          ('pool1', nn.MaxPool2d(4, 4)),

          ('conv2_1', nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)),
          ('relu2_1', nn.ReLU()),
          ('conv2_2', nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)),
          ('relu2_2', nn.ReLU()),
          ('pool2', nn.MaxPool2d(2, 2)),

          # weight_norm must wrap a module, so it is applied to the convs
          # directly rather than listed as standalone entries
          ('conv3_1', nn.utils.weight_norm(nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1))),
          ('relu3_1', nn.ReLU()),
          ('conv3_2', nn.utils.weight_norm(nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1))),
          ('relu3_2', nn.ReLU()),
          ('pool3', nn.MaxPool2d(2, 2)),

          ('flatten', nn.Flatten()),  # needed before the fully connected layers
          ('fc4', nn.Linear(12288, 100)),
          ('tanh4', nn.Tanh()),
          ('fc5', nn.Linear(100, 2)),
          ('tanh5', nn.Tanh()),
          ('fc6', nn.Linear(2, 1)),
          # ('sigmoid6', nn.Sigmoid())
      ]))
Example #14
File: module.py Project: BORUTO-U/DLF-VINO
 def __init__(self, ch, size, name=None):
     super(Res, self).__init__()
     self.conv = nn.Conv2d(ch, ch, size, padding='SAME', name=name)
     if name is not None:
         name = name + '_scale'
     self.scale = tf.Variable(tf.random_normal(shape=[ch],
                                               mean=0.,
                                               stddev=0.5),
                              name=name)
     self.register_all_modules()
     self.parameters_ += [self.scale]
Example #15
    def build_regressions(self):

        num_box = len(self.params['box_sizes']) * len(self.params['ratios'])
        num_class = self.num_class

        out_channels = num_box * num_class
        classifiers = nn.Sequential(nn.Conv2dReLU(256, 256, 3, 1, 1),
                                    nn.Conv2dReLU(256, 256, 3, 1, 1),
                                    nn.Conv2dReLU(256, 256, 3, 1, 1),
                                    nn.Conv2dReLU(256, 256, 3, 1, 1),
                                    nn.Conv2d(256, out_channels, 3, 1, 1),
                                    nn.Sigmoid())

        out_channels = num_box * 4
        box_regressions = nn.Sequential(nn.Conv2dReLU(256, 256, 3, 1, 1),
                                        nn.Conv2dReLU(256, 256, 3, 1, 1),
                                        nn.Conv2dReLU(256, 256, 3, 1, 1),
                                        nn.Conv2dReLU(256, 256, 3, 1, 1),
                                        nn.Conv2d(256, out_channels, 3, 1, 1))

        self.classifiers = classifiers
        self.box_regressions = box_regressions
Example #16

        def __init__(self, num_modules=1, end_relu=False, num_landmarks=98, fname_pretrained=None):
            super(FAN, self).__init__()
            self.num_modules = num_modules
            self.end_relu = end_relu

            # Base part
            self.conv1 = CoordConvTh(256, 256, True, False,
                                     in_channels=3, out_channels=64,
                                     kernel_size=7, stride=2, padding=3)
            self.bn1 = nn.BatchNorm2d(64)
            self.conv2 = ConvBlock(64, 128)
            self.conv3 = ConvBlock(128, 128)
            self.conv4 = ConvBlock(128, 256)

            # Stacking part
            self.add_module('m0', HourGlass(1, 4, 256, first_one=True))
            self.add_module('top_m_0', ConvBlock(256, 256))
            self.add_module('conv_last0', nn.Conv2d(256, 256, 1, 1, 0))
            self.add_module('bn_end0', nn.BatchNorm2d(256))
            self.add_module('l0', nn.Conv2d(256, num_landmarks + 1, 1, 1, 0))

            if fname_pretrained is not None:
                self.load_pretrained_weights(fname_pretrained)
Example #17
        def __init__(self,
                     img_size=256,
                     style_dim=64,
                     num_domains=2,
                     max_conv_dim=512):
            super().__init__()
            dim_in = 2**14 // img_size
            blocks = []
            blocks += [nn.Conv2d(3, dim_in, 3, 1, 1)]

            repeat_num = int(np.log2(img_size)) - 2
            for _ in range(repeat_num):
                dim_out = min(dim_in * 2, max_conv_dim)
                blocks += [ResBlk(dim_in, dim_out, downsample=True)]
                dim_in = dim_out

            blocks += [nn.LeakyReLU(0.2)]
            blocks += [nn.Conv2d(dim_out, dim_out, 4, 1, 0)]
            blocks += [nn.LeakyReLU(0.2)]
            self.shared = nn.Sequential(*blocks)

            self.unshared = nn.ModuleList()
            for _ in range(num_domains):
                self.unshared.append(nn.Linear(dim_out, style_dim))
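With img_size=256 the trunk downsamples log2(256) - 2 = 6 times, leaving a 4x4 map that the final 4x4 convolution collapses to 1x1, so each image reaches the heads as a flat dim_out vector. The per-domain selection then looks roughly like this (a sketch following the StarGAN v2 reference):

        def forward(self, x, y):
            h = self.shared(x).view(x.size(0), -1)  # (N, dim_out)
            out = torch.stack([fc(h) for fc in self.unshared], dim=1)  # (N, num_domains, style_dim)
            idx = torch.arange(y.size(0), device=y.device)
            return out[idx, y]                      # pick each sample's domain y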
Example #18
    def build_regressions(self):
        classifiers = []
        box_regressions = []

        extras = [self.b0]
        extras.extend(self.extras)

        # from extras
        for i, extra in enumerate(extras):
            in_channels = self.calc_in_channel_width(extra)
            n = self.default_box.get_num_ratios(i)

            classifier = nn.Conv2d(in_channels, n * self.num_class, 3, 1, 1)
            classifiers.append(classifier)

            box_regression = nn.Conv2d(in_channels, n * 4, 3, 1, 1)
            box_regressions.append(box_regression)

        in_channels = self.calc_in_channel_width(self.b0)
        l2_norm = nn.Norm2d(in_channels)

        self.l2_norm = l2_norm
        self.classifiers = nn.ModuleList(classifiers)
        self.box_regressions = nn.ModuleList(box_regressions)
Example #19
    def build_regressions(self):

        params = {'stride': 1, 'padding': 1, 'use_batchnorm': True}

        num_box = len(self.params['box_sizes']) * len(self.params['ratios'])
        num_class = self.num_class

        out_channels = num_box * num_class
        classifiers = nn.Sequential(nn.Conv2dReLU(256, 256, 3, **params),
                                    nn.Conv2dReLU(256, 256, 3, **params),
                                    nn.Conv2dReLU(256, 256, 3, **params),
                                    nn.Conv2dReLU(256, 256, 3, **params),
                                    nn.Conv2d(256, out_channels, 3, 1, 1))

        out_channels = num_box * 4
        box_regressions = nn.Sequential(nn.Conv2dReLU(256, 256, 3, **params),
                                        nn.Conv2dReLU(256, 256, 3, **params),
                                        nn.Conv2dReLU(256, 256, 3, **params),
                                        nn.Conv2dReLU(256, 256, 3, **params),
                                        nn.Conv2d(256, out_channels, 3, 1, 1))

        self.classifiers = classifiers
        self.box_regressions = box_regressions
Example #20

        def __init__(self, in_planes, out_planes):
            super(ConvBlock, self).__init__()
            self.bn1 = nn.BatchNorm2d(in_planes)
            conv3x3 = partial(nn.Conv2d, kernel_size=3, stride=1, padding=1, bias=False, dilation=1)
            self.conv1 = conv3x3(in_planes, int(out_planes / 2))
            self.bn2 = nn.BatchNorm2d(int(out_planes / 2))
            self.conv2 = conv3x3(int(out_planes / 2), int(out_planes / 4))
            self.bn3 = nn.BatchNorm2d(int(out_planes / 4))
            self.conv3 = conv3x3(int(out_planes / 4), int(out_planes / 4))

            self.downsample = None
            if in_planes != out_planes:
                self.downsample = nn.Sequential(nn.BatchNorm2d(in_planes),
                                                nn.ReLU(True),
                                                nn.Conv2d(in_planes, out_planes, 1, 1, bias=False))
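The branch widths (out/2, out/4, out/4) sum back to out_planes, which matches a forward pass that concatenates the three stages and adds the (possibly downsampled) residual; sketched after the FAN reference implementation (assumes torch.nn.functional imported as F):

        def forward(self, x):
            residual = x
            out1 = self.conv1(F.relu(self.bn1(x)))
            out2 = self.conv2(F.relu(self.bn2(out1)))
            out3 = self.conv3(F.relu(self.bn3(out2)))
            out3 = torch.cat((out1, out2, out3), dim=1)  # out/2 + out/4 + out/4 = out_planes
            if self.downsample is not None:
                residual = self.downsample(residual)
            return out3 + residual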
Example #21
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size=3,
              padding=1,
              dilation=1,
              stride=1,
              groups=1,
              is_bn=True,
              is_relu=True):
     super(ConvBnRelu2d, self).__init__()
     self.conv = nn.Conv2d(in_channels,
                           out_channels,
                           kernel_size=kernel_size,
                           padding=padding,
                           stride=stride,
                           dilation=dilation,
                           groups=groups,
                           bias=False)
     self.bn = nn.BatchNorm2d(out_channels, eps=BN_EPS)
     self.relu = nn.ReLU(inplace=True)
     if is_bn is False: self.bn = None
     if is_relu is False: self.relu = None
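The matching forward pass simply skips whichever stages were disabled (a plausible sketch, not shown in the source):

 def forward(self, x):
     x = self.conv(x)
     if self.bn is not None:
         x = self.bn(x)
     if self.relu is not None:
         x = self.relu(x)
     return x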
Example #22
import types
import time

import cv2
import matplotlib.pyplot as plt
import numpy as np

import nn  # assumed project-local module providing Conv2d, Relu, MaxPooling


if __name__ == "__main__":
    print('shit change git test')
    img = cv2.imread('data/cats.jpg')
    img = img[:900, 250:1150, :]
    print(img.shape)
    img = img / 255.0
    plt.imshow(img)
    plt.show()
    x = np.random.rand(32, 32, 3)
    conv1 = nn.Conv2d(3, 64, k_size=3, stride=1, padding=0)
    relu = nn.Relu()
    pool = nn.MaxPooling(k_size=2, stride=2)
    t1 = time.time()
    out = conv1(np.array([img]))
    t2 = time.time()
    out = relu(out)
    t3 = time.time()
    out = pool(out)
    t4 = time.time()
    print(out.shape)
    print('cost time: {}'.format(t2-t1))
    print('relu cost: ', t3-t2)
    print('maxpooling cost: ', t4-t3)
    for i in range(64):
        im = out[0, :, :, i]  # .reshape(out.shape[1], out.shape[2], 1)
Example #23
import sys
sys.path.insert(0, '../../python/planner')

import planner as pln
import hardware as hw
import nn

import torch

import time

simd_cfg_path = '../../hwcfg/simd.json'
hw_spec = hw.HardwareSpec(simd_cfg_path)

data_1 = torch.randn(1, 3, 224, 224)
conv_1 = nn.Conv2d(3, 32, 3, padding=1)

data_2 = torch.randn(1, 1, 112, 112)
conv_2 = nn.Conv2d(1, 1, 3, padding=1)

data_3 = torch.randn(1, 32, 112, 112)
conv_3 = nn.Conv2d(32, 64, 1, padding=0)

data_4 = torch.randn(1, 1, 112, 112)
conv_4 = nn.Conv2d(1, 1, 3, padding=1)

data_5 = torch.randn(1, 64, 56, 56)
conv_5 = nn.Conv2d(64, 128, 1, padding=0)

data_6 = torch.randn(1, 1, 56, 56)
conv_6 = nn.Conv2d(1, 1, 3, padding=1)
Example #24
import time

import cv2
import matplotlib.pyplot as plt
import numpy as np

import nn  # assumed project-local module providing Conv2d

if __name__ == "__main__":
    print('shit change git test')
    img = cv2.imread('data/cats.jpg')
    img = img[:900, 250:1150, :]
    print(img.shape)
    img = img / 255.0
    # plt.imshow(img)
    # plt.show()
    # x = np.random.rand(32, 32, 3)
    out_img = None
    for i in range(3):
        conv = nn.Conv2d(1, 1, k_size=3, stride=1, padding=1)
        in_img = img[:, :, i]
        out = conv(np.array([in_img[:, :, np.newaxis]]))
        out_img = np.concatenate((out_img, out[0]), axis=-1) \
            if out_img is not None else out[0]

    cv2.imshow('conv', out_img)
    cv2.waitKey(0)
    # relu = nn.Relu()
    # pool = nn.MaxPooling(k_size=2, stride=2)
    # t1 = time.time()
    # out = conv1(np.array([img]))
    # t2 = time.time()
    # out = relu(out)
    # t3 = time.time()
    # out = pool(out)
Example #25

import sys
sys.path.insert(0, '../../python/planner')

import planner as pln
import hardware as hw
import nn

import torch

import time

simd_cfg_path = '../../hwcfg/simd.json'
hw_spec = hw.HardwareSpec(simd_cfg_path)

data_1 = torch.randn(1, 3, 224, 224)
conv_1 = nn.Conv2d(3, 96, 7, padding=2)

data_2_1 = torch.randn(1, 96, 55, 55)
conv_2_1 = nn.Conv2d(96, 16, 1, padding=0)

data_2_2 = torch.randn(1, 16, 55, 55)
conv_2_2 = nn.Conv2d(16, 64, 1, padding=0)
conv_2_3 = nn.Conv2d(16, 64, 3, padding=1)

data_3_1 = torch.randn(1, 128, 55, 55)
conv_3_1 = nn.Conv2d(128, 16, 1, padding=0)

data_3_2 = torch.randn(1, 32, 55, 55)
conv_3_2 = nn.Conv2d(32, 128, 1, padding=0)
conv_3_3 = nn.Conv2d(32, 128, 3, padding=1)
Example #26
        # ri and rj are kernel reduce axes created earlier in this (truncated) method
        out1 = m.compute(
            (batch, hidden_channel, H, W),
            lambda b, c, i, j: m.sum(
                inputs[b, c // factor, i + ri, j + rj]
                * self.weight1[c // factor, c % factor, ri, rj],
                axis=[ri, rj]))
        rc = m.reduce_axis((0, hidden_channel))
        out2 = m.compute((batch, out_channel, H, W), lambda b, c, i, j: m.sum(
            out1[b, rc, i, j] * self.weight2[c, rc], axis=[rc]))
        return out2

    def forward(self, inputs):
        return self.__compute__(inputs)


inputs = m.placeholder(shape=[1, 3, 224, 224], dtype="float32")

# this should use the library such as cuDNN
conv = nn.Conv2d(inputs, out_channel=32, kernel=[3, 3], stride=2, padding=1)

# this is a user-defined function block
block = CustomBlock(conv.shape, 32, 64)

result = block.forward(conv)

# create the computation graph with optimization
compute_graph = m.create_graph(result)

# link to library and generate code for user-defined functions
runnable = m.deploy(compute_graph, target="cuda")

# run the graph
runnable.run()
Example #27
import sys
sys.path.insert(0, '../../python/planner')

import planner as pln
import hardware as hw
import nn

import torch

import time

hw_spec = hw.HardwareSpec(0.2, 0.8, 0.0008, 0.005, 0.64)

data_1_1 = torch.randn(1, 3, 224, 224)
conv_1_1 = nn.Conv2d(3, 64, 3, padding=1)

data_1_2 = torch.randn(1, 64, 224, 224)
conv_1_2 = nn.Conv2d(64, 64, 3, padding=1)

data_2_1 = torch.randn(1, 64, 112, 112)
conv_2_1 = nn.Conv2d(64, 128, 3, padding=1)

data_2_2 = torch.randn(1, 128, 112, 112)
conv_2_2 = nn.Conv2d(128, 128, 3, padding=1)

data_3_1 = torch.randn(1, 128, 56, 56)
conv_3_1 = nn.Conv2d(128, 256, 3, padding=1)

data_3_2 = torch.randn(1, 256, 56, 56)
conv_3_2 = nn.Conv2d(256, 256, 3, padding=1)
Example #28
    def __init__(self, in_channels, out_channels, size):
        super().__init__()

        self.conv = nn.Conv2d(in_channels, out_channels, 1)
        self.upsample = nn.Upsample(size)
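A plausible forward pass for this project-then-resize block (assumed; the source only shows the constructor):

    def forward(self, x):
        return self.upsample(self.conv(x))  # 1x1 channel projection, then resize to `size`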
Example #29
import sys
sys.path.insert(0, '../../python/planner')

import planner as pln
import hardware as hw
import nn

import torch

import time

simd_cfg_path = '../../hwcfg/simd.json'
hw_spec = hw.HardwareSpec(simd_cfg_path)

data_1 = torch.randn(1, 3, 224, 224)
conv_1 = nn.Conv2d(3, 64, 7, stride=2, padding=3)
#####
data_2 = torch.randn(1, 64, 56, 56)
conv_2 = nn.Conv2d(64, 64, 1)
#####
data_3 = torch.randn(1, 64, 56, 56)
conv_3 = nn.Conv2d(64, 192, 3, padding=1)
#####
data_incpt_1 = torch.randn(1, 192, 28, 28)

incpt_1_conv_1 = nn.Conv2d(192, 64, 1)

incpt_1_conv_2_1 = nn.Conv2d(192, 96, 1)
data_incpt_1_2_2 = torch.randn(1, 96, 28, 28)
incpt_1_conv_2_2 = nn.Conv2d(96, 128, 3, padding=1)
    print "fc5: {}".format(net['fc5'].output_shape[1:])

    net['prob'] = DenseLayer(net['fc5'], num_units=1, nonlinearity=sigmoid)
    print "prob: {}".format(net['prob'].output_shape[1:])

    return net

## Our Pytorch Implementation

import torch
import torch.nn as nn
import torch.nn.functional as F

def build(nn.Module):
  net = nn.Sequential(OrderedDict([
                                    ('merge', nn.Conv2d(4, 3, kernel_size=1,stride = 1, padding = 0)),
                                    ('conv1', nn.Conv2d(3, 32, kernel_size=3,stride = 1, padding = 1)),
                                    ('relu1', nn.ReLU()),
                                    ('pool1',  nn.MaxPool2d(4,4)),

                                    ('conv2_1', nn.Conv2d(32, 64, kernel_size=3,stride = 1, padding = 1)),
                                    ('relu2_1', nn.ReLU()),
                                    ('conv2_2', nn.Conv2d(64, 64, kernel_size=3,stride = 1, padding = 1)),
                                    ('relu2_2', nn.ReLU()),
                                    ('pool2', nn.MaxPool2d(2,2)),

                                    ('conv3_1', nn.Conv2d(64, 64, kernel_size=3,stride = 1, padding = 1)),
                                    ('relu3_1', nn.ReLU()),
                                    ('weight_norm3_1', nn.utils.weight_norm()), # Look into this later
                                    ('conv3_2', nn.Conv2d(64, 64, kernel_size=3,stride = 1, padding = 1)),
                                    ('relu3_2', nn.ReLU())