Example #1
    def __init__(self, n_classes=10, useRRSVM=True):
        super(GoogLeNet, self).__init__()
        self.pre_layers = nn.Sequential(
            nn.Conv2d(3, 192, kernel_size=3, padding=1),
            nn.BatchNorm2d(192),
            nn.ReLU(True),
        )
        self.useRRSVM = useRRSVM
        self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)
        self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)

        self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)
        self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)
        self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
        self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
        self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)

        self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
        self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)

        if not self.useRRSVM:
            self.pool1 = nn.MaxPool2d(3, stride=2, padding=1)
            self.pool2 = nn.MaxPool2d(3, stride=2, padding=1)
            self.pool3 = nn.AvgPool2d(8, stride=1)
        else:
            self.pool1_r = RRSVM.RRSVM_Module(480, kernel_size=2, stride=2)
            self.pool2_r = RRSVM.RRSVM_Module(832, kernel_size=2, stride=2)
            self.pool3_r = RRSVM.RRSVM_Module(1024, kernel_size=8, stride=1)
            self.pool1 = nn.MaxPool2d(3, stride=2, padding=1)
            self.pool2 = nn.MaxPool2d(3, stride=2, padding=1)
            self.pool3 = nn.AvgPool2d(8, stride=1)

        self.linear = nn.Linear(1024, n_classes)
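No forward pass is shown for this GoogLeNet variant. Since both the RRSVM modules and the standard pools are registered, one plausible reading is that forward selects between them at each stage; the sketch below is an assumption, not the repository's actual method:

    def forward(self, x):
        # Hypothetical forward: RRSVM pooling substitutes for max/avg pooling
        # when self.useRRSVM is set (an assumption about the original design).
        out = self.pre_layers(x)
        out = self.b3(self.a3(out))
        out = self.pool1_r(out) if self.useRRSVM else self.pool1(out)
        out = self.e4(self.d4(self.c4(self.b4(self.a4(out)))))
        out = self.pool2_r(out) if self.useRRSVM else self.pool2(out)
        out = self.b5(self.a5(out))
        out = self.pool3_r(out) if self.useRRSVM else self.pool3(out)
        out = out.view(out.size(0), -1)
        return self.linear(out)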
Example #2
    def __init__(self,
                 num_classes=1000,
                 aux_logits=True,
                 transform_input=False,
                 useRRSVM=False):
        super(Inception3, self).__init__()
        self.aux_logits = aux_logits
        self.transform_input = transform_input
        self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
        self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
        self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
        if useRRSVM:
            self.pool2d_2b = RRSVM.RRSVM_Module(in_channels=64,
                                                kernel_size=3,
                                                stride=2)
        else:
            self.pool2d_2b = nn.MaxPool2d(kernel_size=3, stride=2)

        self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
        self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
        # 3 inception Fig 5
        if useRRSVM:
            self.pool2d_4a = RRSVM.RRSVM_Module(in_channels=192,
                                                kernel_size=3,
                                                stride=2)
        else:
            self.pool2d_4a = nn.MaxPool2d(kernel_size=3, stride=2)

        self.Mixed_5b = InceptionA(192, pool_features=32, useRRSVM=useRRSVM)
        self.Mixed_5c = InceptionA(256, pool_features=64, useRRSVM=useRRSVM)
        self.Mixed_5d = InceptionA(288, pool_features=64, useRRSVM=useRRSVM)
        self.Mixed_6a = InceptionB(288, useRRSVM=useRRSVM)
        self.Mixed_6b = InceptionC(768, channels_7x7=128, useRRSVM=useRRSVM)
        self.Mixed_6c = InceptionC(768, channels_7x7=160, useRRSVM=useRRSVM)
        self.Mixed_6d = InceptionC(768, channels_7x7=160, useRRSVM=useRRSVM)
        self.Mixed_6e = InceptionC(768, channels_7x7=192, useRRSVM=useRRSVM)
        if aux_logits:
            self.AuxLogits = InceptionAux(768, num_classes, useRRSVM=useRRSVM)
        self.Mixed_7a = InceptionD(768, useRRSVM=useRRSVM)
        self.Mixed_7b = InceptionE(1280, useRRSVM=useRRSVM)
        self.Mixed_7c = InceptionE(2048, useRRSVM=useRRSVM)

        if useRRSVM:
            self.pool2d_8 = RRSVM.RRSVM_Module(in_channels=2048, kernel_size=8)
        else:
            self.pool2d_8 = nn.AvgPool2d(kernel_size=8)

        self.fc = nn.Linear(2048, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                import scipy.stats as stats
                stddev = m.stddev if hasattr(m, 'stddev') else 0.1
                X = stats.truncnorm(-2, 2, scale=stddev)
                values = torch.Tensor(X.rvs(m.weight.data.numel()))
                values = values.view(m.weight.data.size())
                m.weight.data.copy_(values)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Example #3
    def __init__(self):
        super(LeNet_RRSVM, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool1 = RRSVM.RRSVM_Module(6, 2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.pool2 = RRSVM.RRSVM_Module(16, 2, 2)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
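The forward pass for LeNet_RRSVM is omitted above; a minimal sketch, assuming torch.nn.functional is imported as F and that RRSVM_Module returns a single tensor when return_indices is not requested:

    def forward(self, x):
        # Standard LeNet-5 flow with RRSVM modules as the two pooling steps.
        out = self.pool1(F.relu(self.conv1(x)))
        out = self.pool2(F.relu(self.conv2(out)))
        out = out.view(out.size(0), -1)  # 16 * 5 * 5 features for a 32x32 input
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return self.fc3(out)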
Example #4
    def __init__(self, in_channels, pool_features, useRRSVM=True):
        super(InceptionA, self).__init__()
        self.useRRSVM = useRRSVM
        self.branch1x1 = BasicConv2d(in_channels, 64, kernel_size=1)

        self.branch5x5_1 = BasicConv2d(in_channels, 48, kernel_size=1)
        self.branch5x5_2 = BasicConv2d(48, 64, kernel_size=5, padding=2)

        self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
        self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, padding=1)
        if useRRSVM:
            self.branch_pool_0r = RRSVM.RRSVM_Module(in_channels=in_channels,
                                                     kernel_size=3,
                                                     stride=1,
                                                     padding=1)
            self.branch_pool_0 = nn.AvgPool2d(kernel_size=3,
                                              stride=1,
                                              padding=1)
        else:
            self.branch_pool_0 = nn.AvgPool2d(kernel_size=3,
                                              stride=1,
                                              padding=1)

        self.branch_pool = BasicConv2d(in_channels,
                                       pool_features,
                                       kernel_size=1)
Example #5
    def _make_layers(self, cfg):
        layers = []
        in_channels = 3
        for x in cfg:
            if x == 'M':
                layers += [
                    nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
                ]
            elif x == 'A':
                layers += [nn.AvgPool2d(kernel_size=2, stride=2)]
            elif x == 'O':
                layers += [
                    RRSVM.RRSVM_Module(in_channels,
                                       kernel_size=2,
                                       stride=2,
                                       return_indices=True,
                                       p_constraint=self.p_constraint)
                ]
            else:
                layers += [
                    nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                    nn.BatchNorm2d(x),
                    nn.ReLU(inplace=True)
                ]
                in_channels = x

        layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
        return nn.Sequential(*layers)
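The cfg list follows the VGG-style convention used above: integers are conv widths, 'M' is max pooling, 'A' is average pooling, and 'O' inserts an RRSVM pooling stage. A purely illustrative configuration (hypothetical, not taken from the repository):

    cfg = [64, 64, 'O', 128, 128, 'O', 256, 256, 'A']
    features = self._make_layers(cfg)  # called from the network's __init__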
Example #6
    def __init__(self, block, layers, num_classes=1000, useRRSVM=False):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        if useRRSVM:
            self.avgpool = RRSVM.RRSVM_Module(in_channels=2048,
                                              init='eps_max',
                                              kernel_size=15)
        else:
            self.avgpool = nn.AvgPool2d(15)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
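Note that in_channels=2048 for the RRSVM pool only matches bottleneck blocks (512 * block.expansion with expansion = 4), and kernel_size=15 implies a 15x15 final feature map, i.e. roughly a 480x480 input rather than 224x224. Assuming the usual torchvision-style Bottleneck block is available, a ResNet-50-style instantiation would be:

    model = ResNet(Bottleneck, [3, 4, 6, 3], num_classes=1000, useRRSVM=True)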
Example #7
    def __init__(self, in_channels, useRRSVM=False):
        super(InceptionE, self).__init__()
        self.branch1x1 = BasicConv2d(in_channels, 320, kernel_size=1)

        self.branch3x3_1 = BasicConv2d(in_channels, 384, kernel_size=1)
        self.branch3x3_2a = BasicConv2d(384,
                                        384,
                                        kernel_size=(1, 3),
                                        padding=(0, 1))
        self.branch3x3_2b = BasicConv2d(384,
                                        384,
                                        kernel_size=(3, 1),
                                        padding=(1, 0))

        self.branch3x3dbl_1 = BasicConv2d(in_channels, 448, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(448, 384, kernel_size=3, padding=1)
        self.branch3x3dbl_3a = BasicConv2d(384,
                                           384,
                                           kernel_size=(1, 3),
                                           padding=(0, 1))
        self.branch3x3dbl_3b = BasicConv2d(384,
                                           384,
                                           kernel_size=(3, 1),
                                           padding=(1, 0))
        if useRRSVM:
            self.branch_pool_0 = RRSVM.RRSVM_Module(in_channels,
                                                    kernel_size=3,
                                                    stride=1,
                                                    padding=1)
        else:
            self.branch_pool_0 = nn.AvgPool2d(kernel_size=3,
                                              stride=1,
                                              padding=1)

        self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)
Example #8
def test_forward(input, kernel_size=3, padding=1, stride=2, dilation=1):
    F = RRSVM.RRSVM_F(kernel_size,
                      padding,
                      stride,
                      dilation=dilation,
                      return_indices=True)
    analytical, analytical_indices = F(*input)
    analytical = analytical.data.numpy()
    analytical_indices = analytical_indices.data.numpy()
    numerical, numerical_indices = get_numerical_output(
        *input,
        kernel_size=kernel_size,
        padding=padding,
        stride=stride,
        dilation=dilation)

    atol = 1e-5
    rtol = 1e-3
    if not (np.absolute(numerical - analytical) <=
            (atol + rtol * np.absolute(numerical))).all():
        print "Failed. Output Failed Foward Test"
    else:
        print "Passed. Ouput Pass Foward Test"

    # Minh: Seems like a bug. This code does not test the indices
    # if not (np.absolute(numerical - analytical) <= (atol + rtol * np.absolute(numerical))).all():
    if not (numerical_indices == analytical_indices).all():
        print "Failed. Indices Failed Foward Test"
    else:
        print "Passed. Indices Pass Foward Test"
Example #9
def train(train_loader, model, criterion, optimizer, epoch, p_constraint, use_cuda):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    if p_constraint:
        positive_clipper = RRSVM.RRSVM_PositiveClipper()

    # switch to train mode
    model.train()

    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        if use_cuda:
            target = target.cuda()
            input = input.cuda()
        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)

        # compute output
        output, output_aux = model(input_var)
        loss = criterion(output, target_var)
        loss_aux = criterion(output_aux, target_var)
        # TODO: here check how to merge aux loss
        t_loss = loss + loss_aux

        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.data[0]+loss_aux.data[0], input.size(0))
        top1.update(prec1[0], input.size(0))
        top5.update(prec5[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        t_loss.backward()
        optimizer.step()

        if p_constraint and (i + 1) % positive_clipper.frequency == 0:
            model.apply(positive_clipper)
        # measure elapsed time

        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                   epoch, i, len(train_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses, top1=top1, top5=top5))
Example #10
    def __init__(self,
                 in_planes,
                 n1x1,
                 n3x3red,
                 n3x3,
                 n5x5red,
                 n5x5,
                 pool_planes,
                 useRRSVM=False):
        super(Inception, self).__init__()
        self.useRRSVM = useRRSVM
        # 1x1 conv branch
        self.b1 = nn.Sequential(
            nn.Conv2d(in_planes, n1x1, kernel_size=1),
            nn.BatchNorm2d(n1x1),
            nn.ReLU(True),
        )

        # 1x1 conv -> 3x3 conv branch
        self.b2 = nn.Sequential(
            nn.Conv2d(in_planes, n3x3red, kernel_size=1),
            nn.BatchNorm2d(n3x3red),
            nn.ReLU(True),
            nn.Conv2d(n3x3red, n3x3, kernel_size=3, padding=1),
            nn.BatchNorm2d(n3x3),
            nn.ReLU(True),
        )

        # 1x1 conv -> 5x5 conv branch
        self.b3 = nn.Sequential(
            nn.Conv2d(in_planes, n5x5red, kernel_size=1),
            nn.BatchNorm2d(n5x5red),
            nn.ReLU(True),
            nn.Conv2d(n5x5red, n5x5, kernel_size=3, padding=1),
            nn.BatchNorm2d(n5x5),
            nn.ReLU(True),
            nn.Conv2d(n5x5, n5x5, kernel_size=3, padding=1),
            nn.BatchNorm2d(n5x5),
            nn.ReLU(True),
        )

        # 3x3 pool -> 1x1 conv branch
        self.pool = nn.MaxPool2d(3, stride=1, padding=1)
        if self.useRRSVM:
            self.pool_r = RRSVM.RRSVM_Module(in_planes,
                                             kernel_size=3,
                                             stride=1,
                                             padding=1)

        self.b4 = nn.Sequential(
            nn.Conv2d(in_planes, pool_planes, kernel_size=1),
            nn.BatchNorm2d(pool_planes),
            nn.ReLU(True),
        )
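As with the GoogLeNet wrapper above, the forward pass is not shown. A minimal sketch, under the assumption that the RRSVM branch simply replaces max pooling in front of the 1x1 projection when enabled:

    def forward(self, x):
        # Hypothetical forward: concatenate the four branches along channels.
        pooled = self.pool_r(x) if self.useRRSVM else self.pool(x)
        return torch.cat(
            [self.b1(x), self.b2(x), self.b3(x), self.b4(pooled)], 1)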
Example #11
def test_forward(input, kernel_size=3, padding=1, stride=2, dilation=1):
    # input = (Variable(torch.FloatTensor(torch.randn(1, 1, 5, 5)), requires_grad=True),
    #          Variable(torch.FloatTensor(torch.randn(1, 9)), requires_grad=True),)

    F = RRSVM.RRSVM_F(kernel_size,
                      padding,
                      stride,
                      dilation=dilation,
                      return_indices=True)

    # if torch.cuda.is_available():
    #     input = [i.cuda() for i in input]
    # else:
    #     print("Cuda device not detected on this device")
    #     sys.exit(-1)

    analytical, analytical_indices = F(*input)
    analytical = analytical.cpu().data.numpy()
    analytical_indices = analytical_indices.cpu().data.numpy()

    atol = 1e-5
    rtol = 1e-3

    if torch.cuda.is_available():
        input = [i.cpu() for i in input]

    numerical, numerical_indices = get_numerical_output(
        *input,
        kernel_size=kernel_size,
        padding=padding,
        stride=stride,
        dilation=dilation)

    flag = True

    if not (np.absolute(numerical - analytical) <=
            (atol + rtol * np.absolute(numerical))).all():
        print "Update Output Error"
        flag = False
    #
    # relative_loss = (numerical - analytical) / (numerical + 1e-6)
    # print "Max Diff: {:.04f}".format((np.abs(relative_loss).max()))

    input_np = input[0].data.cpu().numpy()
    if check_forward_indices(input_np, numerical_indices, analytical_indices,
                             kernel_size, padding, stride, dilation):
        print "Passed, Indices Pass Forward Test"
        flag = True
    else:
        print "Failed, Indices Fail Foward Test"
        flag = False
    return flag
Example #12
    def __init__(self, in_channels, num_classes, useRRSVM=False):
        super(InceptionAux, self).__init__()
        self.conv0 = BasicConv2d(in_channels, 128, kernel_size=1)
        self.conv1 = BasicConv2d(128, 768, kernel_size=5)
        self.conv1.stddev = 0.01
        self.fc = nn.Linear(768, num_classes)
        self.fc.stddev = 0.001
        if useRRSVM:
            self.pool = RRSVM.RRSVM_Module(in_channels,
                                           kernel_size=5,
                                           stride=3)
        else:
            self.pool = nn.AvgPool2d(kernel_size=5, stride=3)
Example #13
    def __init__(self, in_planes, out_planes, dropRate=0.0, useRRSVM=True):
        super(TransitionBlock, self).__init__()
        self.useRRSVM = useRRSVM
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1,
                               padding=0, bias=False)
        self.droprate = dropRate
        if useRRSVM:
            self.pool_r = RRSVM.RRSVM_Module(in_channels=out_planes, kernel_size=2)
            self.pool = nn.AvgPool2d(kernel_size=2)
        else:
            self.pool = nn.AvgPool2d(kernel_size=2)
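A forward pass is not included; a sketch following the standard DenseNet transition (BN -> ReLU -> 1x1 conv -> dropout -> 2x downsampling), with the RRSVM/average-pool choice assumed to mirror the useRRSVM flag:

    def forward(self, x):
        # F is torch.nn.functional; dropout only applies when droprate > 0.
        out = self.conv1(self.relu(self.bn1(x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        return self.pool_r(out) if self.useRRSVM else self.pool(out)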
Example #14
    def __init__(self, in_channels, useRRSVM=False):
        super(InceptionB, self).__init__()
        self.branch3x3 = BasicConv2d(in_channels, 384, kernel_size=3, stride=2)

        self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
        self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, stride=2)

        if useRRSVM:
            self.branch_pool = RRSVM.RRSVM_Module(in_channels=in_channels,
                                                  kernel_size=3,
                                                  stride=2)
        else:
            self.branch_pool = nn.MaxPool2d(kernel_size=3, stride=2)
Example #15
    def __init__(self, depth, n_classes, useRRSVM=True, growth_rate=12,
                 reduction=0.5, bottleneck=True, dropRate=0.0):
        super(DenseNet3, self).__init__()
        self.useRRSVM = useRRSVM
        in_planes = 2 * growth_rate
        n = (depth - 4) // 3
        if bottleneck:
            n = n // 2
            block = BottleneckBlock
        else:
            block = BasicBlock
        # 1st conv before any dense block
        self.conv1 = nn.Conv2d(3, in_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
        in_planes = int(in_planes+n*growth_rate)
        self.trans1 = TransitionBlock(in_planes, int(math.floor(in_planes*reduction)), dropRate=dropRate, useRRSVM=useRRSVM)
        in_planes = int(math.floor(in_planes*reduction))
        # 2nd block
        self.block2 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
        in_planes = int(in_planes+n*growth_rate)
        self.trans2 = TransitionBlock(in_planes, int(math.floor(in_planes*reduction)), dropRate=dropRate, useRRSVM=useRRSVM)
        in_planes = int(math.floor(in_planes*reduction))
        # 3rd block
        self.block3 = DenseBlock(n, in_planes, growth_rate, block, dropRate)
        in_planes = int(in_planes+n*growth_rate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu = nn.ReLU(inplace=True)
        if useRRSVM:
            self.pool_r = RRSVM.RRSVM_Module(in_channels=in_planes, kernel_size=8)
            self.pool = nn.AvgPool2d(kernel_size=8)
        else:
            self.pool = nn.AvgPool2d(kernel_size=8)

        self.fc = nn.Linear(in_planes, n_classes)
        self.in_planes = in_planes

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
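As a quick check of the depth arithmetic: with depth=40 and bottleneck=True, n = (40 - 4) // 3 = 12 layers per block, halved to 6 bottleneck blocks. A CIFAR-style instantiation (settings are illustrative only):

    model = DenseNet3(depth=40, n_classes=10, useRRSVM=True, growth_rate=12)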
Example #16
    def __init__(self, in_channels, channels_7x7, useRRSVM=True):
        super(InceptionC, self).__init__()
        self.useRRSVM = useRRSVM
        self.branch1x1 = BasicConv2d(in_channels, 192, kernel_size=1)

        c7 = channels_7x7
        self.branch7x7_1 = BasicConv2d(in_channels, c7, kernel_size=1)
        self.branch7x7_2 = BasicConv2d(c7,
                                       c7,
                                       kernel_size=(1, 7),
                                       padding=(0, 3))
        self.branch7x7_3 = BasicConv2d(c7,
                                       192,
                                       kernel_size=(7, 1),
                                       padding=(3, 0))

        self.branch7x7dbl_1 = BasicConv2d(in_channels, c7, kernel_size=1)
        self.branch7x7dbl_2 = BasicConv2d(c7,
                                          c7,
                                          kernel_size=(7, 1),
                                          padding=(3, 0))
        self.branch7x7dbl_3 = BasicConv2d(c7,
                                          c7,
                                          kernel_size=(1, 7),
                                          padding=(0, 3))
        self.branch7x7dbl_4 = BasicConv2d(c7,
                                          c7,
                                          kernel_size=(7, 1),
                                          padding=(3, 0))
        self.branch7x7dbl_5 = BasicConv2d(c7,
                                          192,
                                          kernel_size=(1, 7),
                                          padding=(0, 3))
        if useRRSVM:
            self.branch_pool_0r = RRSVM.RRSVM_Module(in_channels=in_channels,
                                                     kernel_size=3,
                                                     stride=1,
                                                     padding=1)
            self.branch_pool_0 = nn.AvgPool2d(kernel_size=3,
                                              stride=1,
                                              padding=1)
        else:
            self.branch_pool_0 = nn.AvgPool2d(kernel_size=3,
                                              stride=1,
                                              padding=1)

        self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)
Example #17
def test_gradient(input, kernel_size=3, padding=0, stride=1):

    F = RRSVM.RRSVM_F(kernel_size=kernel_size,
                      padding=padding,
                      stride=stride,
                      dilation=1)

    test = gradcheck(lambda i, s: F(i, s),
                     inputs=input,
                     eps=1e-3,
                     atol=1e-3,
                     rtol=1e-3)
    if test:
        print("Passed. Gradient Check Passed!")
    else:
        print("Failed. Gradient Check Failed!")
Example #18
def make_layers(cfg, useRRSVM=False, batch_norm=False):
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            if useRRSVM:
                layers += [
                    RRSVM.RRSVM_Module(in_channels=in_channels,
                                       kernel_size=2,
                                       stride=2)
                ]
            else:
                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    return nn.Sequential(*layers)
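cfg follows the torchvision VGG convention: integers are conv output widths and 'M' marks a pooling stage. For example, the VGG-11 layout:

    cfg = [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M']
    features = make_layers(cfg, useRRSVM=True, batch_norm=True)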
Example #19
    def __init__(self, in_channels, useRRSVM=False):
        super(InceptionD, self).__init__()
        self.branch3x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
        self.branch3x3_2 = BasicConv2d(192, 320, kernel_size=3, stride=2)

        self.branch7x7x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
        self.branch7x7x3_2 = BasicConv2d(192,
                                         192,
                                         kernel_size=(1, 7),
                                         padding=(0, 3))
        self.branch7x7x3_3 = BasicConv2d(192,
                                         192,
                                         kernel_size=(7, 1),
                                         padding=(3, 0))
        self.branch7x7x3_4 = BasicConv2d(192, 192, kernel_size=3, stride=2)
        if useRRSVM:
            self.branch_pool = RRSVM.RRSVM_Module(in_channels,
                                                  kernel_size=3,
                                                  stride=2)
        else:
            self.branch_pool = nn.MaxPool2d(kernel_size=3, stride=2)
Example #20
def test_gradient(input, kernel_size=3, padding=0, stride=1):

    F = RRSVM.RRSVM_F(kernel_size=kernel_size,
                      padding=padding,
                      stride=stride,
                      dilation=1)
    # if torch.cuda.is_available():
    #     input = [i.cuda() for i in input]
    # else:
    #     print("Cuda device not detected on this device")
    #     sys.exit(-1)
    test = gradcheck(lambda i, s: F(i, s),
                     inputs=input,
                     eps=1e-3,
                     atol=1e-3,
                     rtol=1e-3)
    # if test == True:
    #     print("Gradient Check Passed!")
    # else:
    #     print("Gradient Check Failed!")
    #
    return test
Example #21
A = torch.randperm(n_im * n_channel * feature_size * feature_size).float()
A = A.view(n_im, n_channel, feature_size, feature_size)

weight = torch.randn(n_channel, kernel_size**2)

RRSVM_input = (
    Variable(torch.FloatTensor(A), requires_grad=True),
    Variable(torch.FloatTensor(weight), requires_grad=True),
)

Max_input = Variable(torch.FloatTensor(A), requires_grad=True)
Avg_Input = Variable(torch.FloatTensor(A), requires_grad=True)

RRSVM_F = RRSVM.RRSVM_F(kernel_size=kernel_size,
                        padding=padding,
                        stride=stride,
                        dilation=dilation,
                        return_indices=True)


def Max_F(input):
    return Functional.max_pool2d(input,
                                 kernel_size=kernel_size,
                                 padding=padding,
                                 stride=stride,
                                 dilation=dilation,
                                 return_indices=True)


def Avg_F(input):
    # avg_pool2d takes no dilation or return_indices arguments
    return Functional.avg_pool2d(input,
                                 kernel_size=kernel_size,
                                 padding=padding,
                                 stride=stride)
Example #22
        kernel_size = 2
        n_channel = 100
        feature_size = 224
        batch_size = 40
        input = (
            Variable(torch.FloatTensor(
                torch.randn(batch_size, n_channel, feature_size,
                            feature_size)),
                     requires_grad=True),
            Variable(torch.FloatTensor(torch.randn(n_channel, kernel_size**2)),
                     requires_grad=True),
        )

        RRSVM_f = RRSVM.RRSVM_F(kernel_size,
                                padding=0,
                                stride=feature_size,
                                dilation=1)

        useCuda = False
        if useCuda and torch.cuda.is_available():
            input = [i.cuda() for i in input]

        start = time.time()
        analytical, analytical_indices = RRSVM_f(*input)
        end = time.time()
        print "RRSVM CPU:{:0.10f}".format(end - start)

        useCuda = True

        input_cuda = (
            Variable(torch.FloatTensor(

model = reduced_vgg.VGG(n_classes=n_classes)


print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, momentum=0.9, weight_decay=1e-4)  # was 5e-4 before


p_constraint = False
if args.positive_constraint:
    p_constraint = True
    positive_clipper = RRSVM.RRSVM_PositiveClipper()

use_cuda = torch.cuda.is_available() and (args.gpu_id is not None or args.multiGpu)

if use_cuda:
    if args.multiGpu:
        if args.gpu_id is None:  # using all the GPUs
            device_count = torch.cuda.device_count()
            print("Using ALL {:d} GPUs".format(device_count))
            model = nn.DataParallel(model, device_ids=[i for i in range(device_count)]).cuda()
        else:
            print("Using GPUs: {:s}".format(args.gpu_id))
            device_ids = [int(x) for x in args.gpu_id]
            model = nn.DataParallel(model, device_ids=device_ids).cuda()