Example #1
    def __init__(self):
        super(CalibNet48, self).__init__(
            conv1=F.Convolution2D(3, 64, ksize=4, stride=1),
            ln1=F.LocalResponseNormalization(n=9),
            conv2=F.Convolution2D(64, 64, ksize=4, stride=1),
            ln2=F.LocalResponseNormalization(n=9),
            fc1=F.Linear(23104, 256),
            fc2=F.Linear(256, 45),
        )
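The example shows only the constructor. As a minimal sketch of how such links are typically chained in this FunctionSet-style Chainer v1 API (the activation and pooling placement below is an assumption, not part of the original source):

    def forward(self, x):
        # Assumed wiring: conv -> ReLU -> LRN per stage, one 2x2 max pool
        # after the first stage. For a 48x48 input, with cover_all=False
        # this yields 64 x 19 x 19 = 23104 features, matching fc1's input.
        h = F.max_pooling_2d(self.ln1(F.relu(self.conv1(x))), 2,
                             cover_all=False)
        h = self.ln2(F.relu(self.conv2(h)))
        h = F.relu(self.fc1(h))
        return self.fc2(h)  # 45 calibration classes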
Example #2
    def __init__(self):
        super(FaceNet48, self).__init__(
            net24=FaceNet24(),
            conv1=F.Convolution2D(3, 64, ksize=4, stride=1),
            ln1=F.LocalResponseNormalization(n=9),
            conv2=F.Convolution2D(64, 64, ksize=4, stride=1),
            ln2=F.LocalResponseNormalization(n=9),
            fc1=F.Linear(5184, 256),
            fc2=F.Linear(384, 2),  # 384 = 256 + 128
        )
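Note that fc2 expects 384 inputs although fc1 only produces 256: the remaining 128 come from the embedded net24, as the comment indicates. A hedged sketch of that concatenation (the feature24 helper and the exact wiring are assumptions for illustration):

    def forward(self, x48, x24):
        # fc1's 256-d output is concatenated with a 128-d feature vector
        # from the embedded 24x24 net: 256 + 128 = 384 inputs for fc2.
        h = self.ln1(F.relu(self.conv1(x48)))
        h = self.ln2(F.relu(self.conv2(h)))
        h = F.relu(self.fc1(h))
        h24 = self.net24.feature24(x24)  # hypothetical 128-d feature hook
        return self.fc2(F.concat((h, h24)))  # 2-way face / non-face output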
Example #3
    def check_backward(self, x_data, y_grad):
        gradient_check.check_backward(functions.LocalResponseNormalization(),
                                      x_data,
                                      y_grad,
                                      eps=1,
                                      dtype=numpy.float64,
                                      **self.check_backward_options)
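For context, a self-contained sketch of the same check (shapes and tolerances are illustrative; assumes the Chainer v1-style Function API used throughout these examples):

    import numpy
    from chainer import functions, gradient_check

    x = numpy.random.uniform(-1, 1, (2, 7, 3, 2)).astype(numpy.float32)
    gy = numpy.random.uniform(-1, 1, x.shape).astype(numpy.float32)
    # Compares LRN's analytical backward pass against a numerical gradient
    # computed by central differences with step eps.
    gradient_check.check_backward(
        functions.LocalResponseNormalization(), x, gy,
        eps=1, dtype=numpy.float64, atol=1e-4)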
Example #4
    def check_backward(self, inputs, grad_outputs, backend_config):
        if backend_config.use_cuda:
            inputs = cuda.to_gpu(inputs)
            grad_outputs = cuda.to_gpu(grad_outputs)

        with backend_config:
            gradient_check.check_backward(
                functions.LocalResponseNormalization(), inputs, grad_outputs,
                eps=1, dtype=numpy.float64, **self.check_backward_options)
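This variant receives a backend_config, which Chainer's testing utilities use to run the same check on both CPU and GPU. A hedged sketch of how such a test class is typically parameterized (decorator arguments are illustrative):

    import unittest
    from chainer import testing

    @testing.backend.inject_backend_tests(
        None,  # inject into all test methods
        [{}, {'use_cuda': True}])  # one CPU and one CUDA configuration
    class TestLocalResponseNormalization(unittest.TestCase):
        ...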
Example #5
    def setUp(self):
        n = 5
        k = 1
        alpha = 1e-4
        beta = .75
        self.x = np.random.uniform(-1, 1,
                                   (2, self.channel, 3, 2)).astype(self.dtype)
        self.gy = np.random.uniform(-1, 1,
                                    (2, self.channel, 3, 2)).astype(self.dtype)
        self.check_forward_options = {}
        self.check_backward_options = {}
        if self.channel >= 8:
            # Use looser tolerances for the larger channel counts.
            self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
            self.check_backward_options = {'atol': 5e-3, 'rtol': 5e-3}
        self.lrn = F.LocalResponseNormalization(n, k, alpha, beta)
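For reference, what LocalResponseNormalization(n, k, alpha, beta) computes: each channel is divided by a local sum of squares over n neighboring channels, y_i = x_i / (k + alpha * sum_j x_j^2)^beta. A plain NumPy sketch of that formula (mirroring Chainer's documented definition, window centered on channel i):

    import numpy as np

    def lrn_reference(x, n=5, k=2, alpha=1e-4, beta=0.75):
        # x has shape (batch, channels, H, W); normalize each channel by a
        # local sum of squares over a window of n adjacent channels.
        half = n // 2
        sq = x ** 2
        y = np.empty_like(x)
        for c in range(x.shape[1]):
            lo, hi = max(0, c - half), min(x.shape[1], c + half + 1)
            scale = k + alpha * sq[:, lo:hi].sum(axis=1)
            y[:, c] = x[:, c] / scale ** beta
        return y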
Example #6
import time

import numpy as np
import chainer.functions as F
# `mkld` comes from the MKL-DNN-enabled Chainer fork these benchmarks target.


def test_lrn(calculate, switchOn=True):
    total_forward = 0
    total_backward = 0
    data = np.ndarray((10, 3, 2240, 2240), dtype=np.float32)
    data.fill(333.33)
    datay = np.ndarray((10, 3, 2240, 2240), dtype=np.float32)
    datay.fill(333.33)

    count = 0
    niter = 5
    n_dry = 3  # warm-up iterations excluded from the averages

    n = 5
    k = 2
    alpha = 1e-4
    beta = .75

    mkld.enable_lrn = switchOn
    for i in range(niter):
        # forward_cpu/backward_cpu take tuples of arrays, hence the
        # trailing commas.
        x = np.asarray(data),
        gy = np.asarray(datay),

        start = time.time()
        lrn = F.LocalResponseNormalization(n, k, alpha, beta)
        lrn.forward_cpu(x)
        end = time.time()
        if i >= n_dry:
            count += 1
            total_forward += (end - start) * 1000

        start = time.time()
        lrn.backward_cpu(x, gy)
        end = time.time()
        if i >= n_dry:
            total_backward += (end - start) * 1000

    print(calculate, " Average Forward: ", total_forward / count, "ms")
    print(calculate, " Average Backward: ", total_backward / count, "ms")
    print(calculate, " Average Total: ",
          (total_forward + total_backward) / count, "ms")
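The first argument is only a label used in the printed report; switchOn toggles the MKL-DNN LRN path. A typical comparison run (labels illustrative):

    test_lrn('mkldnn', switchOn=True)   # MKL-DNN-backed LRN
    test_lrn('numpy', switchOn=False)   # plain CPU fallback on the same data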
Example #7
    def check_backward(self, x_data, y_grad):
        gradient_check.check_backward(functions.LocalResponseNormalization(),
                                      x_data,
                                      y_grad,
                                      eps=1,
                                      atol=1e-4)