Example 1
import numpy as np
from pycaffe2 import core, device_checker, gradient_checker, workspace
from caffe2.proto import caffe2_pb2, caffe2_legacy_pb2

import sys
import unittest

if workspace.has_gpu_support and workspace.NumberOfGPUs() > 0:
    # A GPU is available: verify results on both the GPU and the CPU.
    gpu_device_option = caffe2_pb2.DeviceOption()
    gpu_device_option.device_type = caffe2_pb2.CUDA
    cpu_device_option = caffe2_pb2.DeviceOption()
    device_checker = device_checker.DeviceChecker(
        0.01, [gpu_device_option, cpu_device_option])
    gradient_checkers = [
        gradient_checker.GradientChecker(0.005, 0.05, gpu_device_option,
                                         "gpu_checker_ws"),
        gradient_checker.GradientChecker(0.01, 0.05, cpu_device_option,
                                         "cpu_checker_ws"),
    ]
else:
    # CPU-only build or no visible GPU device: check on the CPU alone.
    cpu_device_option = caffe2_pb2.DeviceOption()
    device_checker = device_checker.DeviceChecker(0.01, [cpu_device_option])
    gradient_checkers = [
        gradient_checker.GradientChecker(0.01, 0.05, cpu_device_option,
                                         "cpu_checker_ws")
    ]


class TestConvLegacyPooling(unittest.TestCase):
    def setUp(self):
        self.test_configs = [
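The snippet above only configures the checkers; a rough sketch of how they might then be used inside a test case follows. It is not part of the original example: it assumes the pycaffe2 DeviceChecker and GradientChecker expose the same CheckSimple interface as later Caffe2 releases, and the Relu operator and the X blob are purely illustrative.

# Hypothetical usage of the checkers configured above; not part of the
# original example. Assumes CheckSimple() behaves as in later Caffe2
# releases and that core.CreateOperator is available in pycaffe2.
X = np.random.rand(2, 3).astype(np.float32) + 1.0  # keep values away from the Relu kink
op = core.CreateOperator("Relu", ["X"], ["Y"])

# The device checker runs the op on every configured device and compares
# output 0 across devices within the 0.01 threshold set above.
assert device_checker.CheckSimple(op, [X], [0])

# Each gradient checker compares the analytical gradient of input 0 with a
# numerical estimate for output 0, using its own stepsize and threshold.
for checker in gradient_checkers:
    res, grad, grad_estimated = checker.CheckSimple(op, [X], 0, [0])
    assert res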
Example 2
    def testAllreduceSingleGPU(self):
        for i in range(workspace.NumberOfGPUs()):
            self.RunningAllreduceWithGPUs([i], muji.Allreduce)
Example 3
    def testAllreduceWithTwoGPUs(self):
        pattern = workspace.GetCudaPeerAccessPattern()
        if pattern.shape[0] >= 2 and np.all(pattern[:2, :2]):
            self.RunningAllreduceWithGPUs([0, 1], muji.Allreduce2)
        else:
            print('Skipping allreduce with 2 gpus. Not peer access ready.')

    def testAllreduceWithFourGPUs(self):
        pattern = workspace.GetCudaPeerAccessPattern()
        if pattern.shape[0] >= 4 and np.all(pattern[:4, :4]):
            self.RunningAllreduceWithGPUs([0, 1, 2, 3], muji.Allreduce4)
        else:
            print('Skipping allreduce with 4 gpus. Not peer access ready.')

    def testAllreduceWithEightGPUs(self):
        pattern = workspace.GetCudaPeerAccessPattern()
        if (pattern.shape[0] >= 8 and np.all(pattern[:4, :4])
                and np.all(pattern[4:, 4:])):
            self.RunningAllreduceWithGPUs(range(8), muji.Allreduce8)
        else:
            print('Skipping allreduce with 8 gpus. Not peer access ready.')


if __name__ == '__main__':
    if not workspace.has_gpu_support:
        print('No GPU support. Skipping muji test.')
    elif workspace.NumberOfGPUs() == 0:
        print('No GPU device. Skipping gpu test.')
    else:
        workspace.GlobalInit(['python'])
        unittest.main()
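All of the tests above go through a RunningAllreduceWithGPUs helper that the excerpt never shows. Below is a minimal sketch of what such a helper could look like; it is not taken from the original file. It assumes the pycaffe2 import paths shown, a muji.OnGPU() helper, and the (net, blobs, reduced_affix, gpu_indices) allreduce signature of later Caffe2 releases; blob names, shape, and fill values are made up.

# Hypothetical test helper; not part of the excerpt. The import path and the
# allreduce/OnGPU signatures are assumptions based on later Caffe2 releases.
import numpy as np
from pycaffe2 import core, muji, workspace

def RunningAllreduceWithGPUs(self, gpu_ids, allreduce_function):
    net = core.Net("allreduce_test")
    # Fill one blob per GPU with a distinct constant value.
    for gpu_id in gpu_ids:
        net.ConstantFill([], "blob_gpu_%d" % gpu_id,
                         shape=[1, 2, 3], value=float(gpu_id + 1),
                         device_option=muji.OnGPU(gpu_id))
    # Insert the allreduce operators (Allreduce2, Allreduce4, ...) into the net.
    allreduce_function(net, ["blob_gpu_%d" % i for i in gpu_ids],
                       "_reduced", gpu_ids)
    workspace.RunNetOnce(net)
    # After the reduction every "_reduced" blob should hold the global sum.
    expected = float(sum(i + 1 for i in gpu_ids))
    for gpu_id in gpu_ids:
        reduced = workspace.FetchBlob("blob_gpu_%d_reduced" % gpu_id)
        np.testing.assert_allclose(reduced, expected)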
Example 4
    def testAllreduceFallback(self):
        self.RunningAllreduceWithGPUs(range(workspace.NumberOfGPUs()),
                                      muji.AllreduceFallback)
Example 5
    def testGetCudaPeerAccessPattern(self):
        pattern = workspace.GetCudaPeerAccessPattern()
        self.assertEqual(type(pattern), np.ndarray)
        self.assertEqual(pattern.ndim, 2)
        self.assertEqual(pattern.shape[0], pattern.shape[1])
        self.assertEqual(pattern.shape[0], workspace.NumberOfGPUs())
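GetCudaPeerAccessPattern returns the square boolean matrix checked above, where entry (i, j) reports whether GPU i has peer access to GPU j. A small sketch of how that matrix can drive the choice between the allreduce variants used in the earlier tests; it simply mirrors the conditions in Example 3 and assumes the same import paths as the sketch above.

# Illustrative strategy selection based on the peer access pattern; it
# mirrors the checks in the tests above and is not part of the excerpt.
import numpy as np
from pycaffe2 import muji, workspace  # import path assumed from the examples above

pattern = workspace.GetCudaPeerAccessPattern()
n = pattern.shape[0]
if n >= 8 and np.all(pattern[:4, :4]) and np.all(pattern[4:8, 4:8]):
    allreduce = muji.Allreduce8       # two peer-connected groups of four
elif n >= 4 and np.all(pattern[:4, :4]):
    allreduce = muji.Allreduce4       # first four GPUs share peer access
elif n >= 2 and np.all(pattern[:2, :2]):
    allreduce = muji.Allreduce2       # only the first pair is peer-connected
else:
    allreduce = muji.AllreduceFallback  # no usable peer access: generic fallback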