Example #1
    def RunningAllreduceWithGPUs(self, gpu_ids, allreduce_function):
        """A base function to test different scenarios."""
        workspace.ResetWorkspace()
        net = core.Net("mujitest")
        for id in gpu_ids:
            net.ConstantFill([],
                             "testblob_gpu_" + str(id),
                             shape=[1, 2, 3, 4],
                             value=float(id + 1),
                             device_option=muji.OnGPU(id))
        allreduce_function(net, ["testblob_gpu_" + str(i) for i in gpu_ids],
                           "_reduced", gpu_ids)
        workspace.RunNetOnce(net)
        target_value = sum(gpu_ids) + len(gpu_ids)
        all_blobs = workspace.Blobs()
        all_blobs.sort()
        for blob in all_blobs:
            print blob, workspace.FetchBlob(blob)

        for id in gpu_ids:
            blob = workspace.FetchBlob("testblob_gpu_" + str(i) + "_reduced")
            np.testing.assert_array_equal(blob,
                                          target_value,
                                          err_msg="gpu id %d of %s" %
                                          (id, str(gpu_ids)))
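
A hedged sketch of how the helper above might be driven from a test method on the same class; muji.Allreduce and the fixed two-GPU id list are assumptions, not part of this snippet:

    def testAllreduceTwoGPUs(self):
        # Run the base scenario on GPUs 0 and 1 with the generic allreduce.
        self.RunningAllreduceWithGPUs([0, 1], muji.Allreduce)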
Example #2
  def testCreateWorkspace(self):
    workspaces = workspace.Workspaces()
    self.assertEqual(len(workspaces), 1)
    self.assertEqual(workspaces[0], "default")
    self.net = core.Net("test-net")
    self.net.ConstantFill([], "testblob", shape=[1, 2, 3, 4], value=1.0)
    self.assertEqual(
        workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
    self.assertEqual(workspace.HasBlob("testblob"), True)
    self.assertEqual(workspace.SwitchWorkspace("test", True), True)
    self.assertEqual(workspace.HasBlob("testblob"), False)
    self.assertEqual(workspace.SwitchWorkspace("default"), True)
    self.assertEqual(workspace.HasBlob("testblob"), True)

    try:
        # The following should raise an error.
        workspace.SwitchWorkspace("non-existing")
        # so this should never happen.
        self.assertEqual(True, False)
    except RuntimeError:
        pass

    workspaces = workspace.Workspaces()
    self.assertEqual(len(workspaces), 2)
    workspaces.sort()
    self.assertEqual(workspaces[0], "default")
    self.assertEqual(workspaces[1], "test")
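
The try/except above can also be written with unittest's assertRaises context manager (Python 2.7+); a minimal equivalent sketch:

    # Switching to a workspace that was never created should raise.
    with self.assertRaises(RuntimeError):
        workspace.SwitchWorkspace("non-existing")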
Example #3
 def CheckNet(self, net, inputs={}, ignore=set()):
     """Checks a network by inspecting all of its intermediate results, and
     sees if things match across devices.
     """
     old_ws_name = workspace.CurrentWorkspace()
     results = []
     blobs_to_check = sum([list(op.outputs) for op in net.operators], [])
     blobs_to_check = [b for b in blobs_to_check if b not in ignore]
     workspace.SwitchWorkspace("_device_check_", True)
     for i, device_option in enumerate(self._device_options):
         for name, arr in inputs.iteritems():
             workspace.FeedBlob(name, arr, device_option)
         for op in net.operators:
             op.device_option.CopyFrom(device_option)
         workspace.RunNetOnce(net)
         results.append(
             [workspace.FetchBlob(name) for name in blobs_to_check])
     # After running on all devices, check correctness
     success = True
     for i in range(1, len(results)):
         for j in range(len(blobs_to_check)):
             x = results[i][j]
             y = results[0][j]
             if np.any(np.abs(x - y) > self._threshold):
                 print 'Failure in checking device option', i, 'and blob ',
                 print blobs_to_check[j], '. The outputs are:'
                 print x.flatten()
                 print y.flatten()
                 success = False
                 continue
     workspace.SwitchWorkspace(old_ws_name)
     return success
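
A minimal standalone sketch of driving CheckNet through DeviceChecker; the tiny Relu net, the blob names, and the threshold are illustrative assumptions (Example #4 below shows a full-size usage):

    cpu_device = caffe2_pb2.DeviceOption()
    cpu_device.device_type = caffe2_pb2.CPU
    gpu_device = caffe2_pb2.DeviceOption()
    gpu_device.device_type = caffe2_pb2.CUDA
    small_net = core.Net("relu_check")
    small_net.Relu("x", "y")
    checker = device_checker.DeviceChecker(1e-5, [cpu_device, gpu_device])
    ok = checker.CheckNet(
        small_net.Proto(),
        inputs={"x": np.random.randn(2, 3).astype(np.float32)})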
Example #4
    def _testMiniAlexNet(self, order):
        # First, we get all the random initialization of parameters.
        model = self._MiniAlexNetNoDropout(order)
        workspace.ResetWorkspace()
        workspace.RunNetOnce(model.param_init_net)
        inputs = dict([(str(name), workspace.FetchBlob(str(name)))
                       for name in model.params])
        if order == "NCHW":
            inputs["data"] = np.random.rand(4, 3, 227, 227).astype(np.float32)
        else:
            inputs["data"] = np.random.rand(4, 227, 227, 3).astype(np.float32)
        inputs["label"] = np.array([1, 2, 3, 4]).astype(np.int32)

        cpu_device = caffe2_pb2.DeviceOption()
        cpu_device.device_type = caffe2_pb2.CPU
        gpu_device = caffe2_pb2.DeviceOption()
        gpu_device.device_type = caffe2_pb2.CUDA

        checker = device_checker.DeviceChecker(1e-5, [cpu_device, gpu_device])
        ret = checker.CheckNet(
            model.net.Proto(),
            inputs,
            # The pooling indices may sometimes be sensitive to small numerical
            # differences in the input, so we skip checking them.
            ignore=['_pool1_idx', '_pool2_idx', '_pool5_idx'])
        self.assertEqual(ret, True)
Example #5
 def testFetchBlobGPU(self):
   self.assertEqual(workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
   fetched = workspace.FetchBlob("testblob")
   # check if fetched is correct.
   self.assertEqual(fetched.shape, (1, 2, 3, 4))
   np.testing.assert_array_equal(fetched, 1.0)
   fetched[:] = 2.0
   self.assertEqual(workspace.FeedBlob("testblob", fetched), True)
   fetched_again = workspace.FetchBlob("testblob")
   self.assertEqual(fetched_again.shape, (1, 2, 3, 4))
   np.testing.assert_array_equal(fetched_again, 2.0)
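
The test above modifies the fetched array and feeds it back, which is the basic FeedBlob/FetchBlob round trip; a minimal sketch with an arbitrary blob name and shape:

    arr = np.zeros((1, 2, 3, 4), dtype=np.float32)
    workspace.FeedBlob("someblob", arr)        # copy the array into the workspace
    fetched = workspace.FetchBlob("someblob")  # read it back as a numpy array
    fetched[:] = 2.0                           # a local change only ...
    workspace.FeedBlob("someblob", fetched)    # ... visible to the workspace after re-feeding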
Example #6
    def testMNISTNetworks(self):
        # First, we get all the random initialization of parameters.
        init_net, train_net = self._MNISTNetworks()
        workspace.ResetWorkspace()
        workspace.RunNetOnce(init_net)
        inputs = dict([(str(name), workspace.FetchBlob(str(name)))
                       for name in workspace.Blobs()])
        cpu_device = caffe2_pb2.DeviceOption()
        cpu_device.device_type = caffe2_pb2.CPU
        gpu_device = caffe2_pb2.DeviceOption()
        gpu_device.device_type = caffe2_pb2.CUDA

        checker = device_checker.DeviceChecker(1e-2, [cpu_device, gpu_device])
        ret = checker.CheckNet(train_net.Proto(), inputs)
        self.assertEqual(ret, True)
Example #7
def Benchmark(model_gen, arg):
    model, input_size = model_gen(arg.order)

    # In order to be able to run everything without feeding more inputs, let's
    # add the data and label blobs to the parameter initialization net as well.
    if arg.order == "NCHW":
        input_shape = [arg.batch_size, 3, input_size, input_size]
    else:
        input_shape = [arg.batch_size, input_size, input_size, 3]
    model.param_init_net.GaussianFill([],
                                      "data",
                                      shape=input_shape,
                                      mean=0.0,
                                      std=1.0)
    model.param_init_net.UniformIntFill([],
                                        "label",
                                        shape=[
                                            arg.batch_size,
                                        ],
                                        min=0,
                                        max=999)

    # Note: even when we are running things on CPU, adding a few engine-related
    # arguments will not hurt, since the CPU operator registry will simply
    # ignore these options and take the default path.
    for op in model.net.Proto().op:
        if op.type == 'Conv' or op.type == 'ConvFp16':
            op.engine = 'CUDNN'
            #op.arg.add().CopyFrom(utils.MakeArgument('ws_nbytes_limit', arg.cudnn_limit))
            op.arg.add().CopyFrom(utils.MakeArgument('exhaustive_search', 1))
            op.arg.add().CopyFrom(
                utils.MakeArgument('shared_ws_name', 'cudnn_workspace'))
        elif op.type in [
                'MaxPool', 'MaxPoolFp16', 'AveragePool', 'AveragePoolFp16',
                'Relu', 'ReluFp16', 'Softmax', 'SoftmaxFp16'
        ]:
            op.engine = 'CUDNN'
    if arg.forward_only:
        print arg.model, ': running forward only.'
    else:
        print arg.model, ': running forward-backward.'
        model.AddGradientOperators()
        if arg.order == 'NHWC':
            print(
                '==WARNING==\n'
                'NHWC order with CuDNN may not be supported yet, so I might\n'
                'exit suddenly.')

    if not arg.cpu:
        model.param_init_net.RunAllOnGPU()
        model.net.RunAllOnGPU()

    workspace.RunNetOnce(model.param_init_net)
    workspace.CreateNet(model.net)
    for i in range(arg.warmup_iterations):
        workspace.RunNet(model.net.Proto().name)

    start = time.time()
    for i in range(arg.iterations):
        workspace.RunNet(model.net.Proto().name)
    print 'Spent: ', (time.time() - start) / arg.iterations
    if arg.layer_wise_benchmark:
        print 'Layer-wise benchmark.'
        workspace.BenchmarkNet(model.net.Proto().name, 1, arg.iterations, True)
    # Writes out the pbtxt for benchmarks on e.g. Android
    with open("{0}_init_batch_{1}.pbtxt".format(arg.model, arg.batch_size),
              "w") as fid:
        fid.write(str(model.param_init_net.Proto()))
    with open("{0}.pbtxt".format(arg.model, arg.batch_size), "w") as fid:
        fid.write(str(model.net.Proto()))
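
Benchmark() only reads a handful of fields from arg, so it can be driven without a full command-line parser; a hedged sketch in which the AlexNet generator and every field value are assumptions inferred from the function body above:

    import argparse

    args = argparse.Namespace(
        model="AlexNet", order="NCHW", batch_size=64, cpu=False,
        forward_only=False, warmup_iterations=10, iterations=10,
        layer_wise_benchmark=False)
    # AlexNet is assumed to be a model generator with the signature
    # model_gen(order) -> (model, input_size), as Benchmark() expects.
    Benchmark(AlexNet, args)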
Example #8
if __name__ == '__main__':
    if len(sys.argv) == 1:
        print(
            'If you do not explicitly ask to run this test, I will not run it. '
            'Pass in any argument to have the test run for you.')
        sys.exit(0)
    if not os.path.exists('data/testdata/caffe_translator'):
        print 'No testdata exists for the caffe translator test. Exiting.'
        sys.exit(0)
    # We will do all the computation in the global scope.
    caffenet = caffe_pb2.NetParameter()
    caffenet_pretrained = caffe_pb2.NetParameter()
    text_format.Merge(
        open('data/testdata/caffe_translator/deploy.prototxt').read(),
        caffenet)
    caffenet_pretrained.ParseFromString(
        open(
            'data/testdata/caffe_translator/bvlc_reference_caffenet.caffemodel'
        ).read())
    net, pretrained_params = caffe_translator.TranslateModel(
        caffenet, caffenet_pretrained)
    caffe_translator.DeleteDropout(net)
    for param in pretrained_params.protos:
        workspace.FeedBlob(param.name, utils.Caffe2TensorToNumpyArray(param))
    # Let's also feed in the data from the Caffe test code.
    data = np.load('data/testdata/caffe_translator/data_dump.npy').astype(
        np.float32)
    workspace.FeedBlob('data', data)
    # Actually running the test.
    workspace.RunNetOnce(net.SerializeToString())
    unittest.main()
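
After RunNetOnce, the translated outputs can be read back with FetchBlob for a quick sanity check; in this sketch the output blob name 'prob' is an assumption about the BVLC reference CaffeNet, not something the snippet above guarantees:

    prob = workspace.FetchBlob('prob')  # assumed softmax output blob name
    print prob.shape, prob.argmax(axis=1)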
Example #9
 def testResetWorkspace(self):
   self.assertEqual(workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
   self.assertEqual(workspace.HasBlob("testblob"), True)
   self.assertEqual(workspace.ResetWorkspace(), True)
   self.assertEqual(workspace.HasBlob("testblob"), False)