def test_reduce_front_sum_empty_batch(self, num_reduce_dim, seed, gc, dc):
    np.random.seed(seed)
    X = np.random.rand(0, 4, 3, 5).astype(np.float32)

    def ref_sum(X):
        return [np.sum(X, axis=(tuple(range(num_reduce_dim))))]

    self.reduce_op_test(
        "ReduceFrontSum", ref_sum, [X], ["input"], num_reduce_dim, gc)
    self.grad_variant_input_test(
        "ReduceFrontSumGradient", X, ref_sum, num_reduce_dim)

    # test the second iteration
    not_empty_X = np.random.rand(2, 4, 3, 5).astype(np.float32)
    net = core.Net('test')
    with core.DeviceScope(gc):
        net.ReduceFrontSum(
            ['X'], ['output'], num_reduce_dim=num_reduce_dim)
        workspace.CreateNet(net)

        workspace.FeedBlob('X', not_empty_X)
        workspace.RunNet(workspace.GetNetName(net))
        output = workspace.FetchBlob('output')
        np.testing.assert_allclose(
            output, ref_sum(not_empty_X)[0], atol=1e-3)

        workspace.FeedBlob('X', X)
        workspace.RunNet(workspace.GetNetName(net))
        output = workspace.FetchBlob('output')
        np.testing.assert_allclose(output, ref_sum(X)[0], atol=1e-3)
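# A minimal sketch of the boilerplate this test method is assumed to sit in,
# following caffe2's hypothesis-based operator-test conventions
# (caffe2.python.hypothesis_test_util). The strategy ranges and class name are
# illustrative, not taken from the source; reduce_op_test and
# grad_variant_input_test are assumed to be helpers defined elsewhere on the
# same test class.
import numpy as np
from hypothesis import given
import hypothesis.strategies as st

from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu


class ReduceFrontSumTest(hu.HypothesisTestCase):
    # hu.gcs supplies gc (the device option to run under) and dc (device checks)
    @given(num_reduce_dim=st.integers(0, 4), seed=st.integers(0, 4), **hu.gcs)
    def test_reduce_front_sum_empty_batch(self, num_reduce_dim, seed, gc, dc):
        ...  # body as above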
net_def.ParseFromString(f.read())
net_def.device_option.CopyFrom(device_opts)
C.create_net(net_def.SerializeToString())
C.feed_blob('data', img, device_opts.SerializeToString())

### Debug code
"""
for b in workspace.Blobs():
    x = workspace.FetchBlob(b)
    if type(x) != str:
        print(str(b) + ': ' + str(x.shape))
"""
### End of debug code

print('Running net ' + workspace.GetNetName(net_def) + '...')
C.run_net(workspace.GetNetName(net_def), 1, False)

# Fetch the output probabilities as a multi-dimensional numpy array
results = workspace.FetchBlob('prob')
# print("results shape: ", results.shape)

# Quick way to get the top-1 prediction result
# Squeeze out the batch axis; this returns a 1-D array of length 1000
preds = np.squeeze(results)
# The top-1 prediction is the index of the maximum value in preds;
# the value at that index is the confidence
curr_pred, curr_conf = max(enumerate(preds), key=operator.itemgetter(1))
print("Prediction: ", curr_pred)
print("Confidence: ", curr_conf)
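# Purely illustrative follow-up (not from the source): map the top-1 class
# index to a human-readable label. LABELS_FILE is a hypothetical path to a
# one-class-name-per-line file matching the model's output ordering.
LABELS_FILE = 'labels.txt'  # hypothetical placeholder
with open(LABELS_FILE) as label_file:
    labels = [line.strip() for line in label_file]
print("Label: ", labels[curr_pred])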
# Create the device option (CPU, or HIP for AMD GPUs)
device_opts = caffe2_pb2.DeviceOption()
if use_gpu == 0:
    device_opts.device_type = caffe2_pb2.CPU
    print('Running on CPU')
else:
    device_opts.device_type = caffe2_pb2.HIP
    device_opts.hip_gpu_id = 0
    print('Running on HIP')
if use_gpu == 2:
    engine_list = ['MIOPEN', '']
    C.set_global_engine_pref({caffe2_pb2.HIP: engine_list})
    print('Using MIOPEN')

C.feed_blob('data', img, device_opts.SerializeToString())

# Run the init net once to load the weights onto the selected device
init_def = caffe2_pb2.NetDef()
with open(INIT_NET, 'rb') as f:
    init_def.ParseFromString(f.read())
    init_def.device_option.CopyFrom(device_opts)
    C.run_net_once(init_def.SerializeToString())

# Create the predict net, feed the input, and benchmark it
net_def = caffe2_pb2.NetDef()
with open(PREDICT_NET, 'rb') as f:
    net_def.ParseFromString(f.read())
    net_def.device_option.CopyFrom(device_opts)
    C.create_net(net_def.SerializeToString())

C.feed_blob('data', img, device_opts.SerializeToString())
# benchmark_net(name, warmup_runs, main_runs, run_individual)
C.benchmark_net(workspace.GetNetName(net_def), 10, num_iter, per_layer_timing)
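# The 'img' blob fed to 'data' above is assumed to already be a float32 NCHW
# batch matching the model's input. A minimal preprocessing sketch is given
# below, assuming a 224x224 RGB input; the resize target, channel order, and
# any mean/scale normalization depend on how the network was trained.
import numpy as np
import skimage.io
import skimage.transform

def load_image(path, size=224):
    img = skimage.io.imread(path)                      # HWC, RGB
    img = skimage.transform.resize(img, (size, size))  # floats in [0, 1]
    img = img.astype(np.float32)
    img = img.transpose(2, 0, 1)                       # HWC -> CHW
    return img[np.newaxis, :, :, :]                    # add batch dim -> NCHW

# Example: img = load_image('input.jpg') before the C.feed_blob('data', ...) calls.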