Example 1
def test_pad_opset_1():
    x = np.ones((2, 2), dtype=np.float32)
    y = np.pad(x, pad_width=1, mode='constant')

    model = get_node_model('Pad', x, paddings=[1, 1, 1, 1])
    ng_results = run_model(model, [x])
    assert np.array_equal(ng_results, [y])

    x = np.random.randn(1, 3, 4, 5).astype(np.float32)
    y = np.pad(x, pad_width=((0, 0), (0, 0), (1, 2), (3, 4)), mode='constant')

    model = get_node_model('Pad',
                           x,
                           mode='constant',
                           paddings=[0, 0, 1, 3, 0, 0, 2, 4])
    ng_results = run_model(model, [x])
    assert np.array_equal(ng_results, [y])

    # incorrect paddings rank
    x = np.ones((2, 2), dtype=np.float32)
    model = get_node_model('Pad', x, paddings=[0, 1, 1, 3, 1, 2])
    with pytest.raises(RuntimeError):
        run_model(model, [x])

    # no paddings attribute
    model = get_node_model('Pad', x)
    with pytest.raises(RuntimeError):
        import_onnx_model(model)
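
The helpers get_node_model, run_model and import_onnx_model used by these tests are defined elsewhere in the test suite. A minimal sketch of what get_node_model might look like, assuming it wraps a single ONNX node with one graph input per supplied array (the real helper may differ):

import numpy as np
import onnx
from onnx.helper import (make_graph, make_model, make_node,
                         make_tensor_value_info)


def get_node_model(op_type, *inputs, opset=1, **node_attributes):
    """Build a one-node ONNX model around the given input arrays."""
    input_names = ['input_%d' % i for i in range(len(inputs))]
    node = make_node(op_type, input_names, ['output'], **node_attributes)
    input_tensors = [make_tensor_value_info(name, onnx.TensorProto.FLOAT, data.shape)
                     for name, data in zip(input_names, inputs)]
    output_tensors = [make_tensor_value_info('output', onnx.TensorProto.FLOAT, None)]
    graph = make_graph([node], 'test_graph', input_tensors, output_tensors)
    model = make_model(graph, producer_name='ngraph ONNXImporter')
    model.opset_import[0].version = opset
    return model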
Example 2
def test_pad_opset_2():
    x = np.ones((2, 2), dtype=np.float32)
    y = np.pad(x, pad_width=1, mode='constant')

    model = get_node_model('Pad', x, opset=2, pads=[1, 1, 1, 1])
    ng_results = run_model(model, [x])
    assert np.array_equal(ng_results, [y])

    x = np.random.randn(1, 3, 4, 5).astype(np.float32)
    y = np.pad(x, pad_width=((0, 0), (0, 0), (1, 2), (3, 4)), mode='constant')

    model = get_node_model('Pad',
                           x,
                           opset=2,
                           mode='constant',
                           pads=[0, 0, 1, 3, 0, 0, 2, 4])
    ng_results = run_model(model, [x])
    assert np.array_equal(ng_results, [y])

    # incorrect pads rank
    x = np.ones((2, 2), dtype=np.float32)
    model = get_node_model('Pad', x, opset=2, pads=[0, 1, 1, 3, 1, 2])
    with pytest.raises(ValueError):
        run_model(model, [x])

    # negative pads values
    model = get_node_model('Pad', x, opset=2, pads=[0, -1, -1, 3])
    with pytest.raises(NotImplementedError):
        run_model(model, [x])

    # no pads attribute
    model = get_node_model('Pad', x, opset=2)
    with pytest.raises(ValueError):
        import_onnx_model(model)[0]
Example 3
def test_missing_op():
    node = make_node('FakeOpName', ['A'], ['X'], name='missing_op_node')
    graph = make_graph(
        [node], 'test_graph',
        [make_tensor_value_info('A', onnx.TensorProto.FLOAT, [1])],
        [make_tensor_value_info('X', onnx.TensorProto.FLOAT, [1])])
    model = make_model(graph, producer_name='ngraph ONNXImporter')

    with pytest.raises(NotImplementedError) as exc_info:
        import_onnx_model(model)

    exc_args = exc_info.value.args
    assert exc_args[0] % exc_args[1:] == 'Unknown operation: FakeOpName'
Example 4
def convert_and_calculate(onnx_node, data_inputs, data_outputs):
    # type: (onnx.NodeProto, List[np.ndarray], List[np.ndarray]) -> List[np.ndarray]
    """
    Convert ONNX node to ngraph node and perform computation on input data.

    :param onnx_node: ONNX NodeProto describing a computation node
    :param data_inputs: list of numpy ndarrays with input data
    :param data_outputs: list of numpy ndarrays with expected output data
    :return: list of numpy ndarrays with computed output
    """
    transformer = get_transformer()
    input_tensors = [
        make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape)
        for name, value in zip(onnx_node.input, data_inputs)
    ]
    output_tensors = [
        make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape)
        for name, value in zip(onnx_node.output, data_outputs)
    ]

    graph = make_graph([onnx_node], 'test_graph', input_tensors,
                       output_tensors)
    model = make_model(graph, producer_name='ngraph ONNXImporter')

    ng_results = []
    for ng_model in import_onnx_model(model):
        computation = transformer.computation(ng_model['output'],
                                              *ng_model['inputs'])
        ng_results.append(computation(*data_inputs))

    return ng_results
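
A hypothetical use of convert_and_calculate, checking a single Add node (illustrative data, not from the original suite; assumes the same onnx.helper and numpy imports as above):

add_node = make_node('Add', inputs=['A', 'B'], outputs=['C'])
a = np.array([1.0, 2.0], dtype=np.float32)
b = np.array([3.0, 4.0], dtype=np.float32)
expected = a + b  # also fixes the declared output tensor shape
results = convert_and_calculate(add_node, [a, b], [expected])
assert np.allclose(results, [expected])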
Example 5
def run(args):
    onnx_filename = os.path.join(args.test_dir, 'model.onnx')
    input_names, output_names = onnx_input_output_names(onnx_filename)
    test_data_dir = os.path.join(args.test_dir, 'test_data_set_0')
    inputs, outputs = load_test_data(test_data_dir, input_names, output_names)

    model = onnx.load(onnx_filename)
    ng_func = import_onnx_model(model)

    runtime = ng.runtime(backend_name=args.backend)
    computation = runtime.computation(ng_func)

    inputs = [v for n, v in inputs]
    outputs = [v for n, v in outputs]

    actual_outputs = computation(*inputs)

    for name, expected, actual in zip(output_names, outputs, actual_outputs):
        np.testing.assert_allclose(expected, actual, rtol=1e-3, atol=1e-4,
                                   err_msg=name)
        print('%s: OK' % name)
    print('ALL OK')

    if args.iterations > 1:
        num_iterations = args.iterations - 1
        start = time.time()
        for t in range(num_iterations):
            computation(*inputs)
        elapsed = time.time() - start
        print('Elapsed: %.3f msec' % (elapsed * 1000 / num_iterations))
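
The helper onnx_input_output_names is not shown; a plausible sketch, assuming it returns the graph's input names (minus initializers, i.e. weights) and its output names:

import onnx


def onnx_input_output_names(onnx_filename):
    model = onnx.load(onnx_filename)
    initializers = {init.name for init in model.graph.initializer}
    input_names = [i.name for i in model.graph.input
                   if i.name not in initializers]
    output_names = [o.name for o in model.graph.output]
    return input_names, output_names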
Example 6
def test_reshape_opset5():
    original_shape = [2, 3, 4]
    test_cases = {
        'reordered_dims': np.array([4, 2, 3], dtype=np.int64),
        'reduced_dims': np.array([3, 8], dtype=np.int64),
        'extended_dims': np.array([3, 2, 2, 2], dtype=np.int64),
        'one_dim': np.array([24], dtype=np.int64),
        'negative_dim': np.array([6, -1, 2], dtype=np.int64),
    }
    input_data = np.random.random_sample(original_shape).astype(np.float32)

    for test_name, shape in test_cases.items():
        const_node = make_node('Constant', inputs=[], outputs=['const_shape'],
                               value=onnx.helper.make_tensor(
                                   name='const_tensor',
                                   data_type=onnx.TensorProto.INT64,
                                   dims=shape.shape,
                                   vals=shape.flatten()))
        reshape_node = onnx.helper.make_node('Reshape', inputs=['data', 'const_shape'],
                                             outputs=['reshaped'])

        graph = make_graph([const_node, reshape_node], 'test_graph',
                           [make_tensor_value_info('data', onnx.TensorProto.FLOAT, input_data.shape)],
                           [make_tensor_value_info('reshaped', onnx.TensorProto.FLOAT, ())])

        model = make_model(graph, producer_name='ngraph ONNX Importer')
        model.opset_import[0].version = 5
        ng_model_function = import_onnx_model(model)
        runtime = get_runtime()
        computation = runtime.computation(ng_model_function)
        ng_results = computation(input_data)
        expected_output = np.reshape(input_data, shape)
        assert np.array_equal(ng_results[0], expected_output)
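
The negative_dim case relies on ONNX Reshape following numpy semantics: a -1 entry is inferred so that the total element count is preserved. For example:

import numpy as np

# 24 elements reshaped to [6, -1, 2]: the -1 is inferred as 24 / (6 * 2) = 2.
assert np.reshape(np.arange(24), [6, -1, 2]).shape == (6, 2, 2)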
Example 7
def predict(img_no, plot_result):
    """
    Calculate the Dice and plot the predicted masks for image # img_no
    """

    img = imgs_validation[[img_no], ]
    msk = msks_validation[[img_no], ]
    
    # Load the ONNX model and run inference through nGraph
    if onnx:
        onnx_protobuf = onnx.load('/data/Healthcare_app/output/unet_model_for_decathlon_100_iter.onnx')
        ng_models = import_onnx_model(onnx_protobuf)
        ng_model = ng_models[0]
        runtime = ng.runtime(backend_name='CPU')
        unet = runtime.computation(ng_model['output'], *ng_model['inputs'])

        start_time = time.time()
        pred_mask = unet(img)[0]
        end_time = (time.time() - start_time) * 1000
        print("Time for prediction ngraph: ", '%.0f' % end_time, "ms")
    else:
        start_time = time.time()
        pred_mask = model.predict(img, verbose=0, steps=None)
        end_time = (time.time() - start_time) * 1000
        print(end_time)
    plotDiceScore(img_no, img, msk, pred_mask, plot_result, round(end_time))
    return end_time
Example 8
def run(args):
    onnx_filename = os.path.join(args.test_dir, 'model.onnx')
    input_names, output_names = onnx_input_output_names(onnx_filename)
    test_data_dir = os.path.join(args.test_dir, 'test_data_set_0')
    inputs, outputs = load_test_data(test_data_dir, input_names, output_names)

    model = onnx.load(onnx_filename)
    ng_func = import_onnx_model(model)

    runtime = ng.runtime(backend_name=args.backend)
    computation = runtime.computation(ng_func)

    inputs = [v for n, v in inputs]
    outputs = [v for n, v in outputs]

    actual_outputs = computation(*inputs)

    for name, expected, actual in zip(output_names, outputs, actual_outputs):
        np.testing.assert_allclose(expected, actual, rtol=1e-3, atol=1e-4,
                                   err_msg=name)
        print('%s: OK' % name)
    print('ALL OK')

    def compute():
        computation(*inputs)

    return run_onnx_util.run_benchmark(compute, args.iterations)
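
run_onnx_util.run_benchmark is external to this snippet; a minimal sketch of what such a helper might do, mirroring the timing loop of the earlier run() variant (an assumption, not the actual run_onnx_util implementation):

import time


def run_benchmark(compute_fn, iterations):
    compute_fn()  # warm-up call, excluded from timing
    num_iterations = max(iterations - 1, 1)
    start = time.time()
    for _ in range(num_iterations):
        compute_fn()
    elapsed = time.time() - start
    print('Elapsed: %.3f msec' % (elapsed * 1000 / num_iterations))
    return elapsed / num_iterations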
Example 9
def main():
    args = parser.parse_args()
    model_path = args.model
    dataset_size = args.size
    batch_size = args.batch_size
    backend_name = args.backend
    print_freq = args.print_freq

    # Load ONNX model
    onnx_protobuf = onnx.load(model_path)
    # Change batch size defined in model to value passed by user as argument
    onnx_protobuf.graph.input[0].type.tensor_type.shape.dim[0].dim_value = batch_size

    ng_model = import_onnx_model(onnx_protobuf)
    model_batch, model_channels, model_height, model_width = ng_model.get_parameters()[0].shape

    # Generate synthetic dataset filled with random values
    dataset = generate_data(count=dataset_size,
                            batch_size=model_batch,
                            image_channels=model_channels,
                            image_height=model_height,
                            image_width=model_width)
    dataset = [(img, 0) for img in dataset]

    perf_metrics = evaluate(backend_name, ng_model, dataset, batch_size, print_freq)
    save_results('results/', args.output_file, {key: val.data for key, val in perf_metrics.items()})
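
A plausible sketch of the generate_data helper this script assumes (hypothetical): random images shaped to match the model's input dimensions.

import numpy as np


def generate_data(count, batch_size, image_channels, image_height, image_width):
    return [np.random.rand(batch_size, image_channels,
                           image_height, image_width).astype(np.float32)
            for _ in range(count)]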
Example 10
def test_identity():
    np.random.seed(133391)
    shape = [2, 4]
    input_data = np.random.randn(*shape).astype(np.float32)

    identity_node = make_node('Identity', inputs=['x'], outputs=['y'])
    ng_results = run_node(identity_node, [input_data])
    assert np.array_equal(ng_results, [input_data])

    node1 = make_node('Add',
                      inputs=['A', 'B'],
                      outputs=['add1'],
                      name='add_node1')
    node2 = make_node('Identity',
                      inputs=['add1'],
                      outputs=['identity1'],
                      name='identity_node1')
    node3 = make_node('Abs',
                      inputs=['identity1'],
                      outputs=['Y'],
                      name='abs_node1')

    graph = make_graph([node1, node2, node3], 'test_graph', [
        make_tensor_value_info('A', onnx.TensorProto.FLOAT, shape),
        make_tensor_value_info('B', onnx.TensorProto.FLOAT, shape)
    ], [make_tensor_value_info('Y', onnx.TensorProto.FLOAT, shape)])
    model = make_model(graph, producer_name='ngraph ONNX Importer')
    ng_model_function = import_onnx_model(model)
    runtime = get_runtime()
    computation = runtime.computation(ng_model_function)
    ng_results = computation(input_data, input_data)
    expected_result = np.abs(input_data + input_data)

    assert np.array_equal(ng_results[0], expected_result)
Example 11
def import_and_compute_conv(x, weights, transpose=False, **attributes):
    x, weights = np.array(x), np.array(weights)
    onnx_model = make_onnx_model_for_conv_op(x.shape, weights.shape,
                                             transpose=transpose, **attributes)
    ng_model_function = import_onnx_model(onnx_model)
    computation = get_runtime().computation(ng_model_function)
    return computation(x, weights)[0]
Example 12
def import_and_compute_matmul(input_left, input_right):
    input_data_left = np.array(input_left)
    input_data_right = np.array(input_right)
    onnx_model = make_onnx_model_for_matmul_op(input_data_left, input_data_right)
    transformer = get_runtime()
    ng_model = import_onnx_model(onnx_model)[0]
    computation = transformer.computation(ng_model['output'], *ng_model['inputs'])
    return computation(input_data_left, input_data_right)[0]
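
make_onnx_model_for_matmul_op is defined elsewhere; a minimal sketch under the assumption that it wraps a single MatMul node (illustrative only):

import numpy as np
import onnx
from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info


def make_onnx_model_for_matmul_op(input_left, input_right):
    output_shape = np.matmul(input_left, input_right).shape
    node = make_node('MatMul', inputs=['X', 'Y'], outputs=['Z'])
    graph = make_graph(
        [node], 'matmul_graph',
        [make_tensor_value_info('X', onnx.TensorProto.FLOAT, input_left.shape),
         make_tensor_value_info('Y', onnx.TensorProto.FLOAT, input_right.shape)],
        [make_tensor_value_info('Z', onnx.TensorProto.FLOAT, output_shape)])
    return make_model(graph, producer_name='ngraph ONNXImporter')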
Example 13
def import_and_compute_conv(x, weights, transpose=False, **attributes):
    x, weights = np.array(x), np.array(weights)
    onnx_model = make_onnx_model_for_conv_op(x.shape,
                                             weights.shape,
                                             transpose=transpose,
                                             **attributes)
    ng_model = import_onnx_model(onnx_model)[0]
    computation = get_transformer().computation(ng_model['output'],
                                                *ng_model['inputs'])
    return computation(x, weights)
Example 14
def test_cast_errors():
    np.random.seed(133391)
    input_data = np.ceil(np.random.rand(2, 3, 4) * 16)

    # missing 'to' attribute
    node = onnx.helper.make_node('Cast', inputs=['A'], outputs=['B'])
    input_tensors = [make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape)
                     for name, value in zip(node.input, [input_data])]
    output_tensors = [make_tensor_value_info(name, onnx.TensorProto.FLOAT16, value.shape)
                      for name, value in zip(node.output, ())]  # type: ignore

    graph = make_graph([node], 'compute_graph', input_tensors, output_tensors)
    model = make_model(graph, producer_name='NgraphBackend')
    with pytest.raises(RuntimeError):
        import_onnx_model(model)

    # unsupported data type representation
    node = onnx.helper.make_node('Cast', inputs=['A'], outputs=['B'], to=1.2345)
    input_tensors = [make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape)
                     for name, value in zip(node.input, [input_data])]
    output_tensors = [make_tensor_value_info(name, onnx.TensorProto.INT32, value.shape)
                      for name, value in zip(node.output, ())]  # type: ignore

    graph = make_graph([node], 'compute_graph', input_tensors, output_tensors)
    model = make_model(graph, producer_name='NgraphBackend')
    with pytest.raises(RuntimeError):
        import_onnx_model(model)

    # unsupported input tensor data type:
    node = onnx.helper.make_node('Cast', inputs=['A'], outputs=['B'], to=onnx.TensorProto.INT32)
    input_tensors = [make_tensor_value_info(name, onnx.TensorProto.COMPLEX64, value.shape)
                     for name, value in zip(node.input, [input_data])]
    output_tensors = [make_tensor_value_info(name, onnx.TensorProto.INT32, value.shape)
                      for name, value in zip(node.output, ())]  # type: ignore

    graph = make_graph([node], 'compute_graph', input_tensors, output_tensors)
    model = make_model(graph, producer_name='NgraphBackend')
    with pytest.raises((RuntimeError, NgraphTypeError)):
        import_onnx_model(model)

    # unsupported output tensor data type:
    node = onnx.helper.make_node('Cast', inputs=['A'], outputs=['B'],
                                 to=onnx.TensorProto.COMPLEX128)
    input_tensors = [make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape)
                     for name, value in zip(node.input, [input_data])]
    output_tensors = [make_tensor_value_info(name, onnx.TensorProto.COMPLEX128, value.shape)
                      for name, value in zip(node.output, ())]  # type: ignore

    graph = make_graph([node], 'compute_graph', input_tensors, output_tensors)
    model = make_model(graph, producer_name='NgraphBackend')
    with pytest.raises(RuntimeError):
        import_onnx_model(model)
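
For contrast with the error cases above, a valid Cast node passes an integer member of onnx.TensorProto.DataType as the 'to' attribute:

node = onnx.helper.make_node('Cast', inputs=['A'], outputs=['B'],
                             to=onnx.TensorProto.FLOAT16)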
Example 15
def import_and_compute_gemm(input_a, input_b, input_c, **kwargs):
    input_a, input_b, input_c = np.array(input_a), np.array(input_b), np.array(input_c)

    if kwargs.get('trans_a'):
        kwargs['transA'] = kwargs['trans_a']
        del kwargs['trans_a']

    if kwargs.get('trans_b'):
        kwargs['transB'] = kwargs['trans_b']
        del kwargs['trans_b']

    onnx_model = make_onnx_model_for_gemm_op(input_a, input_b, input_c, **kwargs)
    transformer = get_runtime()
    ng_model = import_onnx_model(onnx_model)[0]
    computation = transformer.computation(ng_model['output'], *ng_model['inputs'])
    return computation(input_a, input_b, input_c)[0]
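
A hypothetical call illustrating the trans_a -> transA renaming the helper performs (ONNX Gemm computes alpha * A' * B' + beta * C):

a = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
b = np.eye(2, dtype=np.float32)
c = np.zeros((2, 2), dtype=np.float32)
result = import_and_compute_gemm(a, b, c, trans_a=True, alpha=2.0)
assert np.allclose(result, 2.0 * a.T @ b)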
Example 16
def test_simple_graph():
    node1 = make_node('Add', ['A', 'B'], ['X'], name='add_node1')
    node2 = make_node('Add', ['X', 'C'], ['Y'], name='add_node2')
    graph = make_graph([node1, node2], 'test_graph',
                       [make_tensor_value_info('A', onnx.TensorProto.FLOAT, [1]),
                        make_tensor_value_info('B', onnx.TensorProto.FLOAT, [1]),
                        make_tensor_value_info('C', onnx.TensorProto.FLOAT, [1])],
                       [make_tensor_value_info('Y', onnx.TensorProto.FLOAT, [1])])
    model = make_model(graph, producer_name='ngraph ONNXImporter')

    ng_model_function = import_onnx_model(model)

    runtime = get_runtime()
    computation = runtime.computation(ng_model_function)
    assert np.array_equal(computation(1, 2, 3)[0], np.array([6.0], dtype=np.float32))
    assert np.array_equal(computation(4, 5, 6)[0], np.array([15.0], dtype=np.float32))
Example 17
def test_simple_graph():
    node1 = make_node('Add', ['A', 'B'], ['X'], name='add_node1')
    node2 = make_node('Add', ['X', 'C'], ['Y'], name='add_node2')
    graph = make_graph([node1, node2], 'test_graph', [
        make_tensor_value_info('A', onnx.TensorProto.FLOAT, [1]),
        make_tensor_value_info('B', onnx.TensorProto.FLOAT, [1]),
        make_tensor_value_info('C', onnx.TensorProto.FLOAT, [1])
    ], [make_tensor_value_info('Y', onnx.TensorProto.FLOAT, [1])])
    model = make_model(graph, producer_name='ngraph ONNXImporter')

    ng_model = import_onnx_model(model)[0]

    runtime = ng.runtime(
        manager_name=pytest.config.getoption('backend', default='CPU'))
    computation = runtime.computation(ng_model['output'], *ng_model['inputs'])
    assert np.array_equal(computation(4, 5, 6),
                          np.array([15.0], dtype=np.float32))
Example 18
def run_model(onnx_model, data_inputs):
    # type: (onnx.ModelProto, List[np.ndarray]) -> List[np.ndarray]
    """
    Convert ONNX model to an ngraph model and perform computation on input data.

    :param onnx_model: ONNX ModelProto describing an ONNX model
    :param data_inputs: list of numpy ndarrays with input data
    :return: list of numpy ndarrays with computed output
    """
    NgraphBackend.backend_name = BACKEND_NAME
    if NgraphBackend.supports_ngraph_device(NgraphBackend.backend_name):
        ng_model_function = import_onnx_model(onnx_model)
        runtime = get_runtime()
        computation = runtime.computation(ng_model_function)
        return computation(*data_inputs)
    else:
        raise RuntimeError('The requested nGraph backend <'
                           + NgraphBackend.backend_name + '> is not supported!')
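
Hypothetical usage of run_model, evaluating a one-node Relu model built the same way as the other examples on this page:

node = make_node('Relu', inputs=['x'], outputs=['y'])
graph = make_graph([node], 'relu_graph',
                   [make_tensor_value_info('x', onnx.TensorProto.FLOAT, [4])],
                   [make_tensor_value_info('y', onnx.TensorProto.FLOAT, [4])])
model = make_model(graph, producer_name='ngraph ONNXImporter')
data = np.array([-1.0, 0.0, 2.0, -3.0], dtype=np.float32)
assert np.array_equal(run_model(model, [data])[0], np.maximum(data, 0.0))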
Example 19
def run_model(onnx_model, data_inputs):
    # type: (onnx.ModelProto, List[np.ndarray]) -> List[np.ndarray]
    """
    Convert ONNX model to an ngraph model and perform computation on input data.

    :param onnx_model: ONNX ModelProto describing an ONNX model
    :param data_inputs: list of numpy ndarrays with input data
    :return: list of numpy ndarrays with computed output
    """
    NgraphBackend.backend_name = pytest.config.getoption('backend', default='CPU')
    if NgraphBackend.supports_ngraph_device(NgraphBackend.backend_name):
        ng_model = import_onnx_model(onnx_model)
        runtime = get_runtime()
        computations = [runtime.computation(model['output'], *model['inputs']) for
                        model in ng_model]
        return [computation(*data_inputs) for computation in computations]
    else:
        raise RuntimeError('The requested nGraph backend <' + NgraphBackend.backend_name +
                           '> is not supported!')
Example 20
def run_ngraph_inference(model_onnx, inputs, device):
    onnx_protobuf = onnx.load(model_onnx)
    ng_function = import_onnx_model(onnx_protobuf)
    runtime = ng.runtime(backend_name=device)
    model = runtime.computation(ng_function)
    # TODO: doesn't work with fully-connected models; fix
    if "fully" not in model_onnx:
        # Add a dimension so slicing in the loop below yields a properly
        # shaped single input for ngraph (and for pytorch).
        inputs = np.expand_dims(inputs, 1)
    n = len(inputs)
    outputs = []

    start = timer()
    for i in range(n):
        data = inputs[i]
        outputs.append(model(data))

    inference_time = (timer() - start) * 1000 / n
    print(f'inference time (msec): {inference_time:.5f}')
    return outputs, inference_time
Example 21
 def prepare(cls, onnx_model, device='CPU', **kwargs):
     # type: (onnx.ModelProto, str, Dict) -> NgraphBackendRep
     """Prepare backend representation of ONNX model."""
     super(NgraphBackend, cls).prepare(onnx_model, device, **kwargs)
     ng_model_function = import_onnx_model(onnx_model)
     return NgraphBackendRep(ng_model_function, cls.backend_name)
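
Assuming NgraphBackend follows the standard onnx.backend API, usage would look roughly like this (hypothetical):

import numpy as np
import onnx

model = onnx.load('model.onnx')
backend_rep = NgraphBackend.prepare(model, device='CPU')
input_data = np.ones((1, 3, 224, 224), dtype=np.float32)
outputs = backend_rep.run([input_data])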
Example 22
 def prepare_ngraph_exe(self, model_path):
     onnx_protobuf = onnx.load(model_path)
     ng_function = import_onnx_model(onnx_protobuf)
     runtime = ng.runtime(backend_name='CPU')
     return runtime.computation(ng_function)
Example 23
import sys
import timeit

import numpy as np
import onnx
import ngraph as ng
from ngraph_onnx.onnx_importer.importer import import_onnx_model

model = onnx.load(sys.argv[1])

ng_func = import_onnx_model(model)
# print(ng_func)

picture = np.ones([1, 3, 224, 224], dtype=np.float32)

runtime = ng.runtime(backend_name='CPU')
#runtime = ng.runtime(backend_name='GPU')
resnet = runtime.computation(ng_func)
#print(resnet)


def run():
    resnet(picture)


n = 100

print(timeit.timeit('run()', globals=globals(), number=n) / n * 1000, 'msec')
Example 24
import onnx

onnx_protobuf = onnx.load('/path/to/model/cntk_ResNet20_CIFAR10/model.onnx')

# Convert a serialized ONNX model to an ngraph model
from ngraph_onnx.onnx_importer.importer import import_onnx_model
ng_model = import_onnx_model(onnx_protobuf)[0]


# Using an ngraph runtime (CPU backend), create a callable computation
import ngraph as ng
runtime = ng.runtime(backend_name='CPU')
resnet = runtime.computation(ng_model['output'], *ng_model['inputs'])

# Load or create an image
import numpy as np
picture = np.ones([1, 3, 32, 32])

# Run ResNet inference on picture
resnet(picture)

Example 25
BATCH_SIZE = 960

ONNX_MODEL = "./checkpoint/f1_model.onnx"

im_h = 32
im_w = 32
#ngraph
onnx_protobuf = onnx.load(ONNX_MODEL)
ng_model = import_onnx_model(onnx_protobuf)[0]
runtime = ng.runtime(backend_name='CPU')
resnet = runtime.computation(ng_model['output'], *ng_model['inputs'])

model = resnet20_cifar()
inputs = []
targets = []

test_dir = './test_img.npz'

cinic_mean = [0.47889522, 0.47227842, 0.43047404]
cinic_std = [0.24205776, 0.23828046, 0.25874835]

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=cinic_mean, std=cinic_std)
])
Example 26
dummy_input = torch.randn(list(lr.size())[0],
                          list(lr.size())[1],
                          list(lr.size())[2],
                          list(lr.size())[3],
                          device='cuda',
                          requires_grad=False)
edsr_onnx_filename = '{}.onnx'.format(pytorch_model_name)

torch.onnx.export(pytorch_edsr_model,
                  dummy_input,
                  edsr_onnx_filename,
                  export_params=True,
                  verbose=True,
                  training=False)
edsr_onnx_model = onnx.load(edsr_onnx_filename)
ng_models = import_onnx_model(edsr_onnx_model)
print(ng_models)

ng_model = ng_models[0]
runtime = ng.runtime(backend_name='CPU')
edsr_ng_model = runtime.computation(ng_model['output'], *ng_model['inputs'])

print(edsr_ng_model)

for i in range(100):
    print(i)

    # sr = edsr_ng_model(lr, idx_scale)
    # lr.to(torch.device('cpu'))
    sr = edsr_ng_model(lr)
    # sr = torch.from_numpy(sr)
Example 27
    def test(self):
        import onnx
        from ngraph_onnx.onnx_importer.importer import import_onnx_model
        import ngraph as ng
        global dim0, dim2, dim3

        torch.set_grad_enabled(False)

        epoch = self.optimizer.get_last_epoch() + 1
        self.ckp.write_log('\nEvaluation:')
        self.ckp.add_log(
            torch.zeros(1, len(self.loader_test), len(self.scale))
        )
        self.model.eval()

        timer_test = utility.timer()
        if self.args.save_results: self.ckp.begin_background()
        # print(self.loader_test)
        for idx_data, d in enumerate(self.loader_test):
            for idx_scale, scale in enumerate(self.scale):
                d.dataset.set_scale(idx_scale)
                print('idx_scale={}'.format(idx_scale))
                # print("len: {}".format(len(d)))
                # for lr, hr, filename, _ in tqdm(d, ncols=80):
                for batch, (lr, hr, filename, _) in enumerate(d):
                    print('{} '.format(batch), end='', flush=True)
                    lr, hr = self.prepare(lr, hr)
                    print('test lr.size: {}'.format(lr.size()))
                    dim0 = lr.size()[0]
                    dim2 = lr.size()[2]
                    dim3 = lr.size()[3]
                    
                    showbug = False
                    if showbug: print('stage1', flush=True)
                    if self.args.ngraph:
                        
                        pytorch_model_name = self.args.ngraph
                        pytorch_edsr_model = torch.load(pytorch_model_name).cuda()
                        if showbug: print('stage2-1', flush=True)
                        # print(lr.size())
                        # dummy_input = torch.randn_like(lr, device='cuda')
                        if showbug: print('stage2-2', flush=True)
                        edsr_onnx_filename = '{}.onnx'.format(pytorch_model_name)
                        # print('Export to onnx model {}'.format(edsr_onnx_filename))
                        torch.onnx.export(pytorch_edsr_model, lr.to(torch.device('cuda')), edsr_onnx_filename, export_params=True, verbose=False, training=False)
                        if showbug: print('stage2-3', flush=True)

                        edsr_onnx_model = onnx.load(edsr_onnx_filename)
                        # print(onnx.helper.printable_graph(edsr_onnx_model.graph))

                        if showbug: print('stage2-4', flush=True)
                        ng_models = import_onnx_model(edsr_onnx_model)

                        # print('Convert to nGreph Model')

                        ng_model = ng_models[0]
                        if showbug: print('stage2-5', flush=True)
                        runtime = ng.runtime(backend_name='CPU')
                        if showbug: print('stage2-6', flush=True)
                        edsr_ng_model = runtime.computation(ng_model['output'], *ng_model['inputs'])
                        if showbug: print('stage2-7', flush=True)

                        sr = edsr_ng_model(lr, idx_scale)
                        if showbug: print('stage2-8', flush=True)
                        sr = torch.from_numpy(sr)
                        if showbug: print('stage2-9', flush=True)
                    elif self.args.tensorrt:
                        pytorch_model_name = self.args.tensorrt
                        pytorch_edsr_model = torch.load(pytorch_model_name)
                        
                        # lr_np = lr.numpy().astype(np.float32)
                        dummy_input = torch.randn_like(lr, device='cuda')
                        edsr_onnx_filename = '{}.onnx'.format(pytorch_model_name)
                        print('Export to onnx model {}'.format(edsr_onnx_filename))
                        torch.onnx.export(pytorch_edsr_model, dummy_input, edsr_onnx_filename, export_params=True, verbose=False, training=False)

                        import os
                        import onnx

                        edsr_onnx_model = onnx.load(edsr_onnx_filename)
                        # print(onnx.helper.printable_graph(edsr_onnx_model.graph))

                        import tensorrt
                        import onnx_tensorrt.backend as backend
                        import numpy as np

                        tensorrt_engine = backend.prepare(edsr_onnx_model, device='CUDA:0')
                        # lr_np = lr_np.to(torch.device("cuda:0"))
                        # lr.numpy().astype(np.float32)

                        sr = tensorrt_engine.run(lr.numpy().astype(np.float32))[0]
                        sr = torch.from_numpy(sr)

                        print('complete one')   



                        pytorch_model_name = self.args.tensorrt
                        pytorch_edsr_model = torch.load(pytorch_model_name)
                        
                        # lr_np = lr.numpy().astype(np.float32)
                        dummy_input = torch.randn_like(lr, device='cuda')
                        edsr_onnx_filename = '{}.onnx'.format(pytorch_model_name)
                        print('Export to onnx model {}'.format(edsr_onnx_filename))
                        torch.onnx.export(pytorch_edsr_model, dummy_input, edsr_onnx_filename, export_params=True, verbose=False, training=False)

                        import os
                        import onnx

                        edsr_onnx_model = onnx.load(edsr_onnx_filename)
                        # print(onnx.helper.printable_graph(edsr_onnx_model.graph))

                        import tensorrt
                        import onnx_tensorrt.backend as backend
                        import numpy as np

                        tensorrt_engine = backend.prepare(edsr_onnx_model, device='CUDA:0')
                        # lr_np = lr_np.to(torch.device("cuda:0"))
                        # lr.numpy().astype(np.float32)

                        sr = tensorrt_engine.run(lr.numpy().astype(np.float32))[0]
                        sr = torch.from_numpy(sr)
                        
                        print('complete two')   
                    else:
                        sr = self.model(lr, idx_scale)

                    if showbug: print('stage3', flush=True)
                    sr = utility.quantize(sr, self.args.rgb_range)

                    if showbug: print('stage4', flush=True)
                    save_list = [sr]
                    if showbug: print('stage5', flush=True)
                    self.ckp.log[-1, idx_data, idx_scale] += utility.calc_psnr(
                        sr, hr, scale, self.args.rgb_range, dataset=d
                    )
                    if showbug: print('stage6', flush=True)
                    if self.args.save_gt:
                        save_list.extend([lr, hr])
                    if showbug: print('stage7', flush=True)

                    if self.args.save_results:
                        self.ckp.save_results(d, filename[0], save_list, scale)
                    if showbug: print('stage8', flush=True)

                self.ckp.log[-1, idx_data, idx_scale] /= len(d)
                best = self.ckp.log.max(0)
                psnr = self.ckp.log[-1, idx_data, idx_scale].numpy()
                print('')
                self.ckp.write_log(
                    '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                        d.dataset.name,
                        scale,
                        self.ckp.log[-1, idx_data, idx_scale],
                        best[0][idx_data, idx_scale],
                        best[1][idx_data, idx_scale] + 1
                    )
                )
                
        self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
        self.ckp.write_log('Saving...')

        if self.args.save_results:
            self.ckp.end_background()

        if not self.args.test_only:
            self.ckp.save(self, epoch, is_best=(best[1][0, 0] + 1 == epoch))

        self.ckp.write_log(
           'Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True
        )

        torch.set_grad_enabled(True)
        return psnr
Example 28
    if arg.device == 'x86':
        backend_name = 'CPU'
        if arg.thread == 'single':
            os.environ["OMP_NUM_THREADS"] = '1'
    if arg.device == 'gpu':
        backend_name = 'PlaidML'
        if arg.thread == 'single':
            os.environ["OMP_NUM_THREADS"] = '1'
        os.environ["PLAIDML_DEVICE_IDS"] = 'opencl_nvidia_tesla_v100-pcie-32gb.1'
        # os.environ["PLAIDML_DEVICE_IDS"] = 'llvm_cpu.0'

    print(time.strftime("[localtime] %Y-%m-%d %H:%M:%S", time.localtime()) )

    on, input_shape = get_onnx(arg.onnx)
    ng_function = import_onnx_model(on)
    print(ng_function)
    
    runtime = ng.runtime(backend_name)
    func = runtime.computation(ng_function)
    
    assert(len(input_shape) == 1)
    for value in input_shape.values():
        shape = value
    
    print(shape)
    #shape=[1,3,224,224]
    picture = np.ones(shape, dtype=np.float32)
   
    nSteps=15
    avg_time=0
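
The snippet is cut off after nSteps and avg_time; a plausible continuation of the timing loop those variables suggest (an assumption, not the original code):

for step in range(nSteps):
    start = time.time()
    func(picture)
    elapsed = (time.time() - start) * 1000
    if step > 0:  # treat the first step as warm-up
        avg_time += elapsed
print('average time: %.3f msec' % (avg_time / (nSteps - 1)))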
Example 29
import sys
import timeit

import numpy as np
import onnx
import ngraph as ng
from ngraph_onnx.onnx_importer.importer import import_onnx_model

model = onnx.load(sys.argv[1])

ng_func = import_onnx_model(model)
# print(ng_func)

picture = np.ones([1, 3, 224, 224], dtype=np.float32)

runtime = ng.runtime(backend_name='CPU')
#runtime = ng.runtime(backend_name='GPU')
resnet = runtime.computation(ng_func)
#print(resnet)

def run():
    resnet(picture)

n = 100

print(timeit.timeit('run()', globals=globals(), number=n) / n * 1000, 'msec')