def prepare_model(model_name, url):
    """Download and prepare the ONNX model test data for *model_name*.

    Handles both flavors of ``Runner._prepare_model_data`` seen across
    onnx versions: when its first parameter is ``self`` it is an instance
    method and must be invoked on a Runner bound to TensorflowBackend;
    otherwise it can be called straight off the class.
    """
    target = Runner._prepare_model_data
    if IS_PYTHON3:
        arg_names = list(inspect.signature(target).parameters.keys())
    else:
        # Python 2 fallback: inspect.signature does not exist there.
        arg_names = inspect.getargspec(target).args
    # Bind to an instance only when the helper is an instance method.
    owner = Runner(TensorflowBackend) if arg_names[0] == "self" else Runner
    test_case = TestCase(
        name="test_{}".format(model_name),
        model_name=model_name,
        url=url,
        model_dir=None,
        model=None,
        data_sets=None,
        kind='real',
    )
    return owner._prepare_model_data(model_test=test_case)
def _roundtrip(self, model_name):
    """Assert *model_name* survives a serialize/deserialize round trip.

    Fetches the model test data, loads ``model.pb`` both via
    ``onnx.load`` and ``onnx.load_from_string``, then checks that the
    printable graph and the exact serialized bytes are unchanged.
    """
    dummy = namedtuple('dummy', ['model_name'])(model_name)
    model_dir = Runner(c2)._prepare_model_data(dummy)
    pb_path = os.path.join(model_dir, 'model.pb')
    before = onnx.load(pb_path)
    with open(pb_path, 'rb') as pb:
        after = onnx.load_from_string(pb.read())
    # Human-readable graphs must match ...
    assert onnx.helper.printable_graph(before.graph) \
        == onnx.helper.printable_graph(after.graph)
    # ... and so must the raw serialized bytes.
    with open(pb_path, 'rb') as pb:
        assert after.SerializeToString() == pb.read()
def prepare_model(model_name, url):
    """Download and prepare ONNX model test data across onnx versions.

    onnx < 1.5.0 names the helper ``_prepare_model_data`` while newer
    releases use ``prepare_model_data``; onnx < 1.4.0 additionally lacks
    the ``rtol``/``atol`` TestCase fields.  The helper may be either an
    instance method (first arg ``self``) or callable off the class.
    """
    helper_name = ('_prepare_model_data'
                   if legacy_onnx_pre_ver(1, 5, 0) else 'prepare_model_data')
    helper = getattr(Runner, helper_name)
    if IS_PYTHON3:
        arg_names = list(inspect.signature(helper).parameters.keys())
    else:
        # Python 2 fallback: inspect.signature is unavailable.
        arg_names = inspect.getargspec(helper).args
    if arg_names[0] == "self":
        # Instance method: rebind on a live Runner.
        helper = getattr(Runner(TensorflowBackend), helper_name)
    tc_kwargs = dict(
        name="test_{}".format(model_name),
        model_name=model_name,
        url=url,
        model_dir=None,
        model=None,
        data_sets=None,
        kind='real',
    )
    if not legacy_onnx_pre_ver(1, 4, 0):
        # Tolerance fields were added to TestCase in onnx 1.4.0.
        tc_kwargs.update(rtol=1e-3, atol=1e-7)
    return helper(model_test=TestCase(**tc_kwargs))
def gen_model_test_coverage(schemas, f, ml):  # type: (Sequence[defs.OpSchema], IO[Any], bool) -> None
    """Write the "Model Test Coverage" markdown section to *f*.

    For every 'real' model test, reports how many of the model's nodes are
    covered by node tests and, per op, which attributes (and how many
    distinct values of each) appear in the test models.  When *ml* is True,
    only models importing the 'ai.onnx.ml' domain are included.
    """
    f.write('# Model Test Coverage\n')
    # Index schemas by op name for the per-op attribute counts below.
    schema_dict = dict()
    for schema in schemas:
        schema_dict[schema.name] = schema
    # attrs[op_type][attr_name] accumulates the distinct values seen.
    attrs = dict()  # type: Dict[Text, Dict[Text, List[Any]]]
    model_paths = []  # type: List[Any]
    for rt in load_model_tests(kind='real'):
        model_dir = Runner._prepare_model_data(rt)
        model_paths.append(os.path.join(model_dir, 'model.onnx'))
    model_paths.sort()
    model_written = False
    # Maps each AttributeProto type enum to the proto field holding its
    # value; replaces a 10-branch if/elif ladder with identical semantics.
    attr_value_fields = {
        AttributeProto.FLOAT: 'f',
        AttributeProto.INT: 'i',
        AttributeProto.STRING: 's',
        AttributeProto.TENSOR: 't',
        AttributeProto.GRAPH: 'g',
        AttributeProto.FLOATS: 'floats',
        AttributeProto.INTS: 'ints',
        AttributeProto.STRINGS: 'strings',
        AttributeProto.TENSORS: 'tensors',
        AttributeProto.GRAPHS: 'graphs',
    }
    for model_pb_path in model_paths:
        model = load(model_pb_path)
        if ml:
            ml_present = False
            for opset in model.opset_import:
                if opset.domain == 'ai.onnx.ml':
                    ml_present = True
            if not ml_present:
                continue
            else:
                model_written = True
        f.write('## {}\n'.format(model.graph.name))
        # Count covered nodes and record each node's attribute values.
        num_covered = 0
        for node in model.graph.node:
            if node.op_type in common_covered or node.op_type in experimental_covered:
                num_covered += 1
            for attr in node.attribute:
                if node.op_type not in attrs:
                    attrs[node.op_type] = dict()
                if attr.name not in attrs[node.op_type]:
                    attrs[node.op_type][attr.name] = []
                field = attr_value_fields.get(attr.type)
                if field is not None:
                    value = getattr(attr, field)
                    seen = attrs[node.op_type][attr.name]
                    if value not in seen:
                        seen.append(value)
        # BUG FIX: total node count and covered count were swapped in the
        # report line (it printed "<name> has <covered> nodes. Of these,
        # <total> are covered").  Also guard against an empty graph, which
        # previously raised ZeroDivisionError.
        total_nodes = len(model.graph.node)
        pct = (100.0 * float(num_covered) / float(total_nodes)
               if total_nodes else 0.0)
        f.write(
            '\n{} has {} nodes. Of these, {} are covered by node tests ({}%)\n\n\n'
            .format(model.graph.name, total_nodes, num_covered, pct))
        # Iterate through attrs, print
        f.write('<details>\n')
        f.write('<summary>nodes</summary>\n\n')
        for op in sorted(attrs):
            f.write('<details>\n')
            # Get total number of attributes for node schema
            f.write(
                '<summary>{}: {} out of {} attributes covered</summary>\n\n'.
                format(op, len(attrs[op].keys()),
                       len(schema_dict[op].attributes)))
            for attribute in sorted(schema_dict[op].attributes):
                if attribute in attrs[op]:
                    f.write('{}: {}\n'.format(attribute,
                                              len(attrs[op][attribute])))
                else:
                    f.write('{}: 0\n'.format(attribute))
            f.write('</details>\n')
        f.write('</details>\n\n\n')
    if not model_written and ml:
        f.write('No model tests present for selected domain\n')