def convert_flexible_coremodel(model_path, input_name, output_name,
                               size_range=(100, 1920)):
    """Make a Core ML model's image features size-flexible and quantize to fp16.

    Loads the model spec at ``model_path``, marks both the image input and the
    image output as accepting any height/width within ``size_range``
    (inclusive), converts the network weights to fp16, and writes the result
    back over ``model_path``.

    Args:
        model_path: Path to a .mlmodel spec file; overwritten in place.
        input_name: Name of the image input feature to make flexible.
        output_name: Name of the image output feature to make flexible.
        size_range: (min, max) pixel bounds applied to both height and width.
            Defaults to (100, 1920), the original hard-coded range.
    """
    spec = coremltools.utils.load_spec(model_path)

    img_size_ranges = flexible_shape_utils.NeuralNetworkImageSizeRange()
    img_size_ranges.add_height_range(tuple(size_range))
    img_size_ranges.add_width_range(tuple(size_range))
    flexible_shape_utils.update_image_size_range(
        spec, feature_name=input_name, size_range=img_size_ranges)
    flexible_shape_utils.update_image_size_range(
        spec, feature_name=output_name, size_range=img_size_ranges)

    # Convert weights on the in-memory spec directly: the original code saved
    # the fp32 spec to disk and reloaded it first, which was a redundant
    # round trip (the final on-disk artifact is identical).
    model_fp16_spec = coremltools.utils.convert_neural_network_spec_weights_to_fp16(spec)
    coremltools.utils.save_spec(model_fp16_spec, model_path)
def _set_user_inputs(proto, inputs):
    """Apply user-declared flexible input shapes to a Core ML model spec.

    For each entry in ``inputs`` (project ``ImageType``/``TensorType``-like
    objects carrying a ``.shape``), translate its shape description into the
    corresponding flexible-shape annotation on ``proto``:

    - ``EnumeratedShapes`` on an ``ImageType``  -> enumerated image sizes
    - ``EnumeratedShapes`` on a multiarray      -> enumerated ND shapes
    - ``Shape`` containing ``RangeDim``/symbolic dims on an ``ImageType``
                                                -> an image H/W size range
    - ``Shape`` containing ``RangeDim``/symbolic dims on a multiarray
                                                -> per-dimension lb/ub range

    Fully-concrete ``Shape``s (all dims positive ints) are left untouched.
    ``proto`` is mutated in place; nothing is returned.
    """
    for input_type in inputs:
        shape = input_type.shape
        if isinstance(shape, EnumeratedShapes):
            if isinstance(input_type, ImageType):
                # Find the default H/W already recorded in the spec for this
                # input; the enumerated list must not repeat the default size.
                default_height, default_width = 0, 0
                for inp in proto.description.input:
                    if inp.name == input_type.name:
                        default_height = inp.type.imageType.height
                        default_width = inp.type.imageType.width
                        break
                image_sizes = []
                if input_type.channel_first:
                    # Channel-first layout: H is dim -2, W is dim -1.
                    for s in shape.shapes:
                        if s.shape[-2] == default_height and s.shape[
                            -1] == default_width:
                            continue  # skip the default size; it is implicit
                        image_sizes.append(
                            flexible_shape_utils.NeuralNetworkImageSize(
                                height=s.shape[-2], width=s.shape[-1]))
                else:
                    # Channel-last layout: H is dim -3, W is dim -2.
                    for s in shape.shapes:
                        if s.shape[-3] == default_height and s.shape[
                            -2] == default_width:
                            continue  # skip the default size; it is implicit
                        image_sizes.append(
                            flexible_shape_utils.NeuralNetworkImageSize(
                                height=s.shape[-3], width=s.shape[-2]))
                add_enumerated_image_sizes(proto, input_type.name,
                                           sizes=image_sizes)
            else:
                # Non-image input: record every enumerated shape as a tuple.
                add_multiarray_ndshape_enumeration(
                    proto, input_type.name,
                    [tuple(s.shape) for s in shape.shapes])
        elif isinstance(shape, Shape):
            shape = shape.shape  # unwrap: the dim list inside the Shape object
            # All dims concrete and positive -> shape is fixed; nothing to do.
            if all([
                not isinstance(s, RangeDim) and not is_symbolic(s) and s > 0
                for s in shape
            ]):
                continue
            if isinstance(input_type, ImageType):
                img_range = flexible_shape_utils.NeuralNetworkImageSizeRange()
                if input_type.channel_first:
                    H = shape[-2]
                    W = shape[-1]
                else:
                    H = shape[-3]
                    W = shape[-2]
                # RangeDim -> explicit bounds; symbolic -> unbounded (1, -1),
                # where -1 presumably means "no upper bound" to coremltools
                # (TODO confirm); concrete int -> degenerate (H, H) range.
                if isinstance(H, RangeDim):
                    img_range.add_height_range((H.lower_bound, H.upper_bound))
                elif is_symbolic(H):
                    img_range.add_height_range((1, -1))
                else:
                    img_range.add_height_range((H, H))
                if isinstance(W, RangeDim):
                    img_range.add_width_range((W.lower_bound, W.upper_bound))
                elif is_symbolic(W):
                    img_range.add_width_range((1, -1))
                else:
                    img_range.add_width_range((W, W))
                flexible_shape_utils.update_image_size_range(
                    proto, input_type.name, img_range)
            else:
                # Multiarray: build parallel lower/upper bound lists, one
                # entry per dimension, using the same three-way rule as above.
                lb = []
                ub = []
                for s in shape:
                    if isinstance(s, RangeDim):
                        lb.append(s.lower_bound)
                        ub.append(s.upper_bound)
                    elif is_symbolic(s):
                        lb.append(1)
                        ub.append(-1)
                    else:
                        lb.append(s)
                        ub.append(s)
                set_multiarray_ndshape_range(proto, input_type.name,
                                             lower_bounds=lb, upper_bounds=ub)
# Convert an ONNX model (model_in) to Core ML (model_out), then write a
# size-flexible copy as 'flex_<model_out>'.
# NOTE(review): `model_in`, `model_out`, `convert`, and `onnx_pb` are assumed
# to be defined earlier in this file — confirm before moving this block.

# Read the serialized ONNX protobuf. `with` guarantees the handle is closed
# (the original opened the file and never closed it).
with open(model_in, 'rb') as model_file:
    model_proto = onnx_pb.ModelProto()
    model_proto.ParseFromString(model_file.read())

print("prepare to convert...")

# Core ML applies `scale * pixel + bias` per channel at inference time.
# These constants appear to fold a per-channel mean subtraction
# (109.496 / 118.698 / 124.688, BGR order) and a std division (58.50182)
# into the 1/255 pixel scaling — presumably matching the training
# preprocessing; verify against the training pipeline.
coreml_model = convert(
    model_proto,
    preprocessing_args={
        'image_scale': (1.0 / 255.0 / 58.50182),
        'blue_bias': (-109.496254 / 255.0 / 58.50182),
        'green_bias': (-118.698456 / 255.0 / 58.50182),
        'red_bias': (-124.68751 / 255.0 / 58.50182),
        'is_bgr': True,
    },
    image_input_names=['gemfield'])
coreml_model.save(model_out)

#### Make the 'gemfield' image input accept any size in 384..640 (H and W).
import coremltools
from coremltools.models.neural_network import flexible_shape_utils

spec = coremltools.utils.load_spec(model_out)
img_size_ranges = flexible_shape_utils.NeuralNetworkImageSizeRange()
img_size_ranges.add_height_range((384, 640))
img_size_ranges.add_width_range((384, 640))
flexible_shape_utils.update_image_size_range(
    spec, feature_name='gemfield', size_range=img_size_ranges)
coremltools.utils.save_spec(spec, 'flex_{}'.format(model_out))
####