def simple_if(condition_val):
    """Build an If op whose then-branch multiplies and else-branch adds two
    fixed 2-element vectors, followed by a ReLU.

    :param condition_val: Python bool selecting the branch at runtime.
    :return: the ReLU node wrapping the If op's output.
    """
    # NOTE: `np.bool` was removed in NumPy 1.24 — use the builtin `bool`,
    # which is what the alias pointed to.
    condition = ng.constant(condition_val, dtype=bool)

    # then_body: multiply(X, Y)
    X_t = ng.parameter([2], np.float32, "X")
    Y_t = ng.parameter([2], np.float32, "Y")
    then_mul = ng.multiply(X_t, Y_t)
    then_body_res_1 = ng.result(then_mul)
    then_body = GraphBody([X_t, Y_t], [then_body_res_1])
    # If-op input 1 maps to body parameter 0, input 2 to parameter 1.
    then_body_inputs = [TensorIteratorInvariantInputDesc(1, 0),
                        TensorIteratorInvariantInputDesc(2, 1)]
    then_body_outputs = [TensorIteratorBodyOutputDesc(0, 0)]

    # else_body: add(X, Y)
    X_e = ng.parameter([2], np.float32, "X")
    Y_e = ng.parameter([2], np.float32, "Y")
    add_e = ng.add(X_e, Y_e)
    else_body_res_1 = ng.result(add_e)
    else_body = GraphBody([X_e, Y_e], [else_body_res_1])
    else_body_inputs = [TensorIteratorInvariantInputDesc(1, 0),
                        TensorIteratorInvariantInputDesc(2, 1)]
    else_body_outputs = [TensorIteratorBodyOutputDesc(0, 0)]

    X = ng.constant([3, 4], dtype=np.float32)
    Y = ng.constant([2, 1], dtype=np.float32)
    if_node = ng.if_op(condition, [X, Y], (then_body, else_body),
                       (then_body_inputs, else_body_inputs),
                       (then_body_outputs, else_body_outputs))
    relu = ng.relu(if_node)
    return relu
def create_simple_if_with_two_outputs(condition_val):
    """Build an If op with four scalar inputs and two outputs.

    then-branch computes (X + Y, Y * Z); else-branch computes (X + W, W ** Z).

    :param condition_val: Python bool selecting the branch at runtime.
    :return: the If node (exposes two outputs).
    """
    # NOTE: `np.bool` was removed in NumPy 1.24 — use the builtin `bool`.
    condition = ng.constant(condition_val, dtype=bool)

    # then_body: consumes X, Y, Z
    X_t = ng.parameter([], np.float32, "X")
    Y_t = ng.parameter([], np.float32, "Y")
    Z_t = ng.parameter([], np.float32, "Z")
    add_t = ng.add(X_t, Y_t)
    mul_t = ng.multiply(Y_t, Z_t)
    then_body_res_1 = ng.result(add_t)
    then_body_res_2 = ng.result(mul_t)
    then_body = GraphBody([X_t, Y_t, Z_t], [then_body_res_1, then_body_res_2])
    # If-op inputs 1..3 (X, Y, Z) map to body parameters 0..2.
    then_body_inputs = [TensorIteratorInvariantInputDesc(1, 0),
                        TensorIteratorInvariantInputDesc(2, 1),
                        TensorIteratorInvariantInputDesc(3, 2)]
    then_body_outputs = [TensorIteratorBodyOutputDesc(0, 0),
                         TensorIteratorBodyOutputDesc(1, 1)]

    # else_body: consumes X, Z, W
    X_e = ng.parameter([], np.float32, "X")
    Z_e = ng.parameter([], np.float32, "Z")
    W_e = ng.parameter([], np.float32, "W")
    add_e = ng.add(X_e, W_e)
    pow_e = ng.power(W_e, Z_e)
    else_body_res_1 = ng.result(add_e)
    else_body_res_2 = ng.result(pow_e)
    else_body = GraphBody([X_e, Z_e, W_e], [else_body_res_1, else_body_res_2])
    # If-op inputs 1, 3, 4 (X, Z, W) map to body parameters 0..2; Y unused.
    else_body_inputs = [TensorIteratorInvariantInputDesc(1, 0),
                        TensorIteratorInvariantInputDesc(3, 1),
                        TensorIteratorInvariantInputDesc(4, 2)]
    else_body_outputs = [TensorIteratorBodyOutputDesc(0, 0),
                         TensorIteratorBodyOutputDesc(1, 1)]

    X = ng.constant(15.0, dtype=np.float32)
    Y = ng.constant(-5.0, dtype=np.float32)
    Z = ng.constant(4.0, dtype=np.float32)
    W = ng.constant(2.0, dtype=np.float32)
    if_node = ng.if_op(condition, [X, Y, Z, W], (then_body, else_body),
                       (then_body_inputs, else_body_inputs),
                       (then_body_outputs, else_body_outputs))
    return if_node
def __init__(self, *args, upsample_ratio=1, **kwargs):
    """Patch the human-pose-estimation network with a max-pool keypoint-NMS
    head, load it on the device, and prepare decoding state.

    :param upsample_ratio: feature-map upsampling ratio used to size the
        NMS pooling kernel.
    Remaining positional/keyword arguments are forwarded to the base class.
    """
    super().__init__(*args, **kwargs)
    self.pooled_heatmaps_blob_name = 'pooled_heatmaps'
    self.heatmaps_blob_name = 'heatmaps'
    self.pafs_blob_name = 'pafs'

    function = ng.function_from_cnn(self.net)
    # Output 0 is treated as PAFs and output 1 as heatmaps —
    # NOTE(review): assumes this fixed output order; confirm for the model.
    paf = function.get_output_op(0)
    paf = paf.inputs()[0].get_source_output().get_node()
    paf.set_friendly_name(self.pafs_blob_name)
    heatmap = function.get_output_op(1)
    heatmap = heatmap.inputs()[0].get_source_output().get_node()
    heatmap.set_friendly_name(self.heatmaps_blob_name)

    # Add keypoints NMS to the network.
    # Heuristic NMS kernel size adjustment depending on the feature maps upsampling ratio.
    p = int(np.round(6 / 7 * upsample_ratio))
    k = 2 * p + 1
    pooled_heatmap = ng.max_pool(heatmap,
                                 kernel_shape=(k, k),
                                 pads_begin=(p, p),
                                 pads_end=(p, p),
                                 strides=(1, 1),
                                 name=self.pooled_heatmaps_blob_name)
    f = ng.impl.Function([
        ng.result(heatmap, name=self.heatmaps_blob_name),
        ng.result(pooled_heatmap, name=self.pooled_heatmaps_blob_name),
        ng.result(paf, name=self.pafs_blob_name)
    ], function.get_parameters(), 'hpe')
    self.image_blob_name = self._get_inputs(self.net)
    # Rebuild the IE network from the patched ngraph function and load it.
    self.net = IENetwork(ng.impl.Function.to_capsule(f))
    self.exec_net = self.ie.load_network(
        network=self.net,
        device_name=self.device,
        num_requests=self.max_num_requests)
    self.requests = self.exec_net.requests
    self.empty_requests = deque(self.requests)
    self.num_joints = self.net.outputs[self.heatmaps_blob_name].shape[
        1] - 1  # The last channel is for background.
    target_size = self.net.input_info[
        self.image_blob_name].input_data.shape[-2]
    # Ratio between the network input height and the heatmap height.
    self.output_scale = target_size / self.net.outputs[
        self.heatmaps_blob_name].shape[-2]
    if self.target_size is None:
        self.target_size = target_size
    self.decoder = OpenPoseDecoder(num_joints=self.num_joints)
def create_encoder(input_shape, levels=4):
    """Build a toy conv/deconv (hourglass-style) graph with random weights.

    ``levels`` convolutions each double the channel count, then ``levels``
    transposed convolutions mirror the channel counts back down.

    :param input_shape: NCHW shape of the network input.
    :param levels: number of down/up-sampling stages.
    :return: an ngraph Function named "Encoder".
    """
    import ngraph as ng

    data = ng.parameter(input_shape, np.float32, name="data")
    pads = [0, 0]
    stride = [1, 1]
    dilation = [1, 1]
    # Stack of channel counts, seeded with the input's channel dimension.
    channels = [input_shape[1]]
    node = data

    # Contracting path: each level doubles the number of channels.
    for _ in range(levels):
        c_in = channels[-1]
        c_out = c_in * 2
        kernel = np.random.uniform(0, 1, [c_out, c_in, 5, 5]).astype(np.float32)
        node = ng.convolution(node, kernel, stride, pads, pads, dilation)
        channels.append(c_out)

    # Expanding path: pop the stack to mirror the channel counts back down.
    for _ in range(levels):
        c_in = channels[-2]
        c_out = channels.pop(-1)
        kernel = np.random.uniform(0, 1, [c_out, c_in, 5, 5]).astype(np.float32)
        node = ng.convolution_backprop_data(node, kernel, stride)

    node.set_friendly_name("out")
    return ng.Function(ng.result(node), [data], "Encoder")
def test_query_state(device):
    """Check that an executable network exposes its memory (variable) state
    via ``query_state`` and that the state buffer is read-only.
    """
    import ngraph as ng
    from ngraph.impl import Function
    input_data = ng.parameter([5, 7], name="input_data", dtype=np.float32)
    rv = ng.read_value(input_data, "var_id_667")
    # Removed commented-out dead code that built an unused Add node.
    node = ng.assign(rv, "var_id_667")
    res = ng.result(rv, "res")
    func = Function([res], sinks=[node], parameters=[input_data], name='test')
    caps = Function.to_capsule(func)
    net = ie.IENetwork(caps)
    ie_core = ie.IECore()
    exec_net = ie_core.load_network(network=net, device_name=device,
                                    num_requests=1)
    request = exec_net.requests[0]
    mem_states = request.query_state()
    mem_state = mem_states[0]
    # The state blob must be exposed read-only: writes have to raise.
    with pytest.raises(ValueError) as e:
        ones_arr = np.ones(shape=(1, 800), dtype=np.float32)
        mem_state.state.buffer[:] = ones_arr
    assert "assignment destination is read-only" in str(e.value)
    # The runtime assigns its own state name rather than the variable id.
    assert mem_state.name == 'id_1'
    assert mem_state.state.tensor_desc.precision == 'FP32'
def create_function_with_memory(input_shape, data_type):
    """Create a capsule for a graph with a ReadValue/Assign memory loop.

    The graph reads variable ``var_id_667``, adds the input to it, and
    writes the sum back through an Assign sink while also returning it.

    :param input_shape: shape of the input parameter.
    :param data_type: element type of the input parameter.
    :return: the Function wrapped in a capsule.
    """
    param = ng.parameter(input_shape, name="input_data", dtype=data_type)
    state = ng.read_value(param, "var_id_667")
    summed = ng.add(state, param, name="MemoryAdd")
    sink = ng.assign(summed, "var_id_667")
    output = ng.result(summed, "res")
    graph = Function(results=[output], sinks=[sink],
                     parameters=[param], name="name")
    return Function.to_capsule(graph)
def create_diff_if_with_two_outputs(condition_val):
    """Build an If op whose branches produce outputs of different shapes.

    then-branch: (X @ Y, Y * X) over 2-element vectors;
    else-branch: (Z, X * Z) with a scalar Z.

    :param condition_val: Python bool selecting the branch at runtime.
    :return: the If node (exposes two outputs).
    """
    # NOTE: `np.bool` was removed in NumPy 1.24 — use the builtin `bool`.
    condition = ng.constant(condition_val, dtype=bool)

    # then_body
    X_t = ng.parameter([2], np.float32, "X")
    Y_t = ng.parameter([2], np.float32, "Y")
    mmul_t = ng.matmul(X_t, Y_t, False, False)
    mul_t = ng.multiply(Y_t, X_t)
    then_body_res_1 = ng.result(mmul_t)
    then_body_res_2 = ng.result(mul_t)
    then_body = GraphBody([X_t, Y_t], [then_body_res_1, then_body_res_2])
    then_body_inputs = [TensorIteratorInvariantInputDesc(1, 0),
                        TensorIteratorInvariantInputDesc(2, 1)]
    then_body_outputs = [TensorIteratorBodyOutputDesc(0, 0),
                         TensorIteratorBodyOutputDesc(1, 1)]

    # else_body
    X_e = ng.parameter([2], np.float32, "X")
    Z_e = ng.parameter([], np.float32, "Z")
    mul_e = ng.multiply(X_e, Z_e)
    else_body_res_1 = ng.result(Z_e)
    else_body_res_2 = ng.result(mul_e)
    else_body = GraphBody([X_e, Z_e], [else_body_res_1, else_body_res_2])
    # If-op inputs 1 and 3 (X, Z) map to body parameters 0 and 1; Y unused.
    else_body_inputs = [TensorIteratorInvariantInputDesc(1, 0),
                        TensorIteratorInvariantInputDesc(3, 1)]
    else_body_outputs = [TensorIteratorBodyOutputDesc(0, 0),
                         TensorIteratorBodyOutputDesc(1, 1)]

    X = ng.constant([3, 4], dtype=np.float32)
    Y = ng.constant([2, 1], dtype=np.float32)
    Z = ng.constant(4.0, dtype=np.float32)
    if_node = ng.if_op(condition, [X, Y, Z], (then_body, else_body),
                       (then_body_inputs, else_body_inputs),
                       (then_body_outputs, else_body_outputs))
    return if_node
def __init__(self, ie, model_path, target_size, aspect_ratio, prob_threshold, size_divisor=8, upsample_ratio=1):
    """Load an OpenPose HPE network, append a max-pool keypoint-NMS head,
    and reshape the input to the requested target size.

    :param ie: inference-engine core instance.
    :param model_path: path of the model to load.
    :param target_size: desired input height, or None to keep the model's.
    :param aspect_ratio: width/height ratio used to derive the input width.
    :param prob_threshold: keypoint score threshold passed to the decoder.
    :param size_divisor: input sides are rounded up to a multiple of this.
    :param upsample_ratio: feature-map upsampling ratio sizing the NMS kernel.
    """
    super().__init__(ie, model_path)
    self.image_blob_name = self._get_inputs(self.net)
    self.pooled_heatmaps_blob_name = 'pooled_heatmaps'
    self.heatmaps_blob_name = 'heatmaps'
    self.pafs_blob_name = 'pafs'

    function = ng.function_from_cnn(self.net)
    # Output 0 is treated as PAFs and output 1 as heatmaps —
    # NOTE(review): assumes this fixed output order; confirm for the model.
    paf = function.get_output_op(0)
    paf = paf.inputs()[0].get_source_output().get_node()
    paf.set_friendly_name(self.pafs_blob_name)
    heatmap = function.get_output_op(1)
    heatmap = heatmap.inputs()[0].get_source_output().get_node()
    heatmap.set_friendly_name(self.heatmaps_blob_name)

    # Add keypoints NMS to the network.
    # Heuristic NMS kernel size adjustment depending on the feature maps upsampling ratio.
    p = int(np.round(6 / 7 * upsample_ratio))
    k = 2 * p + 1
    pooled_heatmap = ng.max_pool(heatmap,
                                 kernel_shape=(k, k),
                                 pads_begin=(p, p),
                                 pads_end=(p, p),
                                 strides=(1, 1),
                                 name=self.pooled_heatmaps_blob_name)
    f = ng.impl.Function(
        [ng.result(heatmap, name=self.heatmaps_blob_name),
         ng.result(pooled_heatmap, name=self.pooled_heatmaps_blob_name),
         ng.result(paf, name=self.pafs_blob_name)],
        function.get_parameters(), 'hpe')
    # Rebuild the IE network from the patched ngraph function.
    self.net = IENetwork(ng.impl.Function.to_capsule(f))
    # Ratio between the network input height and the heatmap height.
    self.output_scale = self.net.input_info[self.image_blob_name].input_data.shape[-2] / self.net.outputs[self.heatmaps_blob_name].shape[-2]
    if target_size is None:
        target_size = self.net.input_info[self.image_blob_name].input_data.shape[-2]
    # Round height/width up to a multiple of size_divisor.
    self.h = (target_size + size_divisor - 1) // size_divisor * size_divisor
    input_width = round(target_size * aspect_ratio)
    self.w = (input_width + size_divisor - 1) // size_divisor * size_divisor
    default_input_shape = self.net.input_info[self.image_blob_name].input_data.shape
    input_shape = {self.image_blob_name: (default_input_shape[:-2] + [self.h, self.w])}
    self.logger.info('Reshape net to {}'.format(input_shape))
    self.net.reshape(input_shape)
    num_joints = self.net.outputs[self.heatmaps_blob_name].shape[1] - 1  # The last channel is for background
    self.decoder = OpenPoseDecoder(num_joints, score_threshold=prob_threshold)
    self.size_divisor = size_divisor
def get_test_cnnnetwork():
    """Build a minimal Parameter -> ReLU -> Result graph and wrap it in an
    IENetwork.

    :return: the resulting IENetwork.
    """
    param = ng.parameter(Shape([1, 3, 22, 22]), name="parameter")
    relu = ng.relu(param)
    res = ng.result(relu, name='result')
    func = Function([res], [param], 'test')
    caps = Function.to_capsule(func)
    cnnNetwork = IENetwork(caps)
    # PEP 8: comparisons to None use identity, not equality (`!= None`).
    assert cnnNetwork is not None
    return cnnNetwork
def ngraph_embedding(ids, vocab_embeddings, vocab_size, embedding_dim, padding_idx, sparse):
    """Decompose an embedding lookup into ngraph ops and run it on CPU.

    :param ids: numpy array of indices to look up.
    :param vocab_embeddings: (vocab_size, embedding_dim) weight table, or
        None to fall back to an all-zero table.
    :param vocab_size: number of rows in the table.
    :param embedding_dim: number of columns in the table.
    :param padding_idx: row to zero out; -1 means the last row; None disables
        masking.
    :param sparse: unused; kept for interface compatibility.
    :return: dict of output blobs produced by the inference engine.
    """
    import ngraph as ng
    from ngraph import opset8 as opset
    from openvino.inference_engine import IECore

    if vocab_embeddings is None:
        # No weights supplied: use an all-zero embedding table.
        vocab_embeddings = np.zeros((vocab_size, embedding_dim)).astype("float32")

    node_ids = ng.parameter(shape=ids.shape, name='ids', dtype=ids.dtype)
    node_w = ng.parameter(shape=vocab_embeddings.shape, name='w',
                          dtype=vocab_embeddings.dtype)

    if padding_idx == -1:
        padding_idx += vocab_size
    if padding_idx is not None:
        # Zero out the padding row by multiplying W with a 0/1 mask.
        masked_embeddings = np.ones(vocab_embeddings.shape, dtype='int64')
        masked_embeddings[padding_idx, :] = 0  # mask
        node_mask = ng.constant(masked_embeddings, name='mask',
                                dtype=vocab_embeddings.dtype)
        node_masked_w = ng.multiply(node_w, node_mask)

    node_axis = ng.constant([0], name='const0', dtype=np.int64)
    # BUG FIX: compare against None explicitly — a padding_idx of 0 is a
    # valid row and must still select the masked weights; the old truthiness
    # test (`if padding_idx`) silently skipped the mask in that case.
    node_gather = opset.gather(
        data=node_masked_w if padding_idx is not None else node_w,
        indices=node_ids, axis=node_axis, batch_dims=0)
    graph = ng.result(node_gather, name='y')

    parameters = [node_ids, node_w]
    inputs_dict = {'ids': ids, "w": vocab_embeddings}

    function = ng.Function(graph, parameters, "embedding")
    ie_network = ng.function_to_cnn(function)
    ie = IECore()
    executable_network = ie.load_network(ie_network, 'CPU')
    output = executable_network.infer(inputs_dict)
    return output
def simple_if_without_parameters(condition_val):
    """Build an If op whose branches are parameterless constant bodies,
    followed by a ReLU.

    :param condition_val: Python bool selecting the branch at runtime.
    :return: the ReLU node wrapping the If output (0.7 or 9.0).
    """
    # NOTE: `np.bool` and `np.float` were removed in NumPy 1.24 — use the
    # builtins they aliased.
    condition = ng.constant(condition_val, dtype=bool)

    # then_body: constant 0.7
    then_constant = ng.constant(0.7, dtype=float)
    then_body_res_1 = ng.result(then_constant)
    then_body = GraphBody([], [then_body_res_1])
    then_body_inputs = []
    then_body_outputs = [TensorIteratorBodyOutputDesc(0, 0)]

    # else_body: constant 9.0
    else_const = ng.constant(9.0, dtype=float)
    else_body_res_1 = ng.result(else_const)
    else_body = GraphBody([], [else_body_res_1])
    else_body_inputs = []
    else_body_outputs = [TensorIteratorBodyOutputDesc(0, 0)]

    if_node = ng.if_op(condition, [], (then_body, else_body),
                       (then_body_inputs, else_body_inputs),
                       (then_body_outputs, else_body_outputs))
    relu = ng.relu(if_node)
    return relu
def test_sink_function_ctor():
    """A Function constructed with a sink (Assign) must include it in its
    op list and report the expected outputs/parameters/results."""
    param = ng.parameter([2, 2], name="input_data", dtype=np.float32)
    state = ng.read_value(param, "var_id_667")
    summed = ng.add(state, param, name="MemoryAdd")
    sink = ng.assign(summed, "var_id_667")
    out = ng.result(summed, "res")
    function = Function(results=[out], sinks=[sink],
                        parameters=[param], name="TestFunction")

    # Topological order includes the Assign sink between Add and Result.
    ordered_types = [op.get_type_name() for op in function.get_ordered_ops()]
    assert ordered_types == ["Parameter", "ReadValue", "Add", "Assign", "Result"]
    assert len(function.get_ops()) == 5
    assert function.get_output_size() == 1
    assert function.get_output_op(0).get_type_name() == "Result"
    assert function.get_output_element_type(0) == param.get_element_type()
    assert list(function.get_output_shape(0)) == [2, 2]
    assert function.get_parameters()[0].get_partial_shape() == PartialShape([2, 2])
    assert len(function.get_parameters()) == 1
    assert len(function.get_results()) == 1
    assert function.get_friendly_name() == "TestFunction"
def create_ngraph_function(args) -> Function:
    """Build the LeNet topology with ngraph ops, reading all kernel and bias
    weights sequentially from the binary file given by ``args.model``.

    :param args: parsed CLI arguments; only ``args.model`` (weights path)
        is used.
    :return: an ngraph Function named 'lenet'.
    """
    # Flat float32 weight blob, consumed in layer order via weights_offset.
    weights = np.fromfile(args.model, dtype=np.float32)
    weights_offset = 0
    padding_begin = [0, 0]
    padding_end = [0, 0]

    # input
    input_shape = [64, 1, 28, 28]
    param_node = ngraph.parameter(input_shape, np.float32, 'Parameter')

    # convolution 1
    conv_1_kernel_shape, conv_1_kernel_length = shape_and_length([20, 1, 5, 5])
    conv_1_kernel = ngraph.constant(
        weights[0:conv_1_kernel_length].reshape(conv_1_kernel_shape))
    weights_offset += conv_1_kernel_length
    conv_1_node = ngraph.convolution(param_node, conv_1_kernel, [1, 1],
                                     padding_begin, padding_end, [1, 1])

    # add 1 (bias)
    add_1_kernel_shape, add_1_kernel_length = shape_and_length([1, 20, 1, 1])
    add_1_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                add_1_kernel_length].reshape(add_1_kernel_shape))
    weights_offset += add_1_kernel_length
    add_1_node = ngraph.add(conv_1_node, add_1_kernel)

    # maxpool 1
    maxpool_1_node = ngraph.max_pool(add_1_node, [2, 2], padding_begin,
                                     padding_end, [2, 2], 'ceil', None)

    # convolution 2
    conv_2_kernel_shape, conv_2_kernel_length = shape_and_length(
        [50, 20, 5, 5])
    conv_2_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                conv_2_kernel_length].reshape(conv_2_kernel_shape))
    weights_offset += conv_2_kernel_length
    conv_2_node = ngraph.convolution(maxpool_1_node, conv_2_kernel, [1, 1],
                                     padding_begin, padding_end, [1, 1])

    # add 2 (bias)
    add_2_kernel_shape, add_2_kernel_length = shape_and_length([1, 50, 1, 1])
    add_2_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                add_2_kernel_length].reshape(add_2_kernel_shape))
    weights_offset += add_2_kernel_length
    add_2_node = ngraph.add(conv_2_node, add_2_kernel)

    # maxpool 2
    maxpool_2_node = ngraph.max_pool(add_2_node, [2, 2], padding_begin,
                                     padding_end, [2, 2], 'ceil', None)

    # reshape 1
    reshape_1_dims, reshape_1_length = shape_and_length([2])
    # workaround to get int64 weights from float32 ndarray w/o unnecessary copying
    dtype_weights = np.frombuffer(
        weights[weights_offset:weights_offset + 2 * reshape_1_length],
        dtype=np.int64)
    reshape_1_kernel = ngraph.constant(dtype_weights)
    weights_offset += 2 * reshape_1_length
    reshape_1_node = ngraph.reshape(maxpool_2_node, reshape_1_kernel, True)

    # matmul 1 (fully connected 800 -> 500)
    matmul_1_kernel_shape, matmul_1_kernel_length = shape_and_length(
        [500, 800])
    matmul_1_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                matmul_1_kernel_length].reshape(matmul_1_kernel_shape))
    weights_offset += matmul_1_kernel_length
    matmul_1_node = ngraph.matmul(reshape_1_node, matmul_1_kernel, False, True)

    # add 3 (bias)
    add_3_kernel_shape, add_3_kernel_length = shape_and_length([1, 500])
    add_3_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                add_3_kernel_length].reshape(add_3_kernel_shape))
    weights_offset += add_3_kernel_length
    add_3_node = ngraph.add(matmul_1_node, add_3_kernel)

    # ReLU
    relu_node = ngraph.relu(add_3_node)

    # reshape 2 — reuses the same int64 target-shape constant as reshape 1
    reshape_2_kernel = ngraph.constant(dtype_weights)
    reshape_2_node = ngraph.reshape(relu_node, reshape_2_kernel, True)

    # matmul 2 (fully connected 500 -> 10)
    matmul_2_kernel_shape, matmul_2_kernel_length = shape_and_length([10, 500])
    matmul_2_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                matmul_2_kernel_length].reshape(matmul_2_kernel_shape))
    weights_offset += matmul_2_kernel_length
    matmul_2_node = ngraph.matmul(reshape_2_node, matmul_2_kernel, False, True)

    # add 4 (bias)
    add_4_kernel_shape, add_4_kernel_length = shape_and_length([1, 10])
    add_4_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                add_4_kernel_length].reshape(add_4_kernel_shape))
    weights_offset += add_4_kernel_length
    add_4_node = ngraph.add(matmul_2_node, add_4_kernel)

    # softmax
    softmax_axis = 1
    softmax_node = ngraph.softmax(add_4_node, softmax_axis)

    # result
    result_node = ngraph.result(softmax_node)

    # nGraph function
    function = Function(result_node, [param_node], 'lenet')
    return function
def __init__(self, model_adapter, configuration=None, preload=False):
    """Wrap an OpenPose model: identify its PAF/heatmap outputs, append a
    max-pool keypoint-NMS head, and reshape the model to the target size.

    :param model_adapter: adapter exposing the underlying IE network.
    :param configuration: forwarded to the base class.
    :param preload: when True, load the (reshaped) model at the end.
    """
    # Base class is deliberately initialized without preloading: the
    # network is patched and reshaped below before it can be loaded.
    super().__init__(model_adapter, configuration, preload=False)
    self.pooled_heatmaps_blob_name = 'pooled_heatmaps'
    self.heatmaps_blob_name = 'heatmaps'
    self.pafs_blob_name = 'pafs'

    function = ng.function_from_cnn(self.model_adapter.net)
    paf = function.get_output_op(0)
    paf_shape = paf.outputs()[0].get_shape()
    heatmap = function.get_output_op(1)
    heatmap_shape = heatmap.outputs()[0].get_shape()
    # BUG FIX: these validations used `and`, so they only raised when BOTH
    # outputs were malformed; each condition must hold for each output.
    if len(paf_shape) != 4 or len(heatmap_shape) != 4:
        raise RuntimeError('OpenPose outputs must be 4-dimensional')
    if paf_shape[2] != heatmap_shape[2] or paf_shape[3] != heatmap_shape[3]:
        raise RuntimeError(
            'Last two dimensions of OpenPose outputs must match')
    # The PAF output has twice the channels of the heatmap output; swap the
    # two if the model emits them in the opposite order.
    if paf_shape[1] * 2 == heatmap_shape[1]:
        paf, heatmap = heatmap, paf
    elif paf_shape[1] != heatmap_shape[1] * 2:
        raise RuntimeError(
            'Size of second dimension of OpenPose of one output must be two times larger then size '
            'of second dimension of another output')

    paf = paf.inputs()[0].get_source_output().get_node()
    paf.set_friendly_name(self.pafs_blob_name)
    heatmap = heatmap.inputs()[0].get_source_output().get_node()
    heatmap.set_friendly_name(self.heatmaps_blob_name)

    # Add keypoints NMS to the network.
    # Heuristic NMS kernel size adjustment depending on the feature maps upsampling ratio.
    p = int(np.round(6 / 7 * self.upsample_ratio))
    k = 2 * p + 1
    pooled_heatmap = opset7.max_pool(heatmap,
                                     kernel_shape=(k, k),
                                     pads_begin=(p, p),
                                     pads_end=(p, p),
                                     strides=(1, 1),
                                     name=self.pooled_heatmaps_blob_name)
    f = ng.impl.Function([
        ng.result(heatmap, name=self.heatmaps_blob_name),
        ng.result(pooled_heatmap, name=self.pooled_heatmaps_blob_name),
        ng.result(paf, name=self.pafs_blob_name)
    ], function.get_parameters(), 'hpe')
    self.model_adapter.net = IENetwork(ng.impl.Function.to_capsule(f))
    self.inputs = self.model_adapter.get_input_layers()
    self.outputs = self.model_adapter.get_output_layers()

    # Ratio between the model input height and the heatmap height.
    self.output_scale = self.inputs[self.image_blob_name].shape[
        -2] / self.outputs[self.heatmaps_blob_name].shape[-2]
    if self.target_size is None:
        self.target_size = self.inputs[self.image_blob_name].shape[-2]
    # Round height/width up to a multiple of size_divisor.
    self.h = (self.target_size + self.size_divisor -
              1) // self.size_divisor * self.size_divisor
    input_width = round(self.target_size * self.aspect_ratio)
    self.w = (input_width + self.size_divisor -
              1) // self.size_divisor * self.size_divisor
    default_input_shape = self.inputs[self.image_blob_name].shape
    input_shape = {
        self.image_blob_name: (default_input_shape[:-2] + [self.h, self.w])
    }
    self.logger.debug('\tReshape model from {} to {}'.format(
        default_input_shape, input_shape[self.image_blob_name]))
    super().reshape(input_shape)
    if preload:
        self.load()

    num_joints = self.outputs[self.heatmaps_blob_name].shape[
        1] - 1  # The last channel is for background
    self.decoder = OpenPoseDecoder(
        num_joints, score_threshold=self.confidence_threshold)
def create_ngraph_function(args: argparse.Namespace) -> ngraph.impl.Function:
    """Create a network on the fly from the source code using ngraph

    :param args: parsed CLI arguments; only ``args.model`` (weights path)
        is used.
    :return: the LeNet topology as an ngraph Function.
    """

    def shape_and_length(shape: list) -> typing.Tuple[list, int]:
        # Return the shape together with its total element count.
        length = reduce(lambda x, y: x * y, shape)
        return shape, length

    # Flat float32 weight blob, consumed in layer order via weights_offset.
    weights = np.fromfile(args.model, dtype=np.float32)
    weights_offset = 0
    padding_begin = padding_end = [0, 0]

    # input
    input_shape = [64, 1, 28, 28]
    param_node = ngraph.parameter(input_shape, np.float32, 'Parameter')

    # convolution 1
    conv_1_kernel_shape, conv_1_kernel_length = shape_and_length([20, 1, 5, 5])
    conv_1_kernel = ngraph.constant(
        weights[0:conv_1_kernel_length].reshape(conv_1_kernel_shape))
    weights_offset += conv_1_kernel_length
    conv_1_node = ngraph.convolution(param_node, conv_1_kernel, [1, 1],
                                     padding_begin, padding_end, [1, 1])

    # add 1 (bias)
    add_1_kernel_shape, add_1_kernel_length = shape_and_length([1, 20, 1, 1])
    add_1_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                add_1_kernel_length].reshape(add_1_kernel_shape),
    )
    weights_offset += add_1_kernel_length
    add_1_node = ngraph.add(conv_1_node, add_1_kernel)

    # maxpool 1
    maxpool_1_node = ngraph.max_pool(add_1_node, [2, 2], padding_begin,
                                     padding_end, [2, 2], 'ceil', None)

    # convolution 2
    conv_2_kernel_shape, conv_2_kernel_length = shape_and_length(
        [50, 20, 5, 5])
    conv_2_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                conv_2_kernel_length].reshape(conv_2_kernel_shape),
    )
    weights_offset += conv_2_kernel_length
    conv_2_node = ngraph.convolution(maxpool_1_node, conv_2_kernel, [1, 1],
                                     padding_begin, padding_end, [1, 1])

    # add 2 (bias)
    add_2_kernel_shape, add_2_kernel_length = shape_and_length([1, 50, 1, 1])
    add_2_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                add_2_kernel_length].reshape(add_2_kernel_shape),
    )
    weights_offset += add_2_kernel_length
    add_2_node = ngraph.add(conv_2_node, add_2_kernel)

    # maxpool 2
    maxpool_2_node = ngraph.max_pool(add_2_node, [2, 2], padding_begin,
                                     padding_end, [2, 2], 'ceil', None)

    # reshape 1
    reshape_1_dims, reshape_1_length = shape_and_length([2])
    # workaround to get int64 weights from float32 ndarray w/o unnecessary copying
    dtype_weights = np.frombuffer(
        weights[weights_offset:weights_offset + 2 * reshape_1_length],
        dtype=np.int64,
    )
    reshape_1_kernel = ngraph.constant(dtype_weights)
    weights_offset += 2 * reshape_1_length
    reshape_1_node = ngraph.reshape(maxpool_2_node, reshape_1_kernel, True)

    # matmul 1 (fully connected 800 -> 500)
    matmul_1_kernel_shape, matmul_1_kernel_length = shape_and_length(
        [500, 800])
    matmul_1_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                matmul_1_kernel_length].reshape(matmul_1_kernel_shape),
    )
    weights_offset += matmul_1_kernel_length
    matmul_1_node = ngraph.matmul(reshape_1_node, matmul_1_kernel, False, True)

    # add 3 (bias)
    add_3_kernel_shape, add_3_kernel_length = shape_and_length([1, 500])
    add_3_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                add_3_kernel_length].reshape(add_3_kernel_shape),
    )
    weights_offset += add_3_kernel_length
    add_3_node = ngraph.add(matmul_1_node, add_3_kernel)

    # ReLU
    relu_node = ngraph.relu(add_3_node)

    # reshape 2 — reuses the same int64 target-shape constant as reshape 1
    reshape_2_kernel = ngraph.constant(dtype_weights)
    reshape_2_node = ngraph.reshape(relu_node, reshape_2_kernel, True)

    # matmul 2 (fully connected 500 -> 10)
    matmul_2_kernel_shape, matmul_2_kernel_length = shape_and_length([10, 500])
    matmul_2_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                matmul_2_kernel_length].reshape(matmul_2_kernel_shape),
    )
    weights_offset += matmul_2_kernel_length
    matmul_2_node = ngraph.matmul(reshape_2_node, matmul_2_kernel, False, True)

    # add 4 (bias)
    add_4_kernel_shape, add_4_kernel_length = shape_and_length([1, 10])
    add_4_kernel = ngraph.constant(
        weights[weights_offset:weights_offset +
                add_4_kernel_length].reshape(add_4_kernel_shape),
    )
    weights_offset += add_4_kernel_length
    add_4_node = ngraph.add(matmul_2_node, add_4_kernel)

    # softmax
    softmax_axis = 1
    softmax_node = ngraph.softmax(add_4_node, softmax_axis)

    # result
    result_node = ngraph.result(softmax_node)

    return ngraph.impl.Function(result_node, [param_node], 'lenet')