import numpy as np
import ngraph as ng
# Assumed import path for the graph-body helper types (as used by the nGraph
# Python API shipped with OpenVINO); adjust if your package layout differs.
from ngraph.utils.tensor_iterator_types import (
    GraphBody,
    TensorIteratorBodyOutputDesc,
    TensorIteratorInvariantInputDesc,
)


def create_diff_if_with_two_outputs(condition_val):
    condition = ng.constant(condition_val, dtype=bool)  # np.bool is removed in recent NumPy

    # then_body
    X_t = ng.parameter([2], np.float32, "X")
    Y_t = ng.parameter([2], np.float32, "Y")
    mmul_t = ng.matmul(X_t, Y_t, False, False)
    mul_t = ng.multiply(Y_t, X_t)
    then_body_res_1 = ng.result(mmul_t)
    then_body_res_2 = ng.result(mul_t)
    then_body = GraphBody([X_t, Y_t], [then_body_res_1, then_body_res_2])
    then_body_inputs = [TensorIteratorInvariantInputDesc(1, 0),
                        TensorIteratorInvariantInputDesc(2, 1)]
    then_body_outputs = [TensorIteratorBodyOutputDesc(0, 0),
                         TensorIteratorBodyOutputDesc(1, 1)]

    # else_body
    X_e = ng.parameter([2], np.float32, "X")
    Z_e = ng.parameter([], np.float32, "Z")
    mul_e = ng.multiply(X_e, Z_e)
    else_body_res_1 = ng.result(Z_e)
    else_body_res_2 = ng.result(mul_e)
    else_body = GraphBody([X_e, Z_e], [else_body_res_1, else_body_res_2])
    else_body_inputs = [TensorIteratorInvariantInputDesc(1, 0),
                        TensorIteratorInvariantInputDesc(3, 1)]
    else_body_outputs = [TensorIteratorBodyOutputDesc(0, 0),
                         TensorIteratorBodyOutputDesc(1, 1)]

    X = ng.constant([3, 4], dtype=np.float32)
    Y = ng.constant([2, 1], dtype=np.float32)
    Z = ng.constant(4.0, dtype=np.float32)
    if_node = ng.if_op(condition,
                       [X, Y, Z],
                       (then_body, else_body),
                       (then_body_inputs, else_body_inputs),
                       (then_body_outputs, else_body_outputs))
    return if_node
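A minimal usage sketch, assuming the standard nGraph node introspection methods (get_type_name, get_output_size, get_output_shape); it only inspects the If node built above, it does not execute it.

# Sketch: build the If node with a constant-true condition and inspect it.
if_node = create_diff_if_with_two_outputs(True)
assert if_node.get_type_name() == "If"
assert if_node.get_output_size() == 2
print(if_node.get_output_shape(1))  # second subgraph output: shape [2] in both branches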
from functools import reduce

import numpy as np
import ngraph
from ngraph.impl import Function


def shape_and_length(shape: list):
    # Helper used by create_ngraph_function below; the later variant defines
    # the same helper as a nested function.
    length = reduce(lambda x, y: x * y, shape)
    return shape, length


def create_ngraph_function(args) -> Function:
    weights = np.fromfile(args.model, dtype=np.float32)
    weights_offset = 0
    padding_begin = [0, 0]
    padding_end = [0, 0]

    # input
    input_shape = [64, 1, 28, 28]
    param_node = ngraph.parameter(input_shape, np.float32, 'Parameter')

    # convolution 1
    conv_1_kernel_shape, conv_1_kernel_length = shape_and_length([20, 1, 5, 5])
    conv_1_kernel = ngraph.constant(
        weights[0:conv_1_kernel_length].reshape(conv_1_kernel_shape))
    weights_offset += conv_1_kernel_length
    conv_1_node = ngraph.convolution(param_node, conv_1_kernel, [1, 1],
                                     padding_begin, padding_end, [1, 1])

    # add 1
    add_1_kernel_shape, add_1_kernel_length = shape_and_length([1, 20, 1, 1])
    add_1_kernel = ngraph.constant(
        weights[weights_offset:weights_offset + add_1_kernel_length].reshape(add_1_kernel_shape))
    weights_offset += add_1_kernel_length
    add_1_node = ngraph.add(conv_1_node, add_1_kernel)

    # maxpool 1
    maxpool_1_node = ngraph.max_pool(add_1_node, [2, 2], padding_begin,
                                     padding_end, [2, 2], 'ceil', None)

    # convolution 2
    conv_2_kernel_shape, conv_2_kernel_length = shape_and_length([50, 20, 5, 5])
    conv_2_kernel = ngraph.constant(
        weights[weights_offset:weights_offset + conv_2_kernel_length].reshape(conv_2_kernel_shape))
    weights_offset += conv_2_kernel_length
    conv_2_node = ngraph.convolution(maxpool_1_node, conv_2_kernel, [1, 1],
                                     padding_begin, padding_end, [1, 1])

    # add 2
    add_2_kernel_shape, add_2_kernel_length = shape_and_length([1, 50, 1, 1])
    add_2_kernel = ngraph.constant(
        weights[weights_offset:weights_offset + add_2_kernel_length].reshape(add_2_kernel_shape))
    weights_offset += add_2_kernel_length
    add_2_node = ngraph.add(conv_2_node, add_2_kernel)

    # maxpool 2
    maxpool_2_node = ngraph.max_pool(add_2_node, [2, 2], padding_begin,
                                     padding_end, [2, 2], 'ceil', None)

    # reshape 1
    reshape_1_dims, reshape_1_length = shape_and_length([2])
    # workaround to get int64 weights from float32 ndarray w/o unnecessary copying
    dtype_weights = np.frombuffer(
        weights[weights_offset:weights_offset + 2 * reshape_1_length], dtype=np.int64)
    reshape_1_kernel = ngraph.constant(dtype_weights)
    weights_offset += 2 * reshape_1_length
    reshape_1_node = ngraph.reshape(maxpool_2_node, reshape_1_kernel, True)

    # matmul 1
    matmul_1_kernel_shape, matmul_1_kernel_length = shape_and_length([500, 800])
    matmul_1_kernel = ngraph.constant(
        weights[weights_offset:weights_offset + matmul_1_kernel_length].reshape(matmul_1_kernel_shape))
    weights_offset += matmul_1_kernel_length
    matmul_1_node = ngraph.matmul(reshape_1_node, matmul_1_kernel, False, True)

    # add 3
    add_3_kernel_shape, add_3_kernel_length = shape_and_length([1, 500])
    add_3_kernel = ngraph.constant(
        weights[weights_offset:weights_offset + add_3_kernel_length].reshape(add_3_kernel_shape))
    weights_offset += add_3_kernel_length
    add_3_node = ngraph.add(matmul_1_node, add_3_kernel)

    # ReLU
    relu_node = ngraph.relu(add_3_node)

    # reshape 2
    reshape_2_kernel = ngraph.constant(dtype_weights)
    reshape_2_node = ngraph.reshape(relu_node, reshape_2_kernel, True)

    # matmul 2
    matmul_2_kernel_shape, matmul_2_kernel_length = shape_and_length([10, 500])
    matmul_2_kernel = ngraph.constant(
        weights[weights_offset:weights_offset + matmul_2_kernel_length].reshape(matmul_2_kernel_shape))
    weights_offset += matmul_2_kernel_length
    matmul_2_node = ngraph.matmul(reshape_2_node, matmul_2_kernel, False, True)

    # add 4
    add_4_kernel_shape, add_4_kernel_length = shape_and_length([1, 10])
    add_4_kernel = ngraph.constant(
        weights[weights_offset:weights_offset + add_4_kernel_length].reshape(add_4_kernel_shape))
    weights_offset += add_4_kernel_length
    add_4_node = ngraph.add(matmul_2_node, add_4_kernel)

    # softmax
    softmax_axis = 1
    softmax_node = ngraph.softmax(add_4_node, softmax_axis)

    # result
    result_node = ngraph.result(softmax_node)

    # nGraph function
    function = Function(result_node, [param_node], 'lenet')

    return function
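As a sanity check on how weights_offset walks the flat weights file, the sketch below recomputes the number of float32 elements create_ngraph_function expects to read. expected_lenet_weight_count is a hypothetical helper introduced here for illustration; the +4 accounts for the two int64 reshape dims stored in four float32 slots (read once and reused by both reshapes).

from functools import reduce


def expected_lenet_weight_count() -> int:
    # Hypothetical helper: float32 elements consumed by create_ngraph_function above.
    shapes = [[20, 1, 5, 5], [1, 20, 1, 1],   # conv 1 kernel + bias
              [50, 20, 5, 5], [1, 50, 1, 1],  # conv 2 kernel + bias
              [500, 800], [1, 500],           # matmul 1 kernel + bias
              [10, 500], [1, 10]]             # matmul 2 kernel + bias
    count = sum(reduce(lambda x, y: x * y, s) for s in shapes)
    return count + 4  # two int64 reshape dims occupy four float32 slots

# e.g. np.fromfile(args.model, dtype=np.float32).size should be at least this value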
import argparse
import typing
from functools import reduce

import numpy as np
import ngraph


def create_ngraph_function(args: argparse.Namespace) -> ngraph.impl.Function:
    """Create a network on the fly from the source code using ngraph"""

    def shape_and_length(shape: list) -> typing.Tuple[list, int]:
        length = reduce(lambda x, y: x * y, shape)
        return shape, length

    weights = np.fromfile(args.model, dtype=np.float32)
    weights_offset = 0
    padding_begin = padding_end = [0, 0]

    # input
    input_shape = [64, 1, 28, 28]
    param_node = ngraph.parameter(input_shape, np.float32, 'Parameter')

    # convolution 1
    conv_1_kernel_shape, conv_1_kernel_length = shape_and_length([20, 1, 5, 5])
    conv_1_kernel = ngraph.constant(
        weights[0:conv_1_kernel_length].reshape(conv_1_kernel_shape))
    weights_offset += conv_1_kernel_length
    conv_1_node = ngraph.convolution(param_node, conv_1_kernel, [1, 1],
                                     padding_begin, padding_end, [1, 1])

    # add 1
    add_1_kernel_shape, add_1_kernel_length = shape_and_length([1, 20, 1, 1])
    add_1_kernel = ngraph.constant(
        weights[weights_offset:weights_offset + add_1_kernel_length].reshape(add_1_kernel_shape),
    )
    weights_offset += add_1_kernel_length
    add_1_node = ngraph.add(conv_1_node, add_1_kernel)

    # maxpool 1
    maxpool_1_node = ngraph.max_pool(add_1_node, [2, 2], padding_begin,
                                     padding_end, [2, 2], 'ceil', None)

    # convolution 2
    conv_2_kernel_shape, conv_2_kernel_length = shape_and_length([50, 20, 5, 5])
    conv_2_kernel = ngraph.constant(
        weights[weights_offset:weights_offset + conv_2_kernel_length].reshape(conv_2_kernel_shape),
    )
    weights_offset += conv_2_kernel_length
    conv_2_node = ngraph.convolution(maxpool_1_node, conv_2_kernel, [1, 1],
                                     padding_begin, padding_end, [1, 1])

    # add 2
    add_2_kernel_shape, add_2_kernel_length = shape_and_length([1, 50, 1, 1])
    add_2_kernel = ngraph.constant(
        weights[weights_offset:weights_offset + add_2_kernel_length].reshape(add_2_kernel_shape),
    )
    weights_offset += add_2_kernel_length
    add_2_node = ngraph.add(conv_2_node, add_2_kernel)

    # maxpool 2
    maxpool_2_node = ngraph.max_pool(add_2_node, [2, 2], padding_begin,
                                     padding_end, [2, 2], 'ceil', None)

    # reshape 1
    reshape_1_dims, reshape_1_length = shape_and_length([2])
    # workaround to get int64 weights from float32 ndarray w/o unnecessary copying
    dtype_weights = np.frombuffer(
        weights[weights_offset:weights_offset + 2 * reshape_1_length],
        dtype=np.int64,
    )
    reshape_1_kernel = ngraph.constant(dtype_weights)
    weights_offset += 2 * reshape_1_length
    reshape_1_node = ngraph.reshape(maxpool_2_node, reshape_1_kernel, True)

    # matmul 1
    matmul_1_kernel_shape, matmul_1_kernel_length = shape_and_length([500, 800])
    matmul_1_kernel = ngraph.constant(
        weights[weights_offset:weights_offset + matmul_1_kernel_length].reshape(matmul_1_kernel_shape),
    )
    weights_offset += matmul_1_kernel_length
    matmul_1_node = ngraph.matmul(reshape_1_node, matmul_1_kernel, False, True)

    # add 3
    add_3_kernel_shape, add_3_kernel_length = shape_and_length([1, 500])
    add_3_kernel = ngraph.constant(
        weights[weights_offset:weights_offset + add_3_kernel_length].reshape(add_3_kernel_shape),
    )
    weights_offset += add_3_kernel_length
    add_3_node = ngraph.add(matmul_1_node, add_3_kernel)

    # ReLU
    relu_node = ngraph.relu(add_3_node)

    # reshape 2
    reshape_2_kernel = ngraph.constant(dtype_weights)
    reshape_2_node = ngraph.reshape(relu_node, reshape_2_kernel, True)

    # matmul 2
    matmul_2_kernel_shape, matmul_2_kernel_length = shape_and_length([10, 500])
    matmul_2_kernel = ngraph.constant(
        weights[weights_offset:weights_offset + matmul_2_kernel_length].reshape(matmul_2_kernel_shape),
    )
    weights_offset += matmul_2_kernel_length
    matmul_2_node = ngraph.matmul(reshape_2_node, matmul_2_kernel, False, True)

    # add 4
    add_4_kernel_shape, add_4_kernel_length = shape_and_length([1, 10])
    add_4_kernel = ngraph.constant(
        weights[weights_offset:weights_offset + add_4_kernel_length].reshape(add_4_kernel_shape),
    )
    weights_offset += add_4_kernel_length
    add_4_node = ngraph.add(matmul_2_node, add_4_kernel)

    # softmax
    softmax_axis = 1
    softmax_node = ngraph.softmax(add_4_node, softmax_axis)

    # result
    result_node = ngraph.result(softmax_node)

    return ngraph.impl.Function(result_node, [param_node], 'lenet')
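A hedged usage sketch, assuming the OpenVINO 2021 Inference Engine Python API. ngraph.function_to_cnn is assumed to be available in the installed ngraph package for wrapping the nGraph Function into a CNNNetwork, and args carries the sample's command-line arguments.

from openvino.inference_engine import IECore
import numpy as np
import ngraph

# Sketch: wrap the nGraph function into a network and run it on CPU.
# ngraph.function_to_cnn(...) is an assumption; args.model is the weights file.
net = ngraph.function_to_cnn(create_ngraph_function(args))
ie = IECore()
exec_net = ie.load_network(network=net, device_name='CPU')

input_name = next(iter(net.input_info))
output_name = next(iter(net.outputs))
batch = np.zeros([64, 1, 28, 28], dtype=np.float32)  # placeholder input batch
res = exec_net.infer(inputs={input_name: batch})[output_name]
print(res.argmax(axis=1))  # predicted digit per image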