def DumpJSTest(model, example, js_fd):
    """Emit one mocha/chai-style JavaScript test case for *example* into *js_fd*.

    The function first runs a series of capability checks (supported types,
    NHWC layout flag, operation code, per-operand type/dimension white-lists,
    broadcast compatibility for ADD/MUL, quantization scale for conv ops).
    If any check fails the example is skipped and the reason is logged to
    stderr.  Otherwise the full JS test body (model construction, compilation,
    execution, output assertions) is printed to *js_fd*.

    Side effects: mutates *model* (rewrites FLOAT16 types to FLOAT32, strips
    BOOL layout parameters, sets ``model.dumped``) and increments the global
    ``Configuration.example_count``.

    NOTE(review): the source this was recovered from had runs of whitespace
    collapsed; indentation inside the generated-JS string literals may
    originally have been wider — confirm against the emitted .js files.
    """
    assert model.compiled
    if model.dumped:
        return
    # check: types
    for t in model.GetTypes():
        if t.type not in Configuration.support_types and \
           t.type not in str(Configuration.support_types).lower():
            print(" skip not support types: %s (%s)" %
                  (example.examplesName, t.type), file=sys.stderr)
            return
        else:
            # use "TENSOR_FLOAT32" to support "TENSOR_FLOAT16"
            if t.type == "TENSOR_FLOAT16":
                t.type = "TENSOR_FLOAT32"
            # use "FLOAT32" to support "FLOAT16"
            if t.type == "FLOAT16":
                t.type = "FLOAT32"
    # support layout: NHWC — a BOOL parameter set to False selects NHWC and is
    # dropped from the model; True (NCHW) is not supported, so skip.
    for p in example.model.GetParameters():
        if p.type.type == "BOOL":
            # == False (not "is False"): the value may be a numpy bool scalar.
            if p.GetValueAsNumpy() == False:
                if p in model.operands:
                    model.operands.remove(p)
                    for op in model.operations:
                        if p in op.ins:
                            op.ins.remove(p)
            else:
                print(" skip not support layout: %s (%s)" %
                      (example.examplesName, p.GetValueAsNumpy()),
                      file=sys.stderr)
                return
    # check data type against the per-operation white-list
    for operation in model.operations:
        if operation.optype not in Configuration.check_list.keys() and \
           operation.optype not in str(Configuration.check_list.keys()).lower():
            print(" skip not support operation code: %s (%s)" %
                  (example.examplesName, operation.optype), file=sys.stderr)
            return
        else:
            # inputs: index into check_list["inputs"] directly
            for inputIndex in range(len(example.model.GetInputs())):
                t = example.model.GetInputs()[inputIndex].type
                c = Configuration.check_list[operation.optype]["inputs"]
                if inputIndex in c:
                    if t.type not in c[inputIndex]["types"]:
                        print(" skip not support input(type): %s (%s)" %
                              (example.examplesName, t.type), file=sys.stderr)
                        return
                    if len(t.dimensions) not in c[inputIndex]["dimensions"]:
                        print(
                            " skip not support input(dimension): %s (%s)" %
                            (example.examplesName, t.dimensions),
                            file=sys.stderr)
                        return
                else:
                    print(" skip not support input: %s (%s)" %
                          (example.examplesName,
                           example.model.GetInputs()[inputIndex]),
                          file=sys.stderr)
                    return
            # parameters: offset by the number of real inputs, since the
            # check_list "inputs" table covers inputs followed by parameters
            for parameterIndex in range(len(example.model.GetParameters())):
                t = example.model.GetParameters()[parameterIndex].type
                c = Configuration.check_list[operation.optype]["inputs"]
                pii = parameterIndex + len(example.model.GetInputs())
                if pii in c:
                    if t.type not in c[pii]["types"]:
                        print(" skip not support parameter(type): %s (%s)" %
                              (example.examplesName, t.type), file=sys.stderr)
                        return
                    if len(t.dimensions) not in c[pii]["dimensions"]:
                        print(
                            " skip not support parameter(dimension): %s (%s)" %
                            (example.examplesName, t.dimensions),
                            file=sys.stderr)
                        return
                else:
                    print(" skip not support parameter: %s (%s)" %
                          (example.examplesName,
                           example.model.GetParameters()[parameterIndex]),
                          file=sys.stderr)
                    return
            # outputs
            for outputIndex in range(len(example.model.GetOutputs())):
                t = example.model.GetOutputs()[outputIndex].type
                c = Configuration.check_list[operation.optype]["outputs"]
                if outputIndex in c:
                    if t.type not in c[outputIndex]["types"]:
                        print(" skip not support output(type): %s (%s)" %
                              (example.examplesName, t.type), file=sys.stderr)
                        return
                    if len(t.dimensions) not in c[outputIndex]["dimensions"]:
                        print(
                            " skip not support output(dimension): %s (%s)" %
                            (example.examplesName, t.dimensions),
                            file=sys.stderr)
                        return
                else:
                    print(" skip not support output: %s (%s)" %
                          (example.examplesName,
                           example.model.GetOutputs()[outputIndex]),
                          file=sys.stderr)
                    return
    # check: input and output and values
    for inputFeedDict, outputFeedDict in example.feedDicts:
        for inputOpName in example.model.GetInputs():
            # check input value is None
            # BUGFIX: was "len(...) is 0" — identity comparison with an int
            # literal is implementation-dependent (SyntaxWarning on 3.8+).
            if len(inputFeedDict[inputOpName]) == 0:
                # For "TRANSPOSE": if perm is not given, it is set to (n-1...0)
                if model.operations[0].optype == "TRANSPOSE":
                    perm_value = []
                    for num in range(len(model.operands[0].type.dimensions)):
                        perm_value.insert(0, num)
                    # set "perm" dimensions
                    model.operands[1].type.dimensions.clear()
                    model.operands[1].type.dimensions.append(
                        len(model.operands[0].type.dimensions))
                    # set "perm" value
                    inputFeedDict[inputOpName] = perm_value
                else:
                    print(" skip input value is None: %s (%s - %s)" %
                          (example.examplesName, model.operations[0].optype,
                           inputOpName),
                          file=sys.stderr)
                    return
    # check: compatible dimensions (ADD/MUL broadcasting is only supported
    # when both operand types match, or both are rank-1)
    if model.operations[0].optype == "MUL" or model.operations[
            0].optype == "ADD":
        if model.operands[0].type != model.operands[1].type:
            if len(model.operands[0].type.dimensions) != 1 or len(
                    model.operands[1].type.dimensions) != 1:
                print(
                    " skip not support input(compatible dimensions): %s (%s - %s)"
                    % (example.examplesName,
                       model.operands[0].type.dimensions,
                       model.operands[1].type.dimensions),
                    file=sys.stderr)
                return
    # check: scale — quantized conv requires
    # output_scale > input_scale * filter_scale
    if model.operations[0].optype == "CONV_2D" or model.operations[
            0].optype == "DEPTHWISE_CONV_2D":
        if model.operands[0].type.type == "TENSOR_QUANT8_ASYMM":
            if example.model.GetOutputs()[0].type.scale <= (
                    model.operands[0].type.scale *
                    model.operands[1].type.scale):
                print(
                    " skip not support output(scale): %s (%s <= (%s * %s))" %
                    (example.examplesName,
                     example.model.GetOutputs()[0].type.scale,
                     model.operands[0].type.scale,
                     model.operands[1].type.scale),
                    file=sys.stderr)
                return
    # set js test names from the spec file name; a trailing digit becomes a
    # sub-test index unless the preceding token is "v1"
    test_name = ""
    test_index = ""
    args = "options"
    test_info = tg.FileNames.specName.capitalize().replace("_", " ")
    test_name_array = test_info.split(" ")
    if test_name_array[-1].isdigit():
        # NOTE(review): "is not None" on a str.split() element is always
        # true — kept for behavior compatibility; the "v1" check is what
        # actually decides the branch.
        if test_name_array[-2] is not None and str(
                test_name_array[-2]) != "v1":
            test_name = " ".join(test_name_array[:-1])
            test_index = test_name_array[-1]
        else:
            test_name = " ".join(test_name_array[:-1])
            test_name = str(test_name) + "_" + test_name_array[-1]
    else:
        test_name = test_info
    print("", file=js_fd)
    for inputFeedDict, outputFeedDict in example.feedDicts:
        # test header
        if Configuration.single_example_flag:
            if test_index == "":
                print(
                    " it('check result for %s example', async function() {" %
                    test_name,
                    file=js_fd)
            else:
                print(
                    " it('check result for %s example/%s', async function() {"
                    % (test_name, test_index),
                    file=js_fd)
        else:
            if test_index == "":
                print(
                    " it('check result for %s example-%s', async function() {"
                    % (test_name, Configuration.example_count),
                    file=js_fd)
            else:
                print(
                    " it('check result for %s example/%s-%s', async function() {"
                    % (test_name, test_index, Configuration.example_count),
                    file=js_fd)
        print(" // For '%s' example: %s" % (test_name, example.examplesName),
              file=js_fd)
        print(" let model = await nn.createModel(%s);" % args, file=js_fd)
        print(" let operandIndex = 0;\n", file=js_fd)
        # set input and output values
        for inputOpName in example.model.GetInputs():
            print(" let %s_value = %s;" %
                  (inputOpName, inputFeedDict[inputOpName]),
                  file=js_fd)
        for outputOpName in example.model.GetOutputs():
            print(" let %s_expect = %s;" %
                  (outputOpName, outputFeedDict[outputOpName]),
                  file=js_fd)
        print("", file=js_fd)
        # set input and output types
        for t in model.GetTypes():
            if t.scale == 0.0 and t.zeroPoint == 0 and t.extraParams is None:
                if t.type in ["FLOAT32", "INT32", "UINT32"]:
                    # scalar types have no dimensions
                    typeDef = " let %s = {type: nn.%s};" % (t, t.type)
                else:
                    typeDef = " let %s = {type: nn.%s, dimensions: [%s]};\n let %s_length = product(%s.dimensions);" % (
                        t, t.type, t.GetDimensionsString()[1:-1], t, t)
            else:
                if t.extraParams is None or t.extraParams.hide:
                    typeDef = " let %s = {type: nn.%s, dimensions: [%s], scale: %s, zeroPoint: %d};\n let %s_length = product(%s.dimensions);" % (
                        t, t.type, t.GetDimensionsString()[1:-1],
                        tg.PrettyPrintAsFloat(t.scale)[:-1], t.zeroPoint, t, t)
                else:
                    typeDef = " let %s = {type: nn.%s, dimensions: [%s], scale: %s, zeroPoint: %d, %s};\n let %s_length = product(%s.dimensions);" % (
                        t, t.type, t.GetDimensionsString()[1:-1],
                        tg.PrettyPrintAsFloat(t.scale)[:-1], t.zeroPoint,
                        t.extraParams.GetConstructor(), t, t)
            print(typeDef, file=js_fd)
        print("", file=js_fd)
        # set operands
        for op in model.operands:
            print(" let %s = operandIndex++;" % op, file=js_fd)
            print(" model.addOperand(%s);" % op.type, file=js_fd)
        print("", file=js_fd)
        # set other inputs value(support only one input)
        if len(example.model.GetInputs()) > 1:
            for inputIndex in range(len(example.model.GetInputs())):
                # BUGFIX: was "inputIndex is not 0" — identity comparison
                # with an int literal; use != for value comparison.
                if inputIndex != 0:
                    inputType = example.model.GetInputs()[inputIndex].type.type
                    str_array = typeToArray(inputType)
                    print(" model.setOperandValue(%s, new %s(%s_value));" %
                          (example.model.GetInputs()[inputIndex], str_array,
                           example.model.GetInputs()[inputIndex]),
                          file=js_fd)
            print("", file=js_fd)
        # set parameter
        for p in model.GetParameters():
            parameterType = p.type.type
            str_array = typeToArray(parameterType)
            print(" model.setOperandValue(%s, new %s([%s]));" %
                  (p, str_array, GetJointStr(p.value)),
                  file=js_fd)
        # set operations
        for op in model.operations:
            print(" model.addOperation(nn.%s, [%s], [%s]);" %
                  (op.optype, tg.GetJointStr(op.ins), tg.GetJointStr(op.outs)),
                  file=js_fd)
        print("", file=js_fd)
        # identify inputs and outputs
        print(" model.identifyInputsAndOutputs([%s], [%s]);" %
              (example.model.GetInputs()[0],
               tg.GetJointStr(example.model.GetOutputs())),
              file=js_fd)
        print(" await model.finish();", file=js_fd)
        print("", file=js_fd)
        # compiling model
        print(" let compilation = await model.createCompilation();",
              file=js_fd)
        print(" compilation.setPreference(getPreferenceCode(%s.prefer));" %
              args,
              file=js_fd)
        print(" await compilation.finish();", file=js_fd)
        print("", file=js_fd)
        # executing model
        print(" let execution = await compilation.createExecution();",
              file=js_fd)
        print("", file=js_fd)
        # set input and output
        inputType = example.model.GetInputs()[0].type.type
        str_array = typeToArray(inputType)
        print(" let %s_input = new %s(%s_value);" %
              (example.model.GetInputs()[0], str_array,
               example.model.GetInputs()[0]),
              file=js_fd)
        print(" execution.setInput(0, %s_input);" %
              example.model.GetInputs()[0],
              file=js_fd)
        for outputIndex in range(len(example.model.GetOutputs())):
            outputType = example.model.GetOutputs()[outputIndex].type.type
            str_array = typeToArray(outputType)
            print(" let %s_output = new %s(%s_length);" %
                  (example.model.GetOutputs()[outputIndex], str_array,
                   example.model.GetOutputs()[outputIndex].type),
                  file=js_fd)
            print(" execution.setOutput(%s, %s_output);" %
                  (outputIndex, example.model.GetOutputs()[outputIndex]),
                  file=js_fd)
        print("", file=js_fd)
        print(" await execution.startCompute();", file=js_fd)
        print("", file=js_fd)
        # assert output
        for output in example.model.GetOutputs():
            print(" for (let i = 0; i < %s_length; ++i) {" % output.type,
                  file=js_fd)
            print(
                " assert.isTrue(almostEqualCTS(%s_output[i], %s_expect[i]));"
                % (output, output),
                file=js_fd)
            print(" }", file=js_fd)
        print(" });", file=js_fd)
        Configuration.example_count += 1
    model.dumped = True
def DumpMixedType(operands, feedDict):
    """Render *operands* (fed with *feedDict* values) as a C++ MixedTyped
    aggregate-initializer string.

    Operands are grouped by tensor type into per-type "int -> values" maps;
    every operand also contributes an entry to the dimensions map.  Exits
    the process (after printing a traceback) if an operand's type is not
    in the supported set.
    """
    supportedTensors = [
        "DIMENSIONS",
        "TENSOR_FLOAT32",
        "TENSOR_INT32",
        "TENSOR_QUANT8_ASYMM",
        "TENSOR_OEM_BYTE",
        "TENSOR_QUANT16_SYMM",
        "TENSOR_FLOAT16",
        "TENSOR_BOOL8",
        "TENSOR_QUANT8_SYMM_PER_CHANNEL",
        "TENSOR_QUANT16_ASYMM",
        "TENSOR_QUANT8_SYMM",
    ]
    typedMap = {t: [] for t in supportedTensors}

    # PEP 8: use def, not a lambda bound to a name.
    def feed_and_get(op, d):
        # Feed the operand its example values, then render them as a C++
        # list-initialization string.
        return op.Feed(d).GetListInitialization()

    # group the operands by type
    for operand in operands:
        try:
            typedMap[operand.type.type].append(feed_and_get(operand, feedDict))
            typedMap["DIMENSIONS"].append(
                "{%d, {%s}}" % (operand.index, GetJointStr(operand.dimensions)))
        except KeyError:
            # Unsupported tensor type: report and abort the whole dump.
            traceback.print_exc()
            sys.exit("Cannot dump tensor of type {}".format(operand.type.type))

    # NOTE(review): indentation inside this template was recovered from a
    # whitespace-mangled source; confirm against the generated C++ files.
    mixedTypeTemplate = """\
{{ // See tools/test_generator/include/TestHarness.h:MixedTyped
  // int -> Dimensions map
  .operandDimensions = {{{dimensions_map}}},
  // int -> FLOAT32 map
  .float32Operands = {{{float32_map}}},
  // int -> INT32 map
  .int32Operands = {{{int32_map}}},
  // int -> QUANT8_ASYMM map
  .quant8AsymmOperands = {{{uint8_map}}},
  // int -> QUANT16_SYMM map
  .quant16SymmOperands = {{{int16_map}}},
  // int -> FLOAT16 map
  .float16Operands = {{{float16_map}}},
  // int -> BOOL8 map
  .bool8Operands = {{{bool8_map}}},
  // int -> QUANT8_SYMM_PER_CHANNEL map
  .quant8ChannelOperands = {{{int8_map}}},
  // int -> QUANT16_ASYMM map
  .quant16AsymmOperands = {{{uint16_map}}},
  // int -> QUANT8_SYMM map
  .quant8SymmOperands = {{{quant8_symm_map}}},
}}"""
    return mixedTypeTemplate.format(
        dimensions_map=tg.GetJointStr(typedMap.get("DIMENSIONS", [])),
        float32_map=tg.GetJointStr(typedMap.get("TENSOR_FLOAT32", [])),
        int32_map=tg.GetJointStr(typedMap.get("TENSOR_INT32", [])),
        # OEM byte tensors share the uint8 map with quant8 asymm tensors
        uint8_map=tg.GetJointStr(
            typedMap.get("TENSOR_QUANT8_ASYMM", []) +
            typedMap.get("TENSOR_OEM_BYTE", [])),
        int16_map=tg.GetJointStr(typedMap.get("TENSOR_QUANT16_SYMM", [])),
        float16_map=tg.GetJointStr(typedMap.get("TENSOR_FLOAT16", [])),
        int8_map=tg.GetJointStr(
            typedMap.get("TENSOR_QUANT8_SYMM_PER_CHANNEL", [])),
        bool8_map=tg.GetJointStr(typedMap.get("TENSOR_BOOL8", [])),
        uint16_map=tg.GetJointStr(typedMap.get("TENSOR_QUANT16_ASYMM", [])),
        quant8_symm_map=tg.GetJointStr(typedMap.get("TENSOR_QUANT8_SYMM", [])))