def test_check_output(self):
    """GPU-only: verify quantized conv2d output and pass compatibility."""
    if not core.is_compiled_with_cuda():
        return
    self.check_output_with_option(True, flatten=True, quant=True)
    self.assertTrue(
        PassVersionChecker.IsCompatible('quant_conv2d_dequant_fuse_pass'))
def test_check_output(self):
    """GPU-only: compare TRT output with the reference result."""
    if not core.is_compiled_with_cuda():
        return
    # TRT output shape of fc is (1, 1000, 1, 1). To compare the output
    # value only, flatten the results.
    self.check_output_with_option(True, flatten=True)
    self.assertTrue(
        PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))
def test_check_output(self):
    """GPU-only: check fused output and assert pass version compatibility."""
    # There is no cpu pass for transpose_flatten_concat_fuse
    if core.is_compiled_with_cuda():
        use_gpu = True
        self.check_output_with_option(use_gpu)
        # Bug fix: the original called IsCompatible() and silently discarded
        # its boolean result, so an incompatible pass version could never
        # fail this test. Wrap it in assertTrue, consistent with the other
        # tests in this file.
        self.assertTrue(
            PassVersionChecker.IsCompatible(
                'transpose_flatten_concat_fuse_pass'))
def test_check_output(self):
    """GPU-only: verify conv + elementwise_add + act fusion output."""
    if not core.is_compiled_with_cuda():
        return
    self.check_output_with_option(True)
    self.assertTrue(
        PassVersionChecker.IsCompatible('conv_elementwise_add_act_fuse_pass'))
def test_check_output(self):
    """GPU-only: loose-tolerance comparison for the quantized TRT path."""
    if not core.is_compiled_with_cuda():
        return
    self.check_output_with_option(True, atol=1, flatten=False, rtol=1e-1)
    self.assertTrue(
        PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))
def test_check_output(self):
    """Clear any stale optimization cache, then run the GPU-only check."""
    cache_dir = self.path + "_opt_cache"
    if os.path.exists(cache_dir):
        shutil.rmtree(cache_dir)
    if not core.is_compiled_with_cuda():
        return
    self.check_output_with_option(True)
    self.assertTrue(
        PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))
def test_check_output(self):
    """Check fc_fuse_pass output on CPU and, when available, on GPU."""
    use_gpu_options = [False]
    if core.is_compiled_with_cuda():
        use_gpu_options.append(True)
    # Idiom fix: iterate the list directly instead of indexing through
    # range(len(...)).
    for use_gpu in use_gpu_options:
        self.check_output_with_option(use_gpu)
    self.assertTrue(PassVersionChecker.IsCompatible('fc_fuse_pass'))
def check_output(self):
    """GPU-only: compare outputs, loosening tolerance for FP16 TRT runs."""
    if not core.is_compiled_with_cuda():
        return
    is_half = (
        self.trt_parameters.precision == AnalysisConfig.Precision.Half)
    tolerance = 2e-2 if is_half else 1e-5
    self.check_output_with_option(True, tolerance, flatten=True)
    self.assertTrue(
        PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))
def check_op_version(self):
    """Return True iff every registered pass known to the framework passes
    the version-compatibility check; log each failing pass."""
    ok = True
    for pass_name in self.passes:
        # Only passes the framework actually exposes are checked.
        if pass_name not in self.available_passes_in_framework:
            continue
        if PassVersionChecker.IsCompatible(pass_name):
            continue
        self.fail_log('{} version check failed.'.format(pass_name))
        ok = False
    return ok
def check_output(self, remove_cache=False):
    """GPU-only output check; optionally wipe the optimization cache first.

    FP16 TRT precision gets a looser absolute tolerance.
    """
    cache_dir = self.path + "_opt_cache"
    if remove_cache and os.path.exists(cache_dir):
        shutil.rmtree(cache_dir)
    if not core.is_compiled_with_cuda():
        return
    is_half = (
        self.trt_parameters.precision == AnalysisConfig.Precision.Half)
    tolerance = 2e-2 if is_half else 1e-5
    self.check_output_with_option(True, tolerance, flatten=True)
    self.assertTrue(
        PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))
def test_check_output(self):
    """GPU-only: clear stale cache, then check output per TRT precision."""
    if not core.is_compiled_with_cuda():
        return
    cache_dir = self.path + "_opt_cache"
    if os.path.exists(cache_dir):
        shutil.rmtree(cache_dir)
    fp32 = (
        self.trt_parameters.precision == AnalysisConfig.Precision.Float32)
    if fp32:
        self.check_output_with_option(True)
    else:
        # Lower precision needs a looser tolerance.
        self.check_output_with_option(True, 1e-3)
    self.assertTrue(
        PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))
def test_check_output(self):
    """CPU-only: verify repeated fc+relu fusion output and pass version."""
    on_gpu = False
    self.check_output_with_option(on_gpu)
    compatible = PassVersionChecker.IsCompatible('repeated_fc_relu_fuse_pass')
    self.assertTrue(compatible)
def test_check_output(self):
    """CPU-only: verify squared-matmul-subtract fusion output and version."""
    on_gpu = False
    self.check_output_with_option(on_gpu)
    compatible = PassVersionChecker.IsCompatible('squared_mat_sub_fuse_pass')
    self.assertTrue(compatible)
def test_check_output(self):
    """CPU-only: verify layer-norm fusion output and pass compatibility."""
    on_gpu = False
    self.check_output_with_option(on_gpu)
    compatible = PassVersionChecker.IsCompatible("layer_norm_fuse_pass")
    self.assertTrue(compatible)
def test_check_output(self):
    """GPU-only: compare flattened TRT subgraph output with the reference."""
    if not core.is_compiled_with_cuda():
        return
    self.check_output_with_option(True, flatten=True)
    self.assertTrue(
        PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))
def test_check_output(self):
    """Run the output check and assert shuffle-channel pass compatibility."""
    self.check_output()
    compatible = PassVersionChecker.IsCompatible('shuffle_channel_detect_pass')
    self.assertTrue(compatible)
def test_check_output(self):
    """Run the output check and assert the configured pass is compatible."""
    self.check_output()
    compatible = PassVersionChecker.IsCompatible(self.pass_name)
    self.assertTrue(compatible)
def test_check_output(self):
    """Run the output check and assert conv+eltadd+bn fusion compatibility."""
    self.check_output()
    compatible = PassVersionChecker.IsCompatible(
        'conv_eltwiseadd_bn_fuse_pass')
    self.assertTrue(compatible)
def test_check_output(self):
    """CPU-only: verify MKL-DNN conv+bias fusion output and pass version."""
    on_gpu = False
    self.check_output_with_option(on_gpu)
    compatible = PassVersionChecker.IsCompatible("conv_bias_mkldnn_fuse_pass")
    self.assertTrue(compatible)
def test_compatible(self):
    """Assert seq_concat_fc_fuse_pass passes the version check."""
    compatible = PassVersionChecker.IsCompatible('seq_concat_fc_fuse_pass')
    self.assertTrue(compatible)
def test_pass_compatible(self):
    """Assert the configured pass passes the version check."""
    compatible = PassVersionChecker.IsCompatible(self.pass_name)
    self.assertTrue(compatible)
def test_check_output(self):
    """Run the output check and assert conv+affine_channel compatibility."""
    self.check_output()
    compatible = PassVersionChecker.IsCompatible(
        'conv_affine_channel_fuse_pass')
    self.assertTrue(compatible)
def test_check_output(self):
    """CPU-only: verify bfloat16 pass output (flattened) and version."""
    on_gpu = False
    self.check_output_with_option(on_gpu, flatten=True)
    compatible = PassVersionChecker.IsCompatible('cpu_bfloat16_pass')
    self.assertTrue(compatible)
def test_check_output(self):
    """Run the output check and assert seqconv+eltadd+relu compatibility."""
    self.check_output()
    compatible = PassVersionChecker.IsCompatible(
        'seqconv_eltadd_relu_fuse_pass')
    self.assertTrue(compatible)