def test_wordcount_it(self):
    """Run the wordcount example on a real runner and verify its output files."""
    test_pipeline = TestPipeline(is_integration_test=True)

    # Build a per-job output prefix so concurrent runs do not collide.
    output = '/'.join([test_pipeline.get_option('output'),
                       test_pipeline.get_option('job_name'),
                       'results'])
    verifiers = [
        PipelineStateMatcher(),
        FileChecksumMatcher(output + '*-of-*', self.DEFAULT_CHECKSUM),
    ]
    extra_opts = {'output': output,
                  'on_success_matcher': all_of(*verifiers)}

    # Get pipeline options from command argument: --test-pipeline-options,
    # and start pipeline job by calling pipeline main function.
    wordcount.run(test_pipeline.get_full_options_as_args(**extra_opts))
def test_bigquery_tornadoes_it(self):
    """Run the BigQuery tornadoes example and verify the result table contents."""
    test_pipeline = TestPipeline(is_integration_test=True)

    # Timestamp the destination table so repeated runs write to fresh tables.
    output_table = ('BigQueryTornadoesIT'
                    '.monthly_tornadoes_%s' % int(round(time.time() * 1000)))
    query = 'SELECT month, tornado_count FROM [%s]' % output_table
    verifiers = [
        PipelineStateMatcher(),
        BigqueryMatcher(
            project=test_pipeline.get_option('project'),
            query=query,
            checksum=self.DEFAULT_CHECKSUM),
    ]
    extra_opts = {'output': output_table,
                  'on_success_matcher': all_of(*verifiers)}

    # Get pipeline options from command argument: --test-pipeline-options,
    # and start pipeline job by calling pipeline main function.
    bigquery_tornadoes.run(
        test_pipeline.get_full_options_as_args(**extra_opts))
def test_bigquery_tornadoes_it(self):
    """Integration test: run the tornadoes example and checksum its BQ output."""
    test_pipeline = TestPipeline(is_integration_test=True)

    # NOTE(review): this method appears duplicated in the file — confirm and
    # keep a single copy if so.
    # Unique table name per run, derived from the current time in millis.
    output_table = ('BigQueryTornadoesIT'
                    '.monthly_tornadoes_%s' % int(round(time.time() * 1000)))
    query = 'SELECT month, tornado_count FROM [%s]' % output_table
    matchers = [
        PipelineStateMatcher(),
        BigqueryMatcher(
            project=test_pipeline.get_option('project'),
            query=query,
            checksum=self.DEFAULT_CHECKSUM),
    ]
    extra_opts = {'output': output_table,
                  'on_success_matcher': all_of(*matchers)}

    # Get pipeline options from command argument: --test-pipeline-options,
    # and start pipeline job by calling pipeline main function.
    bigquery_tornadoes.run(
        test_pipeline.get_full_options_as_args(**extra_opts))
def test_wordcount_it(self):
    """End-to-end wordcount run; checks pipeline state and output checksum."""
    test_pipeline = TestPipeline(is_integration_test=True)

    # NOTE(review): this method appears duplicated in the file — confirm and
    # keep a single copy if so.
    # Output path is scoped by job name so parallel runs stay isolated.
    output = '/'.join([
        test_pipeline.get_option('output'),
        test_pipeline.get_option('job_name'),
        'results',
    ])
    matchers = [
        PipelineStateMatcher(),
        FileChecksumMatcher(output + '*-of-*', self.DEFAULT_CHECKSUM),
    ]
    extra_opts = {
        'output': output,
        'on_success_matcher': all_of(*matchers),
    }

    # Get pipeline options from command argument: --test-pipeline-options,
    # and start pipeline job by calling pipeline main function.
    wordcount.run(test_pipeline.get_full_options_as_args(**extra_opts))
def test_append_extra_options(self):
    """Extra keyword options are rendered into the full argument list."""
    pipeline = TestPipeline()
    for case in self.EXTRA_OPT_CASES:
        actual = pipeline.get_full_options_as_args(**case['options'])
        # Order is not guaranteed, so compare as sorted lists.
        self.assertListEqual(sorted(actual), sorted(case['expected']))
def test_create_test_pipeline_options(self):
    """Args built by TestPipeline round-trip into PipelineOptions correctly."""
    test_pipeline = TestPipeline(argv=self.TEST_CASE['options'])
    test_options = PipelineOptions(test_pipeline.get_full_options_as_args())
    # assertDictContainsSubset is deprecated since Python 3.2 and removed in
    # Python 3.12; assert the subset relationship explicitly instead.
    all_options = test_options.get_all_options()
    for key, value in self.TEST_CASE['expected_dict'].items():
        self.assertIn(key, all_options)
        self.assertEqual(all_options[key], value)
def test_empty_option_args_parsing(self):
    """A TestPipeline with no configured options yields an empty arg list."""
    pipeline = TestPipeline()
    self.assertListEqual([], pipeline.get_full_options_as_args())
def test_option_args_parsing(self):
    """Options supplied via argv parse back into the expected argument list."""
    test_pipeline = TestPipeline(argv=self.TEST_CASE['options'])
    actual = sorted(test_pipeline.get_full_options_as_args())
    expected = sorted(self.TEST_CASE['expected_list'])
    self.assertListEqual(actual, expected)