def test_batch_case(self):
    """Smoke-test the pyflink shell's batch environment end to end.

    Mirrors the batch example shipped in shell.py: builds a two-row
    table, registers an OldCsv file-system sink, inserts a projection
    into it, and then verifies the CSV file written to disk.
    """
    from pyflink.shell import b_env, bt_env, FileSystem, OldCsv, DataTypes, Schema
    # example begin
    import tempfile
    import os
    import shutil

    sink_path = tempfile.gettempdir() + '/batch.csv'
    # Clear any leftover output from a previous run; the sink may have been
    # written as a single file or as a directory of part files.
    if os.path.exists(sink_path):
        cleanup = os.remove if os.path.isfile(sink_path) else shutil.rmtree
        cleanup(sink_path)

    b_env.set_parallelism(1)
    source = bt_env.from_elements(
        [(1, 'hi', 'hello'), (2, 'hi', 'hello')], ['a', 'b', 'c'])

    # Build the CSV format and sink schema separately for readability;
    # field order matters (it defines the CSV column order), so it is
    # kept exactly as in the shell.py example.
    csv_format = OldCsv() \
        .field_delimiter(',') \
        .field("a", DataTypes.BIGINT()) \
        .field("b", DataTypes.STRING()) \
        .field("c", DataTypes.STRING())
    sink_schema = Schema() \
        .field("a", DataTypes.BIGINT()) \
        .field("b", DataTypes.STRING()) \
        .field("c", DataTypes.STRING())

    bt_env.connect(FileSystem().path(sink_path)) \
        .with_format(csv_format) \
        .with_schema(sink_schema) \
        .create_temporary_table("batch_sink")

    exec_insert_table(source.select("a + 1, b, c"), "batch_sink")

    # verify code, do not copy these code to shell.py
    with open(sink_path, 'r') as f:
        lines = f.read()
        self.assertEqual(lines, '2,hi,hello\n' + '3,hi,hello\n')
# NOTE(review): this is a collapsed copy of the pyflink-shell batch example as
# run in the shell environment itself (it uses execute_insert(...) and waits on
# the job result directly instead of going through a test helper). The snippet
# is cut off mid-statement — the trailing `with open(sink_path, 'r') as f:` has
# no body here — so confirm against the original script before relying on it.
################################################################################ # test pyflink shell environment from pyflink.shell import b_env, bt_env, FileSystem, OldCsv, DataTypes, Schema import tempfile import os import shutil sink_path = tempfile.gettempdir() + '/batch.csv' if os.path.exists(sink_path): if os.path.isfile(sink_path): os.remove(sink_path) else: shutil.rmtree(sink_path) b_env.set_parallelism(1) t = bt_env.from_elements([(1, 'hi', 'hello'), (2, 'hi', 'hello')], ['a', 'b', 'c']) bt_env.connect(FileSystem().path(sink_path)) \ .with_format(OldCsv() .field_delimiter(',') .field("a", DataTypes.BIGINT()) .field("b", DataTypes.STRING()) .field("c", DataTypes.STRING())) \ .with_schema(Schema() .field("a", DataTypes.BIGINT()) .field("b", DataTypes.STRING()) .field("c", DataTypes.STRING())) \ .create_temporary_table("batch_sink") t.select("a + 1, b, c").execute_insert("batch_sink").get_job_client().get_job_execution_result().result() with open(sink_path, 'r') as f: