from subprocess import call, check_call  # needed to shell out to the hadoop CLI

def _populate_secondary_fs_partitions(self, db_name):
  # The parent directory may already exist, so it is fine if this call fails.
  call(["hadoop", "fs", "-mkdir", get_secondary_fs_path("/multi_fs_tests/")], shell=False)
  check_call(["hadoop", "fs", "-mkdir",
              get_secondary_fs_path("/multi_fs_tests/%s.db/" % db_name)], shell=False)
  # Copy the test tables' data from the default warehouse into the new database
  # directory on the secondary filesystem.
  check_call(["hadoop", "fs", "-cp", "/test-warehouse/alltypes_parquet/",
              get_secondary_fs_path("/multi_fs_tests/%s.db/" % db_name)], shell=False)
  check_call(["hadoop", "fs", "-cp", "/test-warehouse/tinytable/",
              get_secondary_fs_path("/multi_fs_tests/%s.db/" % db_name)], shell=False)
def test_multiple_filesystems(self, vector, unique_database):
  try:
    self._populate_secondary_fs_partitions(unique_database)
    self.run_test_case('QueryTest/multiple-filesystems', vector, use_db=unique_database)
  finally:
    # We delete this from the secondary filesystem here because the database was created
    # in HDFS, but the queries will create this path in the secondary FS as well, so
    # dropping the database will not delete the directory in the secondary FS.
    check_call(["hadoop", "fs", "-rm", "-r",
                get_secondary_fs_path("/multi_fs_tests/%s.db/" % unique_database)],
               shell=False)
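Both examples resolve paths through the test framework's get_secondary_fs_path helper, which maps an absolute warehouse path onto the secondary filesystem. A minimal sketch of that idea follows; the SECONDARY_FILESYSTEM environment variable and the function body are illustrative assumptions, not the framework's actual implementation.

import os

# Scheme/authority prefix of the secondary filesystem, e.g. "s3a://test-bucket".
# The variable name is hypothetical and assumed to be set by the test environment.
SECONDARY_FILESYSTEM = os.getenv("SECONDARY_FILESYSTEM", "")

def get_secondary_fs_path(path):
  # Prepend the secondary filesystem prefix to an absolute warehouse path.
  return "%s%s" % (SECONDARY_FILESYSTEM, path)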
Example #3
def _populate_secondary_fs_partitions(self, db_name):
  # The parent directory may already exist, so it is fine if this call fails.
  call(["hadoop", "fs", "-mkdir", get_secondary_fs_path("/multi_fs_tests/")], shell=False)
  check_call(["hadoop", "fs", "-mkdir",
              get_secondary_fs_path("/multi_fs_tests/%s.db/" % db_name)], shell=False)
  self.filesystem_client.copy("/test-warehouse/alltypes_parquet/",
                              get_secondary_fs_path("/multi_fs_tests/%s.db/" % db_name),
                              overwrite=True)
  self.filesystem_client.copy("/test-warehouse/tinytable/",
                              get_secondary_fs_path("/multi_fs_tests/%s.db/" % db_name),
                              overwrite=True)
def test_multiple_filesystems(self, vector, unique_database):
  try:
    self._populate_secondary_fs_partitions(unique_database)
    self.run_test_case('QueryTest/multiple-filesystems', vector, use_db=unique_database)
  finally:
    # We delete this from the secondary filesystem here because the database was created
    # in HDFS, but the queries will create this path in the secondary FS as well, so
    # dropping the database will not delete the directory in the secondary FS.
    check_call(["hadoop", "fs", "-rm", "-r",
                get_secondary_fs_path("/multi_fs_tests/%s.db/" % unique_database)],
               shell=False)
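The two versions differ only in how table data reaches the secondary filesystem: the first shells out to "hadoop fs -cp", while the second goes through the framework's filesystem_client.copy with overwrite=True, which keeps repeated runs from failing when the destination already exists. A client of that shape could be as thin as a wrapper over the same CLI; the class below is a hypothetical illustration under that assumption, not the framework's actual client.

import subprocess

class HadoopCliFilesystemClient(object):
  # Hypothetical stand-in for the test framework's filesystem client.
  def copy(self, src, dst, overwrite=False):
    cmd = ["hadoop", "fs", "-cp"]
    if overwrite:
      cmd.append("-f")  # "hadoop fs -cp -f" overwrites an existing destination
    subprocess.check_call(cmd + [src, dst], shell=False)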