def test_recovery_failed_nobackup(self):
    # Create the coverage
    cov, dset = self.get_cov()

    if cov._persistence_layer.master_manager.storage_type() != 'hdf':
        # TODO: Check for something Cassandra related
        self.assertTrue(True)
    else:
        cov_pth = cov.persistence_dir
        cov.close()

        # Analyze the valid coverage
        dr = CoverageDoctor(cov_pth, 'dprod', dset)

        dr_result = dr.analyze()

        # Corrupt the Master File
        fo = open(cov._persistence_layer.master_manager.file_path, "wb")
        fo.write('Junk')
        fo.close()

        # Corrupt the lon Parameter file
        fo = open(cov._persistence_layer.parameter_metadata['lon'].file_path, "wb")
        fo.write('Junk')
        fo.close()

        corrupt_res = dr.analyze(reanalyze=True)
        self.assertTrue(corrupt_res.is_corrupt)

        # Attempt to repair the metadata files - repairing without a backup is expected to fail
        with self.assertRaises(ValueError):
            temp_cov_dir = dr.repair(reanalyze=True, backup=False, copy_over=False, keep_temp=False)
            # Not reached when repair raises as expected
            log.info('Temporary Coverage Directory: %s', temp_cov_dir)
            self.assertFalse(os.path.exists(temp_cov_dir))

        nofix_res = dr.analyze(reanalyze=True)
        self.assertTrue(nofix_res.is_corrupt)
def test_coverage_recovery(self):
    # Create the coverage
    dp_id, stream_id, route, stream_def_id, dataset_id = self.load_data_product()
    self.populate_dataset(dataset_id, 36)
    dset = self.dataset_management.read_dataset(dataset_id)
    dprod = self.dpsc_cli.read_data_product(dp_id)
    cov = DatasetManagementService._get_simplex_coverage(dataset_id)
    cov_pth = cov.persistence_dir
    cov.close()

    # Analyze the valid coverage
    dr = CoverageDoctor(cov_pth, dprod, dset)

    dr_result = dr.analyze()

    # Get original values (mock)
    orig_cov = AbstractCoverage.load(cov_pth)
    time_vals_orig = orig_cov.get_time_values()

    # TODO: Destroy the metadata files
    # TODO: Re-analyze coverage
    # TODO: Should be corrupt, take action to repair if so

    # Repair the metadata files
    dr.repair_metadata()

    # TODO: Re-analyze fixed coverage

    fixed_cov = AbstractCoverage.load(cov_pth)
    self.assertIsInstance(fixed_cov, AbstractCoverage)

    time_vals_fixed = fixed_cov.get_time_values()
    self.assertTrue(np.array_equiv(time_vals_orig, time_vals_fixed))
def test_coverage_recovery(self):
    # Create the coverage
    cov, dset = self.get_cov()
    cov_pth = cov.persistence_dir
    cov.close()

    # Analyze the valid coverage
    dr = CoverageDoctor(cov_pth, 'dprod', dset)

    dr_result = dr.analyze()

    # TODO: Turn these into meaningful Asserts
    self.assertEqual(len(dr_result.get_brick_corruptions()), 0)
    self.assertEqual(len(dr_result.get_brick_size_ratios()), 6)
    self.assertEqual(len(dr_result.get_corruptions()), 0)
    self.assertEqual(len(dr_result.get_master_corruption()), 0)
    self.assertEqual(len(dr_result.get_param_corruptions()), 0)
    self.assertEqual(len(dr_result.get_param_size_ratios()), 3)
    self.assertEqual(len(dr_result.get_master_size_ratio()), 1)
    self.assertEqual(len(dr_result.get_size_ratios()), 10)
    self.assertEqual(dr_result.master_status[1], 'NORMAL')

    self.assertFalse(dr_result.is_corrupt)
    self.assertEqual(dr_result.param_file_count, 3)
    self.assertEqual(dr_result.brick_file_count, 6)
    self.assertEqual(dr_result.total_file_count, 10)

    # Get original values (mock)
    orig_cov = AbstractCoverage.load(cov_pth)
    time_vals_orig = orig_cov.get_time_values()
    orig_cov.close()

    # Corrupt the Master File
    fo = open(cov._persistence_layer.master_manager.file_path, "wb")
    fo.write('Junk')
    fo.close()

    # Corrupt the lon Parameter file
    fo = open(cov._persistence_layer.parameter_metadata['lon'].file_path, "wb")
    fo.write('Junk')
    fo.close()

    corrupt_res = dr.analyze(reanalyze=True)
    self.assertTrue(corrupt_res.is_corrupt)

    # Repair the metadata files
    dr.repair(reanalyze=True)

    fixed_res = dr.analyze(reanalyze=True)
    self.assertFalse(fixed_res.is_corrupt)

    fixed_cov = AbstractCoverage.load(cov_pth)
    self.assertIsInstance(fixed_cov, AbstractCoverage)

    time_vals_fixed = fixed_cov.get_time_values()
    fixed_cov.close()

    self.assertTrue(np.array_equiv(time_vals_orig, time_vals_fixed))
def test_coverage_recovery(self):
    # Create the coverage
    cov, dset = self.get_cov()

    if cov._persistence_layer.master_manager.storage_type() != 'hdf':
        # TODO: Check for something Cassandra related
        self.assertTrue(True)
    else:
        cov_pth = cov.persistence_dir
        cov.close()

        # Analyze the valid coverage
        dr = CoverageDoctor(cov_pth, 'dprod', dset)

        dr_result = dr.analyze()

        # TODO: Turn these into meaningful Asserts
        self.assertEqual(len(dr_result.get_brick_corruptions()), 0)
        self.assertEqual(len(dr_result.get_brick_size_ratios()), 6)
        self.assertEqual(len(dr_result.get_corruptions()), 0)
        self.assertEqual(len(dr_result.get_master_corruption()), 0)
        self.assertEqual(len(dr_result.get_param_corruptions()), 0)
        self.assertEqual(len(dr_result.get_param_size_ratios()), 3)
        self.assertEqual(len(dr_result.get_master_size_ratio()), 1)
        self.assertEqual(len(dr_result.get_size_ratios()), 10)
        self.assertEqual(dr_result.master_status[1], 'NORMAL')

        self.assertFalse(dr_result.is_corrupt)
        self.assertEqual(dr_result.param_file_count, 3)
        self.assertEqual(dr_result.brick_file_count, 6)
        self.assertEqual(dr_result.total_file_count, 10)

        # Get original values (mock)
        orig_cov = AbstractCoverage.load(cov_pth)
        time_vals_orig = orig_cov.get_time_values()
        orig_cov.close()

        # Corrupt the Master File
        fo = open(cov._persistence_layer.master_manager.file_path, "wb")
        fo.write('Junk')
        fo.close()

        # Corrupt the lon Parameter file
        fo = open(cov._persistence_layer.parameter_metadata['lon'].file_path, "wb")
        fo.write('Junk')
        fo.close()

        corrupt_res = dr.analyze(reanalyze=True)
        self.assertTrue(corrupt_res.is_corrupt)

        # Repair the metadata files
        dr.repair(reanalyze=True)

        fixed_res = dr.analyze(reanalyze=True)
        self.assertFalse(fixed_res.is_corrupt)

        fixed_cov = AbstractCoverage.load(cov_pth)
        self.assertIsInstance(fixed_cov, AbstractCoverage)

        time_vals_fixed = fixed_cov.get_time_values()
        fixed_cov.close()

        self.assertTrue(np.array_equiv(time_vals_orig, time_vals_fixed))
def test_coverage_recovery(self):
    # Create the coverage
    dp_id, stream_id, route, stream_def_id, dataset_id = self.load_data_product()
    self.populate_dataset(dataset_id, 36)
    dset = self.dataset_management.read_dataset(dataset_id)
    dprod = self.dpsc_cli.read_data_product(dp_id)
    cov = DatasetManagementService._get_simplex_coverage(dataset_id)
    cov_pth = cov.persistence_dir
    cov.close()

    # Analyze the valid coverage
    dr = CoverageDoctor(cov_pth, dprod, dset)

    dr_result = dr.analyze()

    # TODO: Turn these into meaningful Asserts
    self.assertEqual(len(dr_result.get_brick_corruptions()), 0)
    self.assertEqual(len(dr_result.get_brick_size_ratios()), 8)
    self.assertEqual(len(dr_result.get_corruptions()), 0)
    self.assertEqual(len(dr_result.get_master_corruption()), 0)
    self.assertEqual(len(dr_result.get_param_corruptions()), 0)
    self.assertEqual(len(dr_result.get_param_size_ratios()), 64)
    self.assertEqual(len(dr_result.get_master_size_ratio()), 1)
    self.assertEqual(len(dr_result.get_size_ratios()), 73)
    self.assertEqual(dr_result.master_status[1], 'NORMAL')

    self.assertFalse(dr_result.is_corrupt)
    self.assertEqual(dr_result.param_file_count, 64)
    self.assertEqual(dr_result.brick_file_count, 8)
    self.assertEqual(dr_result.total_file_count, 73)

    # Get original values (mock)
    orig_cov = AbstractCoverage.load(cov_pth)
    time_vals_orig = orig_cov.get_time_values()
    orig_cov.close()

    # Corrupt the Master File
    fo = open(cov._persistence_layer.master_manager.file_path, "wb")
    fo.write('Junk')
    fo.close()

    # Corrupt the lon Parameter file
    fo = open(cov._persistence_layer.parameter_metadata['lon'].file_path, "wb")
    fo.write('Junk')
    fo.close()

    corrupt_res = dr.analyze(reanalyze=True)
    self.assertTrue(corrupt_res.is_corrupt)

    # Repair the metadata files
    dr.repair(reanalyze=True)

    fixed_res = dr.analyze(reanalyze=True)
    self.assertFalse(fixed_res.is_corrupt)

    fixed_cov = AbstractCoverage.load(cov_pth)
    self.assertIsInstance(fixed_cov, AbstractCoverage)

    time_vals_fixed = fixed_cov.get_time_values()
    fixed_cov.close()

    self.assertTrue(np.array_equiv(time_vals_orig, time_vals_fixed))
def test_recovery_failed_nobackup(self):
    # Create the coverage
    cov, dset = self.get_cov()
    cov_pth = cov.persistence_dir
    cov.close()

    # Analyze the valid coverage
    dr = CoverageDoctor(cov_pth, 'dprod', dset)

    dr_result = dr.analyze()

    # Corrupt the Master File
    fo = open(cov._persistence_layer.master_manager.file_path, "wb")
    fo.write('Junk')
    fo.close()

    # Corrupt the lon Parameter file
    fo = open(cov._persistence_layer.parameter_metadata['lon'].file_path, "wb")
    fo.write('Junk')
    fo.close()

    corrupt_res = dr.analyze(reanalyze=True)
    self.assertTrue(corrupt_res.is_corrupt)

    # Attempt to repair the metadata files - repairing without a backup is expected to fail
    with self.assertRaises(ValueError):
        temp_cov_dir = dr.repair(reanalyze=True, backup=False, copy_over=False, keep_temp=False)
        # Not reached when repair raises as expected
        log.info('Temporary Coverage Directory: %s', temp_cov_dir)
        self.assertFalse(os.path.exists(temp_cov_dir))

    nofix_res = dr.analyze(reanalyze=True)
    self.assertTrue(nofix_res.is_corrupt)
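# NOTE: illustrative sketch only, not part of the original suite. The tests above
# repeat the same corruption step (overwriting an HDF metadata file with junk bytes
# so that CoverageDoctor reports it as corrupt). A hypothetical helper along these
# lines could factor that out; the name _corrupt_file is an assumption, not existing API.
def _corrupt_file(file_path, junk='Junk'):
    # Replace the file contents with junk so a subsequent analyze() flags corruption,
    # e.g. _corrupt_file(cov._persistence_layer.master_manager.file_path)
    with open(file_path, 'wb') as fo:
        fo.write(junk)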
            'unable to open file (File accessibilty: Unable to open file)',
            'unable to open file (File accessability: Unable to open file)']
        # NOTE: the misspellings above appear to be matched verbatim against the
        # underlying library's error text, so they are left as-is.

        if fs in ex.message:
            # The view coverage couldn't load its underlying reference coverage
            cpth = ex.message[len(fs):-1]
        for err in accessibility_errors:
            if err in ex.message:
                cpth = self.get_coverage_path(dataset_id)
                self.pause_ingestion(self.get_stream_id(dataset_id))
                break
        else:
            log.critical("Unmatched error: %s", ex.message)
            raise

    # Return the CoverageDoctor instance
    return CoverageDoctor(cpth, dprod_obj, dset_obj)

def run_coverage_doctor(self, dataset_id, data_product_id=None):
    log.error('Running coverage doctor')
    dr = self.get_coverage_doctor(dataset_id, data_product_id=data_product_id)

    if dr.analyze().is_corrupt:
        dr.repair()
    else:
        log.error("Repair Not Necessary")
        return "Repair Not Necessary"

    if not dr.analyze(reanalyze=True).is_corrupt:
        log.error("Repair Successful")
        return "Repair Successful"
    else:
def test_coverage_recovery(self):
    # Create the coverage
    dp_id, stream_id, route, stream_def_id, dataset_id = self.load_data_product()
    self.populate_dataset(dataset_id, 36)
    dset = self.dataset_management.read_dataset(dataset_id)
    dprod = self.dpsc_cli.read_data_product(dp_id)
    cov = DatasetManagementService._get_simplex_coverage(dataset_id)
    cov_pth = cov.persistence_dir
    cov.close()

    num_params = len(cov.list_parameters())
    num_bricks = 8
    total = num_params + num_bricks + 1

    # Analyze the valid coverage
    dr = CoverageDoctor(cov_pth, dprod, dset)

    dr_result = dr.analyze()

    # TODO: Turn these into meaningful Asserts
    self.assertEqual(len(dr_result.get_brick_corruptions()), 0)
    self.assertEqual(len(dr_result.get_brick_size_ratios()), num_bricks)
    self.assertEqual(len(dr_result.get_corruptions()), 0)
    self.assertEqual(len(dr_result.get_master_corruption()), 0)
    self.assertEqual(len(dr_result.get_param_corruptions()), 0)
    self.assertEqual(len(dr_result.get_param_size_ratios()), num_params)
    self.assertEqual(len(dr_result.get_master_size_ratio()), 1)
    self.assertEqual(len(dr_result.get_size_ratios()), total)
    self.assertEqual(dr_result.master_status[1], 'NORMAL')

    self.assertFalse(dr_result.is_corrupt)
    self.assertEqual(dr_result.param_file_count, num_params)
    self.assertEqual(dr_result.brick_file_count, num_bricks)
    self.assertEqual(dr_result.total_file_count, total)

    # Get original values (mock)
    orig_cov = AbstractCoverage.load(cov_pth)
    time_vals_orig = orig_cov.get_time_values()
    orig_cov.close()

    # Corrupt the Master File
    fo = open(cov._persistence_layer.master_manager.file_path, "wb")
    fo.write('Junk')
    fo.close()

    # Corrupt the lon Parameter file
    fo = open(cov._persistence_layer.parameter_metadata['lon'].file_path, "wb")
    fo.write('Junk')
    fo.close()

    corrupt_res = dr.analyze(reanalyze=True)
    self.assertTrue(corrupt_res.is_corrupt)

    # Repair the metadata files
    dr.repair(reanalyze=True)

    fixed_res = dr.analyze(reanalyze=True)
    self.assertFalse(fixed_res.is_corrupt)

    fixed_cov = AbstractCoverage.load(cov_pth)
    self.assertIsInstance(fixed_cov, AbstractCoverage)

    time_vals_fixed = fixed_cov.get_time_values()
    fixed_cov.close()

    self.assertTrue(np.array_equiv(time_vals_orig, time_vals_fixed))