def test_firstcal_metrics_run(self):
    """End-to-end test of firstcal_metrics_run: no input files raises, one
    calfits file produces the expected JSON metrics file on disk."""
    # get argument object
    a = utils.get_metrics_ArgumentParser('firstcal_metrics')
    if DATA_PATH not in sys.path:
        sys.path.append(DATA_PATH)
    arg0 = "--std_cut=0.5"
    arg1 = "--extension=.firstcal_metrics.json"
    arg2 = "--metrics_path={}".format(
        os.path.join(DATA_PATH, 'test_output'))
    arguments = ' '.join([arg0, arg1, arg2])
    # Test runing with no files
    # (the trailing '' leaves the positional file list empty)
    cmd = ' '.join([arguments, ''])
    args = a.parse_args(cmd.split())
    history = cmd
    self.assertRaises(AssertionError,
                      firstcal_metrics.firstcal_metrics_run,
                      args.files, args, history)
    # Test running with file
    filename = os.path.join(DATA_PATH,
                            'zen.2457555.50099.yy.HH.uvcA.first.calfits')
    dest_file = os.path.join(
        DATA_PATH, 'test_output',
        'zen.2457555.50099.yy.HH.uvcA.first.calfits.'
        + 'firstcal_metrics.json')
    # Remove stale output from any previous run so the existence check below
    # really tests this invocation.
    if os.path.exists(dest_file):
        os.remove(dest_file)
    cmd = ' '.join([arguments, filename])
    args = a.parse_args(cmd.split())
    history = cmd
    firstcal_metrics.firstcal_metrics_run(args.files, args, history)
    self.assertTrue(os.path.exists(dest_file))
    os.remove(dest_file)
def test_ant_metrics_run_no_metrics(): """Test an argument is raised if no metrics are set to True.""" # get arguments a = utils.get_metrics_ArgumentParser('ant_metrics') if DATA_PATH not in sys.path: sys.path.append(DATA_PATH) arg1 = "--crossCut=5" arg2 = "--deadCut=5" arg3 = "--extension=.ant_metrics.hdf5" arg4 = "--metrics_path={}".format(os.path.join(DATA_PATH, 'test_output')) arg5 = "--vis_format=miriad" arg6 = "--alwaysDeadCut=10" arg7 = "--skip_mean_vij" arg8 = "--skip_red_corr" arg9 = "--skip_cross_pols" arguments = ' '.join( [arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9]) xx_file = os.path.join(DATA_PATH, 'zen.2458002.47754.xx.HH.uvA') dest_file = os.path.join(DATA_PATH, 'test_output', 'zen.2458002.47754.HH.uvA.ant_metrics.hdf5') if os.path.exists(dest_file): os.remove(dest_file) cmd = ' '.join([arguments, xx_file]) args = a.parse_args(cmd.split()) history = cmd pols = list(args.pol.split(',')) pytest.raises(AssertionError, ant_metrics.ant_metrics_run, args.files, pols, args.crossCut, args.deadCut, args.alwaysDeadCut, args.metrics_path, args.extension, args.vis_format, args.verbose, history, args.run_mean_vij, args.run_red_corr, args.run_cross_pols)
def test_run_ant_metrics_no_files():
    """ant_metrics_run must raise when given an empty file list."""
    # get argument object
    a = utils.get_metrics_ArgumentParser('ant_metrics')
    if DATA_PATH not in sys.path:
        sys.path.append(DATA_PATH)
    arg1 = "--crossCut=5"
    arg2 = "--deadCut=5"
    arg3 = "--extension=.ant_metrics.hdf5"
    arg4 = "--metrics_path={}".format(os.path.join(DATA_PATH, 'test_output'))
    arg5 = "--vis_format=miriad"
    arg6 = "--alwaysDeadCut=10"
    arg7 = "--run_mean_vij"
    arg8 = "--run_red_corr"
    arg9 = "--run_cross_pols"
    arguments = ' '.join(
        [arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9])
    # test running with no files
    # (trailing '' keeps the positional file list empty)
    cmd = ' '.join([arguments, ''])
    args = a.parse_args(cmd.split())
    pols = list(args.pol.split(','))
    history = cmd
    pytest.raises(AssertionError, ant_metrics.ant_metrics_run,
                  args.files, pols, args.crossCut, args.deadCut,
                  args.alwaysDeadCut, args.metrics_path,
                  args.extension, args.vis_format,
                  args.verbose, history, args.run_mean_vij,
                  args.run_red_corr, args.run_cross_pols)
def test_run_ant_metrics_one_file():
    """A single polarization file is insufficient: expect a warning followed
    by an AssertionError from ant_metrics_run."""
    a = utils.get_metrics_ArgumentParser('ant_metrics')
    if DATA_PATH not in sys.path:
        sys.path.append(DATA_PATH)
    arg1 = "--crossCut=5"
    arg2 = "--deadCut=5"
    arg3 = "--extension=.ant_metrics.hdf5"
    arg4 = "--metrics_path={}".format(os.path.join(DATA_PATH, 'test_output'))
    arg5 = "--vis_format=miriad"
    arg6 = "--alwaysDeadCut=10"
    arg7 = "--run_mean_vij"
    arg8 = "--run_red_corr"
    arg9 = "--run_cross_pols"
    arguments = ' '.join(
        [arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9])
    # test running with a lone file
    lone_file = os.path.join(DATA_PATH,
                             'zen.2457698.40355.xx.HH.uvcAA')
    cmd = ' '.join([arguments, lone_file])
    args = a.parse_args(cmd.split())
    history = cmd
    pols = list(args.pol.split(','))
    # this test raises a warning, then fails...
    # checkWarnings verifies the 'Could not find' warning while pytest.raises
    # (invoked inside it) verifies the subsequent AssertionError.
    uvtest.checkWarnings(pytest.raises,
                         [AssertionError, ant_metrics.ant_metrics_run,
                          args.files, pols, args.crossCut,
                          args.deadCut, args.alwaysDeadCut,
                          args.metrics_path, args.extension,
                          args.vis_format, args.verbose, history,
                          args.run_mean_vij, args.run_red_corr,
                          args.run_cross_pols],
                         nwarnings=1, message='Could not find')
def test_get_metrics_ArgumentParser_xrfi_run():
    """Spot-check defaults and one override for the 'xrfi_run' parser."""
    parser = utils.get_metrics_ArgumentParser('xrfi_run')
    # Parsing an empty argument sequence exercises the defaults.
    defaults = parser.parse_args('')
    assert defaults.kt_size == 8
    assert defaults.sig_init == 6.0
    assert defaults.ex_ants is None
    # An explicitly supplied option must override its default.
    overridden = parser.parse_args(['--sig_adj', '3.0'])
    assert overridden.sig_adj == 3.0
def test_get_metrics_ArgumentParser_day_threshold_run():
    """Spot-check defaults and one override for the 'day_threshold_run' parser."""
    parser = utils.get_metrics_ArgumentParser('day_threshold_run')
    # This parser requires a positional data file, so supply a dummy one.
    defaults = parser.parse_args(['fooey'])
    assert defaults.nsig_f_adj == 3.0
    assert defaults.nsig_f == 7.0
    assert defaults.data_files == ['fooey']
    # An explicitly supplied option must override its default.
    overridden = parser.parse_args(['--nsig_t', '3.0', 'fooey'])
    assert overridden.nsig_t == 3.0
def test_get_metrics_ArgumentParser_firstcal_metrics():
    """Spot-check defaults and one override for the 'firstcal_metrics' parser."""
    parser = utils.get_metrics_ArgumentParser('firstcal_metrics')
    # Parsing an empty argument sequence exercises the defaults.
    defaults = parser.parse_args('')
    nt.assert_equal(defaults.std_cut, 0.5)
    nt.assert_equal(defaults.extension, '.firstcal_metrics.json')
    nt.assert_equal(defaults.metrics_path, '')
    # An explicitly supplied option must override its default.
    overridden = parser.parse_args(['--extension', 'foo'])
    nt.assert_equal(overridden.extension, 'foo')
def test_get_metrics_ArgumentParser_firstcal_metrics():
    """Spot-check defaults and one override for the 'firstcal_metrics' parser."""
    parser = utils.get_metrics_ArgumentParser('firstcal_metrics')
    # Parsing an empty argument sequence exercises the defaults.
    defaults = parser.parse_args('')
    assert defaults.std_cut == 0.5
    assert defaults.extension == '.firstcal_metrics.hdf5'
    assert defaults.metrics_path == ''
    # An explicitly supplied option must override its default.
    overridden = parser.parse_args(['--extension', 'foo'])
    assert overridden.extension == 'foo'
def test_get_metrics_ArgumentParser_xrfi_apply():
    """Spot-check defaults and one override for the 'xrfi_apply' parser."""
    parser = utils.get_metrics_ArgumentParser('xrfi_apply')
    # Parsing an empty argument sequence exercises the defaults.
    defaults = parser.parse_args('')
    nt.assert_equal(defaults.infile_format, 'miriad')
    nt.assert_equal(defaults.extension, 'R')
    nt.assert_equal(defaults.flag_file, None)
    nt.assert_equal(defaults.output_uvflag, True)
    # An explicitly supplied option must override its default.
    overridden = parser.parse_args(['--waterfalls', 'a,g'])
    nt.assert_equal(overridden.waterfalls, 'a,g')
def test_get_metrics_ArgumentParser_omnical_metrics():
    """Spot-check defaults and one override for the 'omnical_metrics' parser."""
    a = utils.get_metrics_ArgumentParser('omnical_metrics')
    # First try defaults - test a few of them
    args = a.parse_args('')
    assert args.fc_files is None
    assert args.phs_std_cut == 0.3
    assert args.extension == '.omni_metrics.json'
    assert args.metrics_path == ''
    # try to set something
    args = a.parse_args(['--extension', 'foo'])
    # BUG FIX: was `assert args.extension, 'foo'`, which only checked
    # truthiness (with 'foo' as the assert message) and never compared the
    # value; the intended check is equality.
    assert args.extension == 'foo'
def test_get_metrics_ArgumentParser_omnical_metrics():
    """Spot-check defaults and one override for the 'omnical_metrics' parser."""
    parser = utils.get_metrics_ArgumentParser('omnical_metrics')
    # Parsing an empty argument sequence exercises the defaults.
    defaults = parser.parse_args('')
    nt.assert_equal(defaults.fc_files, None)
    nt.assert_equal(defaults.phs_std_cut, 0.3)
    nt.assert_equal(defaults.extension, '.omni_metrics.json')
    nt.assert_equal(defaults.metrics_path, '')
    # An explicitly supplied option must override its default.
    overridden = parser.parse_args(['--extension', 'foo'])
    nt.assert_equal(overridden.extension, 'foo')
def test_get_metrics_ArgumentParser_ant_metrics():
    """Spot-check defaults and one override for the 'ant_metrics' parser."""
    parser = utils.get_metrics_ArgumentParser('ant_metrics')
    # Parsing an empty argument sequence exercises the defaults.
    defaults = parser.parse_args('')
    nt.assert_equal(defaults.crossCut, 5.0)
    nt.assert_equal(defaults.alwaysDeadCut, 10.0)
    nt.assert_equal(defaults.metrics_path, '')
    nt.assert_equal(defaults.verbose, True)
    # An explicitly supplied option must override its default.
    overridden = parser.parse_args(['--extension', 'foo'])
    nt.assert_equal(overridden.extension, 'foo')
def test_get_metrics_ArgumentParser_ant_metrics():
    """Spot-check defaults and one override for the 'ant_metrics' parser."""
    parser = utils.get_metrics_ArgumentParser('ant_metrics')
    # Parsing an empty argument sequence exercises the defaults.
    defaults = parser.parse_args('')
    assert defaults.crossCut == 5.0
    assert defaults.alwaysDeadCut == 10.0
    assert defaults.metrics_path == ''
    assert defaults.verbose is True
    # An explicitly supplied option must override its default.
    overridden = parser.parse_args(['--extension', 'foo'])
    assert overridden.extension == 'foo'
def test_get_metrics_ArgumentParser_xrfi_h1c_run():
    """Spot-check defaults and one override for the 'xrfi_h1c_run' parser."""
    parser = utils.get_metrics_ArgumentParser('xrfi_h1c_run')
    # Parsing an empty argument sequence exercises the defaults.
    defaults = parser.parse_args('')
    assert defaults.infile_format == 'miriad'
    assert defaults.summary_ext == 'flag_summary.h5'
    assert defaults.algorithm == 'xrfi_simple'
    assert defaults.nsig_df == 6.0
    assert defaults.px_threshold == 0.2
    # An explicitly supplied option must override its default.
    overridden = parser.parse_args(['--px_threshold', '4.0'])
    assert overridden.px_threshold == 4.0
def test_get_metrics_ArgumentParser_xrfi_apply():
    """Spot-check defaults and one override for the 'xrfi_apply' parser."""
    parser = utils.get_metrics_ArgumentParser('xrfi_apply')
    # Parsing an empty argument sequence exercises the defaults.
    defaults = parser.parse_args('')
    assert defaults.infile_format == 'miriad'
    assert defaults.extension == 'R'
    assert defaults.flag_file is None
    assert defaults.output_uvflag is True
    assert defaults.output_uvflag_ext == 'flags.h5'
    # An explicitly supplied option must override its default.
    overridden = parser.parse_args(['--waterfalls', 'a,g'])
    assert overridden.waterfalls == 'a,g'
def test_get_metrics_ArgumentParser_xrfi_run():
    """Spot-check defaults and one override for the 'xrfi_run' parser."""
    parser = utils.get_metrics_ArgumentParser('xrfi_run')
    # Parsing an empty argument sequence exercises the defaults.
    defaults = parser.parse_args('')
    nt.assert_equal(defaults.infile_format, 'miriad')
    nt.assert_equal(defaults.summary_ext, '.flag_summary.npz')
    nt.assert_equal(defaults.algorithm, 'xrfi_simple')
    nt.assert_equal(defaults.nsig_df, 6.0)
    nt.assert_equal(defaults.px_threshold, 0.2)
    # An explicitly supplied option must override its default.
    overridden = parser.parse_args(['--px_threshold', '4.0'])
    nt.assert_equal(overridden.px_threshold, 4.0)
def test_get_metrics_ArgumentParser_delay_xrfi_run():
    """Spot-check defaults and one override for the 'delay_xrfi_run' parser."""
    parser = utils.get_metrics_ArgumentParser('delay_xrfi_run')
    # Parsing an empty argument sequence exercises the defaults.
    defaults = parser.parse_args('')
    nt.assert_equal(defaults.infile_format, 'miriad')
    nt.assert_equal(defaults.algorithm, 'xrfi_simple')
    nt.assert_equal(defaults.nsig_dt, 6.0)
    nt.assert_equal(defaults.px_threshold, 0.2)
    nt.assert_equal(defaults.filename, None)
    nt.assert_equal(defaults.tol, 1e-7)
    nt.assert_equal(defaults.waterfalls, None)
    # An explicitly supplied option must override its default.
    overridden = parser.parse_args(['--waterfalls', 'a,g'])
    nt.assert_equal(overridden.waterfalls, 'a,g')
def test_get_metrics_ArgumentParser_delay_xrfi_h1c_idr2_1_run():
    """Spot-check defaults and one override for the 'delay_xrfi_h1c_idr2_1_run' parser."""
    parser = utils.get_metrics_ArgumentParser('delay_xrfi_h1c_idr2_1_run')
    # Parsing an empty argument sequence exercises the defaults.
    defaults = parser.parse_args('')
    assert defaults.infile_format == 'miriad'
    assert defaults.algorithm == 'xrfi_simple'
    assert defaults.nsig_dt == 6.0
    assert defaults.px_threshold == 0.2
    assert defaults.filename is None
    assert defaults.tol == 1e-7
    assert defaults.waterfalls is None
    # An explicitly supplied option must override its default.
    overridden = parser.parse_args(['--waterfalls', 'a,g'])
    assert overridden.waterfalls == 'a,g'
def test_ant_metrics_run_only_red_corr():
    """Run ant_metrics with only the redundant-correlation metric enabled and
    check the expected HDF5 output file is written."""
    # get arguments
    a = utils.get_metrics_ArgumentParser('ant_metrics')
    if DATA_PATH not in sys.path:
        sys.path.append(DATA_PATH)
    arg0 = "-p xx,yy,xy,yx"
    arg1 = "--crossCut=5"
    arg2 = "--deadCut=5"
    arg3 = "--extension=.ant_metrics.hdf5"
    arg4 = "--metrics_path={}".format(os.path.join(DATA_PATH, 'test_output'))
    arg5 = "--vis_format=miriad"
    arg6 = "--alwaysDeadCut=10"
    # Enable only the redundant-correlation metric.
    arg7 = "--skip_mean_vij"
    arg8 = "--run_red_corr"
    arg9 = "--skip_cross_pols"
    arguments = ' '.join(
        [arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9])
    xx_file = os.path.join(DATA_PATH, 'zen.2458002.47754.xx.HH.uvA')
    dest_file = os.path.join(DATA_PATH, 'test_output',
                             'zen.2458002.47754.HH.ant_metrics.hdf5')
    # Clean up stale output from any previous run.
    if os.path.exists(dest_file):
        os.remove(dest_file)
    cmd = ' '.join([arguments, xx_file])
    args = a.parse_args(cmd.split())
    history = cmd
    pols = list(args.pol.split(','))
    if os.path.exists(dest_file):
        os.remove(dest_file)
    ant_metrics.ant_metrics_run(args.files, pols, args.crossCut,
                                args.deadCut, args.alwaysDeadCut,
                                args.metrics_path, args.extension,
                                args.vis_format, args.verbose,
                                history=history,
                                run_mean_vij=args.run_mean_vij,
                                run_red_corr=args.run_red_corr,
                                run_cross_pols=args.run_cross_pols)
    assert os.path.exists(dest_file)
    os.remove(dest_file)
def test_firstcal_metrics_run(self):
    """Exercise omnical_metrics_run through its argument-parser interface:
    no files raises; then check output paths for the default extension, a
    custom extension, a metrics_path, extra options, and plot generation.

    NOTE(review): despite the name, this tests omnical_metrics, not
    firstcal_metrics — presumably a copy-paste name; confirm before renaming.
    """
    # get arg parse
    a = utils.get_metrics_ArgumentParser('omnical_metrics')
    if DATA_PATH not in sys.path:
        sys.path.append(DATA_PATH)
    # test w/ no file
    arguments = ''
    cmd = ' '.join([arguments, ''])
    args = a.parse_args(cmd.split())
    history = cmd
    self.assertRaises(AssertionError,
                      omnical_metrics.omnical_metrics_run,
                      args.files, args, history)
    # test w/ no fc_file
    arguments = ''
    cmd = ' '.join([arguments, self.oc_file])
    args = a.parse_args(cmd.split())
    history = cmd
    outfile = self.oc_file + '.omni_metrics.json'
    # Remove stale output so the existence check tests this invocation.
    if os.path.isfile(outfile):
        os.remove(outfile)
    omnical_metrics.omnical_metrics_run(args.files, args, history)
    outfile = self.oc_file + '.omni_metrics.json'
    self.assertTrue(os.path.isfile(outfile))
    os.remove(outfile)
    # test w/ extension
    arguments = '--extension=.omni.json'
    cmd = ' '.join([arguments, self.oc_file])
    args = a.parse_args(cmd.split())
    history = cmd
    outfile = self.oc_file + '.omni.json'
    if os.path.isfile(outfile):
        os.remove(outfile)
    omnical_metrics.omnical_metrics_run(args.files, args, history)
    self.assertTrue(os.path.isfile(outfile))
    os.remove(outfile)
    # test w/ metrics_path
    arguments = '--metrics_path={}'.format(self.out_dir)
    cmd = ' '.join([arguments, self.oc_file])
    args = a.parse_args(cmd.split())
    history = cmd
    outfile = os.path.join(self.out_dir,
                           self.oc_basename + '.omni_metrics.json')
    if os.path.isfile(outfile):
        os.remove(outfile)
    omnical_metrics.omnical_metrics_run(args.files, args, history)
    self.assertTrue(os.path.isfile(outfile))
    os.remove(outfile)
    # test w/ options
    arguments = '--fc_files={0} --no_bandcut --phs_std_cut=0.5 --chisq_std_zscore_cut=4.0'.format(
        self.fc_file)
    cmd = ' '.join([arguments, self.oc_file])
    args = a.parse_args(cmd.split())
    history = cmd
    outfile = self.oc_file + '.omni_metrics.json'
    if os.path.isfile(outfile):
        os.remove(outfile)
    omnical_metrics.omnical_metrics_run(args.files, args, history)
    self.assertTrue(os.path.isfile(outfile))
    os.remove(outfile)
    # test make plots
    arguments = '--fc_files={0} --make_plots'.format(self.fc_file)
    cmd = ' '.join([arguments, self.oc_file])
    args = a.parse_args(cmd.split())
    history = cmd
    omnical_metrics.omnical_metrics_run(args.files, args, history)
    # --make_plots should produce the metrics file plus four PNGs.
    outfile = self.oc_file + '.omni_metrics.json'
    outpng1 = self.oc_file + '.chisq_std.png'
    outpng2 = self.oc_file + '.phs_std.png'
    outpng3 = self.oc_file + '.phs_ft.png'
    outpng4 = self.oc_file + '.phs_hist.png'
    self.assertTrue(os.path.isfile(outfile))
    self.assertTrue(os.path.isfile(outpng1))
    self.assertTrue(os.path.isfile(outpng2))
    self.assertTrue(os.path.isfile(outpng3))
    self.assertTrue(os.path.isfile(outpng4))
    os.remove(outfile)
    os.remove(outpng1)
    os.remove(outpng2)
    os.remove(outpng3)
    os.remove(outpng4)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018 the HERA Project
# Licensed under the MIT License

"""Command-line driver for hera_qm.ant_metrics.ant_metrics_run."""

from hera_qm import utils
from hera_qm import ant_metrics
import sys

a = utils.get_metrics_ArgumentParser('ant_metrics')
args = a.parse_args()
# Preserve the exact invocation so it can be recorded with the metrics.
history = ' '.join(sys.argv)

if args.pol == '':
    # Default to all four visibility polarizations.
    # BUG FIX: the list previously contained 'xy' twice and omitted 'yx'.
    args.pol = ['xx', 'yy', 'xy', 'yx']
else:
    args.pol = list(args.pol.split(','))

ant_metrics.ant_metrics_run(args.files, pols=args.pol,
                            crossCut=args.crossCut, deadCut=args.deadCut,
                            alwaysDeadCut=args.alwaysDeadCut,
                            metrics_path=args.metrics_path,
                            extension=args.extension,
                            vis_format=args.vis_format,
                            verbose=args.verbose, history=history,
                            run_mean_vij=args.run_mean_vij,
                            run_red_corr=args.run_red_corr,
                            run_cross_pols=args.run_cross_pols)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2019 the HERA Project
# Licensed under the MIT License

"""Command-line entry point for hera_qm.firstcal_metrics.firstcal_metrics_run."""

from hera_qm import utils
from hera_qm import firstcal_metrics
import sys

parser = utils.get_metrics_ArgumentParser('firstcal_metrics')
parsed_args = parser.parse_args()
input_files = parsed_args.files
# Preserve the exact invocation so it can be recorded with the metrics.
run_history = ' '.join(sys.argv)
firstcal_metrics.firstcal_metrics_run(input_files, parsed_args, run_history)
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2019 the HERA Project # Licensed under the MIT License import sys from hera_qm import utils from hera_qm import xrfi ap = utils.get_metrics_ArgumentParser('xrfi_run') args = ap.parse_args() history = ' '.join(sys.argv) xrfi.xrfi_run(args.ocalfits_file, args.acalfits_file, args.model_file, args.data_file, history, xrfi_path=args.xrfi_path, kt_size=args.kt_size, kf_size=args.kf_size, sig_init=args.sig_init, sig_adj=args.sig_adj, ex_ants=args.ex_ants, ant_str=args.ant_str, metrics_file=args.metrics_file, clobber=args.clobber)
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2019 the HERA Project # Licensed under the MIT License import sys from hera_qm import utils from hera_qm import xrfi ap = utils.get_metrics_ArgumentParser('day_threshold_run') args = ap.parse_args() history = ' '.join(sys.argv) if args.run_if_first is None or sorted( args.data_files)[0] == args.run_if_first: xrfi.day_threshold_run(args.data_files, history, nsig_f=args.nsig_f, nsig_t=args.nsig_t, nsig_f_adj=args.nsig_f_adj, nsig_t_adj=args.nsig_t_adj, clobber=args.clobber) else: print( sorted(args.data_files)[0], 'is not', args.run_if_first, '...skipping.')
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2018 the HERA Project # Licensed under the MIT License import sys import numpy as np from hera_qm import utils as qm_utils from hera_qm import xrfi from hera_cal import delay_filter from hera_cal import io from pyuvdata import UVData a = qm_utils.get_metrics_ArgumentParser('delay_xrfi_run') args = a.parse_args() filename = args.filename history = ' '.join(sys.argv) # Read data, apply delay filter, update UVData object uv = UVData() uv.read_miriad(filename) # apply a priori waterfall flags if args.waterfalls is not None: waterfalls = args.waterfalls.split(',') if len(waterfalls) > 0: xrfi.flag_apply(waterfalls, uv, force_pol=True) # set kwargs kwargs = {} if args.window == 'tukey': kwargs['alpha'] = args.alpha
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2019 the HERA Project # Licensed under the MIT License import sys import numpy as np from hera_qm import utils as qm_utils from hera_qm import xrfi from hera_cal import delay_filter from hera_cal import io from pyuvdata import UVData ap = qm_utils.get_metrics_ArgumentParser('delay_xrfi_h1c_idr2_1_run') args = ap.parse_args() filename = args.filename history = ' '.join(sys.argv) # Read data, apply delay filter, update UVData object uv = UVData() uv.read_miriad(filename) # apply a priori waterfall flags if args.waterfalls is not None: waterfalls = args.waterfalls.split(',') if len(waterfalls) > 0: xrfi.flag_apply(waterfalls, uv, force_pol=True) # set kwargs kwargs = {} if args.window == 'tukey': kwargs['alpha'] = args.alpha
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2018 the HERA Project # Licensed under the MIT License import sys from hera_qm import utils from hera_qm import xrfi a = utils.get_metrics_ArgumentParser('xrfi_apply') args = a.parse_args() filename = args.filename history = ' '.join(sys.argv) xrfi.xrfi_apply(filename, history, infile_format=args.infile_format, xrfi_path=args.xrfi_path, outfile_format=args.outfile_format, extension=args.extension, overwrite=args.overwrite, flag_file=args.flag_file, waterfalls=args.waterfalls, output_uvflag=args.output_uvflag, output_uvflag_ext=args.output_uvflag_ext)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2019 the HERA Project
# Licensed under the MIT License

"""Command-line entry point for hera_qm.omnical_metrics.omnical_metrics_run."""

from hera_qm import utils
from hera_qm import omnical_metrics
import sys

parser = utils.get_metrics_ArgumentParser('omnical_metrics')
parsed_args = parser.parse_args()
input_files = parsed_args.files
# Preserve the exact invocation so it can be recorded with the metrics.
run_history = ' '.join(sys.argv)
omnical_metrics.omnical_metrics_run(input_files, parsed_args, run_history)
def test_firstcal_metrics_run(omnicalrun_data):
    """Exercise omnical_metrics_run through its argument-parser interface:
    no files raises; then check output paths for the default extension, a
    custom extension, a metrics_path, extra options, and plot generation.

    NOTE(review): despite the name, this tests omnical_metrics, not
    firstcal_metrics — presumably a copy-paste name; confirm before renaming.
    """
    # Plotting paths below need matplotlib; skip the test if unavailable.
    pytest.importorskip('matplotlib.pyplot')
    # get arg parse
    a = utils.get_metrics_ArgumentParser('omnical_metrics')
    if DATA_PATH not in sys.path:
        sys.path.append(DATA_PATH)
    # test w/ no file
    arguments = ''
    cmd = ' '.join([arguments, ''])
    args = a.parse_args(cmd.split())
    history = cmd
    pytest.raises(AssertionError, omnical_metrics.omnical_metrics_run,
                  args.files, args, history)
    # test w/ no fc_file
    arguments = ''
    cmd = ' '.join([arguments, omnicalrun_data.oc_file])
    args = a.parse_args(cmd.split())
    history = cmd
    outfile = utils.strip_extension(
        omnicalrun_data.oc_file) + '.omni_metrics.json'
    # Remove stale output so the existence check tests this invocation.
    if os.path.isfile(outfile):
        os.remove(outfile)
    omnical_metrics.omnical_metrics_run(args.files, args, history)
    assert os.path.isfile(outfile)
    os.remove(outfile)
    # test w/ extension
    arguments = '--extension=.omni.json'
    cmd = ' '.join([arguments, omnicalrun_data.oc_file])
    args = a.parse_args(cmd.split())
    history = cmd
    outfile = utils.strip_extension(omnicalrun_data.oc_file) + '.omni.json'
    if os.path.isfile(outfile):
        os.remove(outfile)
    omnical_metrics.omnical_metrics_run(args.files, args, history)
    assert os.path.isfile(outfile)
    os.remove(outfile)
    # test w/ metrics_path
    arguments = '--metrics_path={}'.format(omnicalrun_data.out_dir)
    cmd = ' '.join([arguments, omnicalrun_data.oc_file])
    args = a.parse_args(cmd.split())
    history = cmd
    outfile = os.path.join(
        omnicalrun_data.out_dir,
        utils.strip_extension(omnicalrun_data.oc_basename)
        + '.omni_metrics.json')
    if os.path.isfile(outfile):
        os.remove(outfile)
    omnical_metrics.omnical_metrics_run(args.files, args, history)
    assert os.path.isfile(outfile)
    os.remove(outfile)
    # test w/ options
    arguments = '--fc_files={0} --no_bandcut --phs_std_cut=0.5 --chisq_std_zscore_cut=4.0'.format(
        omnicalrun_data.fc_file)
    cmd = ' '.join([arguments, omnicalrun_data.oc_file])
    args = a.parse_args(cmd.split())
    history = cmd
    outfile = utils.strip_extension(
        omnicalrun_data.oc_file) + '.omni_metrics.json'
    if os.path.isfile(outfile):
        os.remove(outfile)
    omnical_metrics.omnical_metrics_run(args.files, args, history)
    assert os.path.isfile(outfile)
    os.remove(outfile)
    # test make plots
    arguments = '--fc_files={0} --make_plots'.format(omnicalrun_data.fc_file)
    cmd = ' '.join([arguments, omnicalrun_data.oc_file])
    args = a.parse_args(cmd.split())
    history = cmd
    omnical_metrics.omnical_metrics_run(args.files, args, history)
    # --make_plots should produce the metrics file plus four PNGs.
    basename = utils.strip_extension(omnicalrun_data.oc_file)
    outfile = basename + '.omni_metrics.json'
    outpng1 = basename + '.chisq_std.png'
    outpng2 = basename + '.phs_std.png'
    outpng3 = basename + '.phs_ft.png'
    outpng4 = basename + '.phs_hist.png'
    assert os.path.isfile(outfile)
    assert os.path.isfile(outpng1)
    assert os.path.isfile(outpng2)
    assert os.path.isfile(outpng3)
    assert os.path.isfile(outpng4)
    os.remove(outfile)
    os.remove(outpng1)
    os.remove(outpng2)
    os.remove(outpng3)
    os.remove(outpng4)