def test_sites_only_propagation(self):
    """
    Make sure propagation works correctly with limited tables provided.

    Exercises three scenarios against the McMurdo test data:
    1. only sites read in -> samples/locations are created by propagation;
    2. a samples table with a conflicting 'site' value -> the existing
       sample value wins, but missing samples are still added;
    3. no samples file available -> only locations can be propagated.
    """
    directory = os.path.join(WD, 'data_files', '3_0', 'McMurdo')
    con = cb.Contribution(directory, dmodel=DMODEL, read_tables=['sites'],
                          custom_filenames={'locations': '_locations.txt',
                                            'samples': '_samples.txt'})
    self.assertEqual(['sites'], list(con.tables.keys()))
    con.propagate_all_tables_info()
    self.assertEqual(sorted(['samples', 'sites', 'locations']),
                     sorted(con.tables.keys()))
    # clean up the files written by propagation
    for fname in ['_locations.txt', '_samples.txt']:
        os.remove(os.path.join(directory, fname))
    #
    con = cb.Contribution(directory, dmodel=DMODEL, read_tables=['sites'],
                          custom_filenames={'locations': '_locations.txt',
                                            'samples': '_samples.txt'})
    samp_df = pd.DataFrame(index=['mc01b'], columns=['sample', 'site'],
                           data=[['mc01b', 'fake site']])
    samp_df = cb.MagicDataFrame(dtype='samples', df=samp_df)
    con.tables['samples'] = samp_df
    self.assertEqual('fake site',
                     con.tables['samples'].df.loc['mc01b', 'site'])
    con.propagate_all_tables_info()
    self.assertEqual(sorted(['samples', 'sites', 'locations']),
                     sorted(con.tables.keys()))
    # mc01b does not update b/c sample_df value trumps value from sites table
    self.assertEqual('fake site',
                     con.tables['samples'].df.loc['mc01b', 'site'])
    # however, additional samples should be added
    self.assertIn('mc01d', con.tables['samples'].df.index)
    for fname in ['_locations.txt', '_samples.txt']:
        os.remove(os.path.join(directory, fname))
    #
    con = cb.Contribution(self.directory, dmodel=DMODEL, read_tables=['sites'],
                          custom_filenames={'locations': '_locations.txt',
                                            'samples': '_samples.txt'})
    self.assertEqual(['sites'], list(con.tables.keys()))
    con.propagate_all_tables_info()
    self.assertEqual(sorted(['sites', 'locations']),
                     sorted(con.tables.keys()))
    for fname in ['_locations.txt']:  # no samples available this time
        os.remove(os.path.join(self.directory, fname))
def test_get_min_max_lat_lon(self):
    """
    get_min_max_lat_lon should compute per-location lat/lon bounds from
    the sites table and write them into the locations table, coping with
    string-typed and missing coordinate values.

    Fix: removed a stray no-op expression statement (`site_container.df`)
    left over from debugging.
    """
    site_container = cb.MagicDataFrame(dtype='sites')
    site_container.add_row('site1', {'lat': 10, 'lon': 4,
                                     'location': 'location1'})
    site_container.add_row('site2', {'lat': 10.2, 'lon': 5,
                                     'location': 'location1'})
    # string-valued lon should still be treated numerically
    site_container.add_row('site3', {'lat': 20, 'lon': '15',
                                     'location': 'location2'})
    # a missing lat must not break the min/max calculation
    site_container.add_row('site4', {'lat': None, 'location': 'location1'})
    loc_container = cb.MagicDataFrame(
        dtype='locations',
        columns=['lat_n', 'lat_s', 'lon_e', 'lon_w', 'location'])
    loc_container.add_row('location1', {})
    loc_container.add_row('location2', {})
    con = cb.Contribution(".", read_tables=['images'])
    con.tables['sites'] = site_container
    con.tables['locations'] = loc_container
    con.get_min_max_lat_lon()
    self.assertEqual(10.,
                     con.tables['locations'].df.loc['location1', 'lat_s'])
    self.assertEqual(15.,
                     con.tables['locations'].df.loc['location2', 'lon_e'])
    # get_min_max_lat_lon writes out a locations file; clean it up
    os.remove(os.path.join(".", "locations.txt"))
def test_find_missing_items(self):
    """Tables should report no missing items until a row is deleted."""
    for table_name in self.con.tables:
        self.assertEqual(set(), self.con.find_missing_items(table_name))
    # deleting a site row should surface that site as missing
    self.con.tables['sites'].delete_row(0)
    self.assertEqual({'hz05'}, self.con.find_missing_items('sites'))
    # freshly read contributions should also have nothing missing
    for wd in (PROJECT_WD,
               os.path.join(WD, 'data_files', '3_0', 'McMurdo')):
        fresh_con = cb.Contribution(wd)
        for table_name in fresh_con.tables:
            self.assertEqual(set(),
                             fresh_con.find_missing_items(table_name))
def test_add_empty_magic_table(self):
    """add_empty_magic_table should register a new, zero-row table."""
    con = cb.Contribution(self.directory, read_tables=['specimens'],
                          dmodel=DMODEL)
    self.assertEqual({'specimens'}, set(con.tables.keys()))
    con.add_empty_magic_table('samples')
    self.assertEqual({'specimens', 'samples'}, set(con.tables.keys()))
    self.assertEqual(0, len(con.tables['samples'].df))
def on_clear(self, event):
    """
    Initialize window to allow user to empty the working directory.
    """
    dialog = pmag_menu_dialogs.ClearWD(self.parent, self.parent.WD)
    if dialog.do_clear():
        # rebuild the contribution from the now-cleared directory,
        # reusing the data model that is already loaded
        self.parent.contribution = cb.Contribution(
            self.parent.WD,
            dmodel=self.parent.contribution.data_model)
def test_propagate_average_up(self):
    """
    propagate_average_up should average sample lat/lon values up into the
    sites table, never overwrite existing site values, and return None
    (fail gracefully) when data or parent/child links are missing.
    """
    directory = os.path.join('data_files', '3_0', 'McMurdo')
    con = cb.Contribution(directory, read_tables=['sites', 'samples'])
    con.tables['sites'].df.drop(['lat', 'lon'], axis='columns', inplace=True)
    con.tables['samples'].df.loc['mc01a', 'lat'] = -60.
    # test basic function
    con.propagate_average_up()
    self.assertTrue(
        all(con.tables['sites'].df[['lat', 'lon']].values.ravel()))
    self.assertEqual([-75.61875],
                     con.tables['sites'].df.loc['mc01', 'lat'].unique())
    # make sure does not overwrite existing values
    con = cb.Contribution(directory, read_tables=['sites', 'samples'])
    con.tables['sites'].df.loc['mc01', 'lon'] = 12
    con.propagate_average_up()
    self.assertEqual([12],
                     con.tables['sites'].df.loc['mc01', 'lon'].unique())
    # no temporary helper columns should leak into the result
    self.assertNotIn('new_lat', con.tables['sites'].df.columns)
    self.assertNotIn('new_lon', con.tables['sites'].df.columns)
    # make sure works with only some sample data available
    con = cb.Contribution(directory, read_tables=['sites', 'samples'])
    con.tables['samples'].df.drop(['lon'], axis='columns', inplace=True)
    con.propagate_average_up()
    # fails gracefully?  (samples->sites link column removed)
    con = cb.Contribution(directory, read_tables=['sites', 'samples'])
    con.tables['samples'].df.drop(['site'], axis='columns', inplace=True)
    con.tables['sites'].df.loc['mc01', 'lat'] = ''
    con.propagate_average_up()
    # fails gracefully?
    con = cb.Contribution(directory, read_tables=['sites', 'samples'],
                          custom_filenames={'samples': '_samples.txt'})
    res = con.propagate_average_up()
    self.assertIsNone(res)
    # fails gracefully?  (propagation only goes up, not down)
    res = con.propagate_average_up(target_df_name='samples',
                                   source_df_name='sites')
    self.assertIsNone(res)
    # fails gracefully?  (source table was never read in)
    res = con.propagate_average_up(target_df_name='sites',
                                   source_df_name='specimens')
    self.assertIsNone(res)
def test_propagate_name_down(self):
    """Parent names should propagate down into the measurements table."""
    directory = os.path.join(WD, 'data_files', 'convert_2_magic',
                             'cit_magic', 'PI47')
    con = cb.Contribution(directory)
    self.assertNotIn('location', con.tables['measurements'].df.columns)
    # need to actually test this
    for name_col in ('sample', 'site', 'location'):
        con.propagate_name_down(name_col, 'measurements')
    self.assertIn('location', con.tables['measurements'].df.columns)
def on_clear(self, event):
    """
    Initialize window to allow user to empty the working directory.
    """
    dialog = pmag_menu_dialogs.ClearWD(self.parent, self.parent.WD)
    if dialog.do_clear():
        print('-I- Clear data object')
        # start over with a fresh contribution from the cleared directory
        self.contribution = cb.Contribution(self.WD, dmodel=self.data_model)
        self.edited = False
def get_wd_data(self):
    """
    Show dialog to get user input for which directory to set as
    working directory.  Called by self.get_dm_and_wd.
    """
    busy = wx.BusyInfo(
        'Reading in data from current working directory, please wait...')
    #wx.Yield()
    print('-I- Read in any available data from working directory')
    self.contribution = cb.Contribution(self.WD, dmodel=self.data_model)
    # dropping the reference dismisses the busy indicator
    del busy
def test_add_magic_table(self):
    """add_magic_table should read tables from disk by name or filename."""
    con = cb.Contribution(self.directory, read_tables=['specimens'],
                          dmodel=DMODEL)
    self.assertEqual({'specimens'}, set(con.tables.keys()))
    con.add_magic_table('samples')
    self.assertEqual({'specimens', 'samples'}, set(con.tables.keys()))
    self.assertGreater(len(con.tables['samples'].df), 0)
    # dtype 'unknown' means the table type is read from the file itself
    con.add_magic_table('unknown', 'sites.txt')
    self.assertEqual({'specimens', 'samples', 'sites'},
                     set(con.tables.keys()))
    self.assertGreater(len(con.tables['sites'].df), 0)
def test_propagate_name_down_fail(self):
    """fail gracefully"""
    directory = os.path.join(WD, 'data_files', 'convert_2_magic',
                             'cit_magic', 'PI47')
    con = cb.Contribution(directory)
    self.assertNotIn('sample', con.tables['measurements'].df.columns)
    self.assertNotIn('location', con.tables['measurements'].df.columns)
    # missing link: break the samples -> sites connection
    del con.tables['samples'].df['site']
    meas_df = con.propagate_location_to_measurements()
    # sample names can still be found, but location cannot
    self.assertIn('sample', con.tables['measurements'].df.columns)
    self.assertNotIn('location', meas_df.columns)
def __init__(self, WD, parent, contribution=None):
    """
    Set up the ErMagicBuilder frame: size and scroll the panel, move
    into the working directory, build (or reuse) a Contribution,
    propagate names between its tables, add blank tables for any that
    are missing, then draw the UI and hide the parent frame.

    Parameters
    ----------
    WD : str
        working directory path
    parent : wx.Frame
        parent frame (hidden while this window is open)
    contribution : cb.Contribution, optional
        existing data object; if not provided, one is built from WD
    """
    SIZE = wx.DisplaySize()
    SIZE = (SIZE[0] * .95, SIZE[1] * .95)
    wx.Frame.__init__(self, parent, wx.ID_ANY, size=SIZE,
                      name='ErMagicBuilder')
    self.parent = parent
    self.main_frame = self.Parent
    self.panel = wx.ScrolledWindow(self)
    self.panel.SetScrollbars(1, 1, 1, 1)
    # coarser scroll units on Windows
    if sys.platform in ['win32', 'win64']:
        self.panel.SetScrollbars(20, 20, 50, 50)
    os.chdir(WD)
    self.WD = os.getcwd()
    self.site_lons = []
    self.site_lats = []
    # if ErMagic data object was not passed in,
    # create one based on the working directory
    if not contribution:
        self.contribution = cb.Contribution(self.WD)
    else:
        self.contribution = contribution
    # first propagate from measurements
    self.contribution.propagate_measurement_info()
    # then propagate from other tables
    # (i.e., if sites are in specimens or samples but not measurements)
    self.contribution.propagate_all_tables_info()
    # then add in blank tables if any are missing
    self.table_list = ["specimens", "samples", "sites", "locations", "ages"]
    for table in self.table_list:
        if table not in self.contribution.tables:
            new_table = cb.MagicDataFrame(
                dtype=table, dmodel=self.contribution.data_model)
            self.contribution.tables[table] = new_table
    self.SetTitle("Earth-Ref Magic Builder")
    self.InitUI()
    # hide mainframe, bind close event so that it closes the current window not the mainframe
    self.parent.Hide()
    self.parent.Bind(
        wx.EVT_MENU,
        lambda event: self.parent.menubar.on_quit(event, self),
        self.parent.menubar.file_quit)
def on_btn_unpack(self, event):
    """
    Create dialog to choose a file to unpack with download magic.
    Then run download_magic and create self.contribution.

    Fix: the name bound by ``except ... as ex`` is deleted when the
    except block ends (PEP 3110), so the old ``if ex: raise(ex)`` hit a
    NameError whenever an exception had been caught.  The exception is
    now saved to an outer variable before the block ends.
    """
    dlg = wx.FileDialog(
        None, message="choose txt file to unpack",
        defaultDir=self.WD,
        defaultFile="",
        style=wx.FD_OPEN  #| wx.FD_CHANGE_DIR
    )
    if dlg.ShowModal() == wx.ID_OK:
        FILE = dlg.GetPath()
        input_dir, f = os.path.split(FILE)
    else:
        return False
    outstring = "download_magic.py -f {} -WD {} -ID {} -DM {}".format(
        f, self.WD, input_dir, self.data_model_num)
    # run as module:
    print("-I- running python script:\n %s" % (outstring))
    wait = wx.BusyInfo("Please wait, working...")
    wx.SafeYield()
    ex = None
    try:
        if ipmag.download_magic(f, self.WD, input_dir, overwrite=True,
                                data_model=self.data_model):
            text = "Successfully ran download_magic.py program.\nMagIC files were saved in your working directory.\nSee Terminal/message window for details."
        else:
            text = "Something went wrong. Make sure you chose a valid file downloaded from the MagIC database and try again."
    except Exception as err:
        # save the exception: the "as" binding is unbound after this block
        ex = err
        text = "Something went wrong. Make sure you chose a valid file downloaded from the MagIC database and try again."
    del wait
    dlg = wx.MessageDialog(self, caption="Saved", message=text, style=wx.OK)
    result = dlg.ShowModal()
    if result == wx.ID_OK:
        dlg.Destroy()
    if ex:
        raise ex
    self.contribution = cb.Contribution(self.WD)
def on_clear(self, event):
    """
    Initialize window to allow user to empty the working directory.
    """
    dia = pmag_menu_dialogs.ClearWD(self.parent, self.parent.WD)
    if not dia.do_clear():
        return
    # clear directory, but use previously acquired data_model
    if self.data_model_num == 2.5:
        self.parent.er_magic = builder.ErMagicBuilder(
            self.parent.WD, self.parent.er_magic.data_model)
    elif self.data_model_num == 3:
        self.parent.contribution = cb.Contribution(
            self.parent.WD,
            dmodel=self.parent.contribution.data_model)
def on_btn_convert_3(self, event):
    """
    Open dialog for rough conversion of 2.5 files to 3.0 files.
    Offer link to earthref for proper upgrade.
    """
    dia = pw.UpgradeDialog(None)
    dia.Center()
    res = dia.ShowModal()
    # user declined the rough conversion -> send them to the real tool
    if res == wx.ID_CANCEL:
        webbrowser.open("https://www2.earthref.org/MagIC/upgrade", new=2)
        return
    ## more nicely styled way, but doesn't link to earthref
    #msg = "This tool is meant for relatively simple upgrades (for instance, a measurement file, a sample file, and a criteria file).\nIf you have a more complex contribution to upgrade, and you want maximum accuracy, use the upgrade tool at https://www2.earthref.org/MagIC/upgrade.\n\nDo you want to continue?"
    #result = pw.warning_with_override(msg)
    #if result == wx.ID_NO:
    #webbrowser.open("https://www2.earthref.org/MagIC/upgrade", new=2)
    #return
    # turn files from 2.5 --> 3.0 (rough translation)
    meas, upgraded, no_upgrade = pmag.convert_directory_2_to_3(
        'magic_measurements.txt', input_dir=self.WD, output_dir=self.WD,
        data_model=self.contribution.data_model)
    if not meas:
        wx.MessageBox('2.5 --> 3.0 failed. Do you have a magic_measurements.txt file in your working directory?',
                      'Info', wx.OK | wx.ICON_INFORMATION)
        return
    # create a contribution
    self.contribution = cb.Contribution(self.WD)
    # make skeleton files with specimen, sample, site, location data
    self.contribution.propagate_measurement_info()
    # pop up
    upgraded_string = ", ".join(upgraded)
    if no_upgrade:
        no_upgrade_string = ", ".join(no_upgrade)
        msg = '2.5 --> 3.0 translation completed!\n\nThese 3.0 format files were created: {}.\n\nHowever, these 2.5 format files could not be upgraded: {}.\n\nTo convert all 2.5 files, use the MagIC upgrade tool: https://www2.earthref.org/MagIC/upgrade\n'.format(
            upgraded_string, no_upgrade_string)
        # criteria files need extra caveats
        if 'criteria.txt' in upgraded:
            msg += '\nNote: Please check your criteria file for completeness and accuracy, as not all 2.5 files will be fully upgraded.'
        if 'pmag_criteria.txt' in no_upgrade:
            msg += '\nNote: Not all criteria files can be upgraded, even on the MagIC site. You may need to recreate an old pmag_criteria file from scratch in Thellier GUI or Demag GUI.'
        wx.MessageBox(msg, 'Warning', wx.OK | wx.ICON_INFORMATION)
    else:
        msg = '2.5 --> 3.0 translation completed!\nThese files were converted: {}'.format(
            upgraded_string)
        wx.MessageBox(msg, 'Info', wx.OK | wx.ICON_INFORMATION)
def test_propagate_cols_up(self):
    """
    propagate_cols_up should fill empty site-level values from the
    samples table while leaving non-empty site values alone, and fail
    gracefully when the source table is missing.
    """
    directory = os.path.join('data_files', '3_0', 'McMurdo')
    con = cb.Contribution(directory, read_tables=['sites', 'samples'],
                          custom_filenames={'locations': '_locations.txt'})
    con.tables['samples'].df.loc['mc01a', 'lithologies'] = 'other:Trachyte'
    ind = con.tables['samples'].df.columns.get_loc('lithologies')
    # scatter in the various flavors of "empty" value
    con.tables['samples'].df.iloc[2, ind] = None
    con.tables['samples'].df.iloc[3, ind] = np.nan
    con.tables['samples'].df.iloc[4, ind] = ''
    con.tables['sites'].df.loc['mc01', 'lithologies'] = ''
    # NOTE(review): this slice is a no-op — presumably leftover from
    # interactive debugging; confirm before removing
    con.tables['sites'].df[:10][['lithologies', 'geologic_types']]
    cols = ['lithologies', 'geologic_types']
    con.propagate_cols_up(cols, 'sites', 'samples')
    self.assertEqual(
        'Other:Trachyte',
        con.tables['sites'].df.loc['mc01', 'lithologies'].unique()[0])
    self.assertEqual(
        'Basalt',
        con.tables['sites'].df.loc['mc02', 'lithologies'].unique()[0])
    self.assertTrue(all(con.tables['sites'].df['lithologies']))
    # fail gracefully
    con = cb.Contribution(directory, read_tables=['sites'])
    con.propagate_cols_up(cols, 'sites', 'samples')
def test3_with_contribution(self):
    """
    upload_magic3 run on an existing Contribution should fail validation
    for the Megiddo test data, and any partially created upload file is
    cleaned up afterward.

    Fix: the cleanup regex used '\\_' inside a non-raw string (an invalid
    escape that is a warning on modern Python); now a raw string, with
    the needless backslash before '_' dropped ('_' matches itself).
    """
    import re
    dir_path = os.path.join(WD, 'data_files', '3_0', 'Megiddo')
    con = cb.Contribution(directory=dir_path)
    outfile, error_message, errors, all_errors = ipmag.upload_magic3(
        contribution=con)
    msg = "file validation has failed. You may run into problems if you try to upload this file."
    self.assertEqual(error_message, msg)
    # delete any upload file that was partially created;
    # upload filenames look like <name>.<letters>.<year>[_<n>].txt
    pattern = re.compile(r'\A[^.]*\.[a-zA-Z]*\.\d{4}_?\d*\.txt')
    files = [f for f in os.listdir(dir_path) if pattern.match(f)]
    pmag.remove_files(files, dir_path)
def test_propagate_cols_up_old(self):
    """propagate_cols(down=False) fills empty site columns from samples."""
    directory = os.path.join(WD, 'data_files', '3_0', 'McMurdo')
    con = cb.Contribution(directory, dmodel=DMODEL,
                          read_tables=['sites', 'samples'])
    sites_df = con.tables['sites'].df
    sites_df.loc[:, 'lithologies'] = None
    sites_df.loc[:, 'geologic_types'] = 'your type'
    con.tables['samples'].df.loc[:, 'geologic_types'] = 'my_type'
    con.propagate_cols(['lithologies', 'geologic_types'],
                       'sites', 'samples', down=False)
    # emptied site lithologies get filled from the samples table
    self.assertEqual(
        'Basalt',
        con.tables['sites'].get_first_non_null_value('mc50', 'lithologies'))
    # existing site values are not overwritten by sample values
    self.assertEqual(
        'your type',
        con.tables['sites'].get_first_non_null_value('mc50',
                                                     'geologic_types'))
def test_depth_propagation(self):
    """
    core_depth should survive a full upload/download round trip and end
    up as a column in the sites table.

    Fixes: replaced the non-portable ``os.system('mv ...')`` shell-out
    with ``shutil.move`` (works on Windows too), and removed dead
    commented-out code.
    """
    import shutil
    dir_path = os.path.join(WD, 'data_files', 'core_depthplot')
    outfile, error_message, errors, all_errors = ipmag.upload_magic3(
        dir_path=dir_path)
    # move the generated upload file into WD before unpacking it
    dest = os.path.join(WD, os.path.split(outfile)[1])
    print('mv {} {}'.format(outfile, WD))
    shutil.move(outfile, dest)
    ipmag.download_magic(dest)
    con = cb.Contribution(WD)
    self.assertIn('core_depth', con.tables['sites'].df.columns)
    self.assertEqual(con.tables['sites'].df.loc['15-1-013', 'core_depth'],
                     55.23)
def test_init_empty(self):
    """A directory with no MagIC files should yield an empty contribution."""
    table_names = ['measurements', 'specimens', 'samples', 'sites',
                   'locations', 'ages', 'criteria', 'contribution']
    existing = os.listdir(WD)
    # remove any leftover MagIC files so the directory is truly empty
    for name in table_names:
        fname = name + ".txt"
        if fname not in existing:
            continue
        try:
            print(os.path.join(WD, fname))
            os.remove(os.path.join(WD, fname))
        except OSError:
            print("error when removing files for empty directory test in test_contribution_builder")
    con = cb.Contribution(WD, dmodel=DMODEL)
    self.assertEqual(0, len(con.tables))
def on_upload_file(self, event):
    """
    Validate the working-directory data with ipmag.upload_magic and
    either highlight the failing items in the GUI or tell the user
    their contribution passed.
    """
    # build a contribution lazily if one hasn't been created yet
    if not hasattr(self, "contribution"):
        self.contribution = cb.Contribution(self.WD, dmodel=self.data_model)
    wait = wx.BusyInfo('Validating data, please wait...')
    wx.SafeYield()
    res, error_message, has_problems, all_failing_items = ipmag.upload_magic(
        dir_path=self.WD, vocab=self.contribution.vocab)
    self.failing_items = all_failing_items
    if has_problems:
        self.highlight_problems(has_problems)
    if not has_problems:
        self.validation_mode = set()
        self.message.SetLabel('Validated!')
        self.bSizer_msg.ShowItems(False)
        self.hbox.Fit(self)
        # do alert that your file passed
        dlg = wx.MessageDialog(
            self, caption="Message:",
            message="Your contribution has passed validations!\nGo to https://www.earthref.org/MagIC to upload:\n{}".format(res),
            style=wx.OK)
        dlg.ShowModal()
    # dropping the reference dismisses the busy indicator
    del wait
def get_wd_data(self):
    """
    Read any available MagIC data from the working directory into a
    Contribution, then propagate names, lat/lon bounds, and ages
    between its tables.
    """
    self.edited = False
    self.validation_mode = False
    self.reset_highlights()
    wait = wx.BusyInfo(
        'Reading in data from current working directory, please wait...')
    wx.SafeYield()
    print('-I- Read in any available data from working directory')
    self.contribution = cb.Contribution(self.WD, dmodel=self.data_model)
    # propagate names from measurements into other tables
    if "measurements" in self.contribution.tables:
        self.contribution.propagate_measurement_info()
    # propagate names from any table into other tables
    # (i.e., site name from samples)
    self.contribution.propagate_all_tables_info()
    # extract average lats/lons from sites table
    self.contribution.get_min_max_lat_lon()
    # extract age info from ages table and put into other tables
    self.contribution.propagate_ages()
    # finish up
    self.edited = False
    del wait
def main():
    """
    NAME
        bryson_xpeem_measurements.py

    DESCRIPTION
        converts James Bryson XPEEM files into a MagIC format measurement file

    SYNTAX
        bryson_xpeem_measurements.py [command line options]

    OPTIONS
        -h: prints the help message and quits.
        -d DIRECTORY: specify directory where the XPEEM files are located,
           otherwise current directory is used.
    """
    # Fixes: the sample dict used the key ' method_codes' (stray leading
    # space -> bogus column); data files were opened as
    # open(dir_name + file) AFTER os.chdir(dir_name), which breaks for any
    # relative dir_name -- they are opened relative to the cwd now; files
    # are opened with a context manager so they are always closed.
    if '-h' in sys.argv:  # check if help is needed
        print(main.__doc__)
        sys.exit()  # graceful quit
    if '-d' in sys.argv:
        ind = sys.argv.index('-d')
        dir_name = sys.argv[ind + 1]
    else:
        dir_name = ""
    if dir_name != "":
        os.chdir(dir_name)
    file_list = os.listdir()
    print(file_list)
    # pixel spacing of the XPEEM images, in meters
    x_spacing = 9.488e-9
    y_spacing = 9.709e-9
    md = cb.Contribution()  # md stands for magic file data
    location_data = [{
        'location': 'Portales Valley Meteorite',
        'location_type': 'Meteorite',
        'geologic_classes': 'Meteorite',
        'lithologies': 'H Ordinary Chondrite',
        'lat_s': '0', 'lat_n': '0', 'lon_w': '0', 'lon_e': '0',
        'age': '4.5', 'age_unit': 'Ga'
    }]
    md.add_magic_table_from_data('locations', location_data)
    md.write_table_to_file('locations')
    siteA_data = [{
        'site': 'Interface A',
        'location': 'Portales Valley Meteorite',
        'result_type': 'i', 'result_quality': 'g',
        'method_codes': 'GM-CC',
        'citations': '10.1029/2019JE005951',
        'geologic_classes': 'Meteorite',
        'lithologies': 'H Ordinary Chondrite',
        'geologic_types': 'Meteorite',
        'lat': '0', 'lon': '0',
        'age': '4.5', 'age_unit': 'Ga',
        'int_abs': '0.000019', 'int_abs_sigma': '0.000006'
    }]
    siteB_data = [{
        'site': 'Interface B',
        'location': 'Portales Valley Meteorite',
        'result_type': 'i', 'result_quality': 'g',
        'method_codes': 'GM-CC',
        'citations': '10.1029/2019JE005951',
        'geologic_classes': 'Meteorite',
        'lithologies': 'H Ordinary Chondrite',
        'geologic_types': 'Meteorite',
        'lat': '0', 'lon': '0',
        'age': '4.5', 'age_unit': 'Ga',
        'int_abs': '0.000009', 'int_abs_sigma': '0.0000035'
    }]
    md.add_magic_table_from_data('sites', siteA_data + siteB_data)
    md.write_table_to_file('sites')
    samp_num = 0
    samps = []
    specs = []
    measurements = []
    for file in file_list:
        file_dir = file[:-4]
        # file name encodes the site letter and the sample number
        site = file[2:3]
        print('site=', site)
        spec_name = file_dir
        prev_samp_num = samp_num
        samp_num = file[3:5]
        samp_name = file[:5]
        print('samp_num=', samp_num, ' prev_samp_num=', prev_samp_num)
        # only create a new sample record when the sample number changes
        if samp_num != prev_samp_num:
            samp = {
                'sample': samp_name,
                'site': 'Interface ' + site,
                'result_type': 'i', 'result_quality': 'g',
                'method_codes': 'GM-CC',  # key had a stray leading space
                'citations': '10.1029/2019JE005951',
                'geologic_classes': 'Meteorite',
                'lithologies': 'H Ordinary Chondrite',
                'geologic_types': 'Meteorite'
            }
            samps.append(samp)
        # one specimen record per data file
        spec = {
            'specimen': spec_name,
            'sample': samp_name,
            'result_quality': 'g',
            'method_codes': 'GM-CC',
            'citations': '10.1029/2019JE005951',
            'geologic_classes': 'Meteorite',
            'lithologies': 'H Ordinary Chondrite',
            'geologic_types': 'Meteorite'
        }
        specs.append(spec)
        # open relative to cwd: we already chdir'ed into dir_name above
        with open(file, 'r') as m:
            y = 0
            sequence = 0
            for line in m:
                values = line.split('\t')
                x = 0
                for value in values:
                    # one measurement record per pixel of the image
                    measurement = {
                        'measurement': spec_name + str(x) + str(y),
                        'experiment': spec_name + '_xpeem',
                        'specimen': spec_name,
                        'sequence': sequence,
                        'standard': 'u',
                        'quality': 'g',
                        'method_codes': 'GM-CC',
                        'citations': '10.1029/2019JE005951',
                        'derived_value': 'XPEEM,' + str(value) +
                                         ',10.1088/1742-6596/430/1/012127',
                        'meas_pos_x': str(x * x_spacing),
                        'meas_pos_y': str(y * y_spacing)
                    }
                    measurements.append(measurement)
                    x += 1
                    sequence += 1
                y += 1
        print("file_dir", file_dir)
    md.add_magic_table_from_data('samples', samps)
    md.write_table_to_file('samples')
    md.add_magic_table_from_data('specimens', specs)
    md.write_table_to_file('specimens')
    md.add_magic_table_from_data('measurements', measurements)
    md.write_table_to_file('measurements')
    # sys.command('upload_magic.py')
    print("end")
def on_btn_unpack(self, event):
    """
    Create dialog to choose a file to unpack with download magic.
    Then run download_magic and create self.contribution.

    Fixes: the name bound by ``except ... as ex`` is deleted when the
    except block ends (PEP 3110), so the old ``if ex: raise(ex)`` hit a
    NameError whenever an exception had been caught -- the exception is
    now saved to an outer variable; also removed the unreachable
    ``return`` after ``raise``.
    """
    def magic_download_dia(warn=""):
        # prompt for a MagIC contribution id or DOI; returns the text,
        # wx.ID_CANCEL on cancel, or False otherwise
        dia = pw.TextDialog(
            self, "Download from MagIC\nusing contribution id or DOI",
            "MagIC id/DOI", warn)
        res = dia.ShowModal()
        magic_id = dia.text_ctrl.return_value()
        if res == wx.ID_CANCEL:
            return wx.ID_CANCEL
        if res == wx.ID_OK:
            return magic_id
        else:
            return False

    dlg = pw.ChooseOne(
        self, "Download from MagIC", "Unpack previous downloaded file",
        text="You can unpack a downloaded file from MagIC, or download a file from MagIC directly using the contribution id or DOI.",
        title="")
    dlg.Centre()
    res = dlg.ShowModal()
    # try to download directly from MagIC
    if res == wx.ID_YES:
        magic_id = True
        warning = ""
        while magic_id:
            magic_id = magic_download_dia(warning)
            # if magic id was blank
            if magic_id == "":
                warning = "You must provide a MagIC contribution id or DOI"
                magic_id = True
                continue
            # if user canceled the download
            if magic_id == wx.ID_CANCEL:
                return
            # if everything looks good, try to download
            if len(str(magic_id)) < 8:
                # use contribution id
                status, stuff = ipmag.download_magic_from_id(magic_id)
                f = "magic_contribution_{}.txt".format(magic_id)
            else:
                # use DOI
                status, stuff = ipmag.download_magic_from_doi(magic_id)
                f = "magic_contribution.txt"
            if not status:
                warning = stuff
            if status:
                break
        # move the downloaded file into the working directory if needed
        if not os.path.exists(os.path.join(self.WD, f)):
            os.rename(os.path.join(os.getcwd(), f),
                      os.path.join(self.WD, f))
        input_dir = self.WD
    # try to unpack a previously downloaded file
    if res == wx.ID_NO:
        dlg = wx.FileDialog(
            None, message="choose txt file to unpack",
            defaultDir=self.WD,
            defaultFile="",
            style=wx.FD_OPEN  #| wx.FD_CHANGE_DIR
        )
        if dlg.ShowModal() == wx.ID_OK:
            FILE = dlg.GetPath()
            input_dir, f = os.path.split(FILE)
        else:
            return False
    outstring = "download_magic.py -f {} -WD {} -ID {}".format(
        f, self.WD, input_dir)
    # run as module:
    print("-I- running python script:\n %s" % (outstring))
    wait = wx.BusyInfo("Please wait, working...")
    wx.SafeYield()
    ex = None
    try:
        if ipmag.download_magic(f, self.WD, input_dir, overwrite=True,
                                data_model=self.data_model):
            text = "Successfully ran download_magic.py program.\nMagIC files were saved in your working directory.\nSee Terminal/message window for details."
        else:
            text = "Something went wrong. Make sure you chose a valid file downloaded from the MagIC database and try again."
            # NOTE(review): this early return skips the result dialog on
            # the download_magic-returned-False path -- confirm intended
            return
    except Exception as err:
        # save the exception: the "as" binding is unbound after this block
        ex = err
        text = "Something went wrong. Make sure you chose a valid file downloaded from the MagIC database and try again."
    del wait
    dlg = wx.MessageDialog(self, caption="Saved", message=text, style=wx.OK)
    result = dlg.ShowModal()
    if result == wx.ID_OK:
        dlg.Destroy()
    if ex:
        raise ex
    self.contribution = cb.Contribution(self.WD)
    # make a success pop-up
    dlg = wx.MessageDialog(
        self, caption="Success",
        message="You can now add orientation information or metadata, or open one of the analysis tools",
        style=wx.OK)
    dlg.ShowModal()
def main(): """ NAME make_magic_plots.py DESCRIPTION inspects magic directory for available plots. SYNTAX make_magic_plots.py [command line options] INPUT magic files OPTIONS -h prints help message and quits -f FILE specifies input file name -fmt [png,eps,svg,jpg,pdf] specify format, default is png """ if '-h' in sys.argv: print(main.__doc__) sys.exit() # reset log files for fname in ['log.txt', 'errors.txt']: f = os.path.join(os.getcwd(), fname) if os.path.exists(f): os.remove(f) dirlist = ['./'] dir_path = os.getcwd() # if '-fmt' in sys.argv: ind = sys.argv.index("-fmt") fmt = sys.argv[ind + 1] else: fmt = 'png' if '-f' in sys.argv: ind = sys.argv.index("-f") filelist = [sys.argv[ind + 1]] else: filelist = os.listdir(dir_path) ## initialize some variables samp_file = 'samples.txt' azimuth_key = 'azimuth' meas_file = 'measurements.txt' loc_key = 'location' loc_file = 'locations.txt' method_key = 'method_codes' dec_key = 'dir_dec' inc_key = 'dir_inc' tilt_corr_key = "dir_tilt_correction" aniso_tilt_corr_key = "aniso_tilt_correction" hyst_bcr_key = "hyst_bcr" hyst_mr_key = "hyst_mr_moment" hyst_ms_key = "hyst_ms_moment" hyst_bc_key = "hyst_bc" Mkeys = ['magnitude', 'magn_moment', 'magn_volume', 'magn_mass'] results_file = 'sites.txt' hyst_file = 'specimens.txt' aniso_file = 'specimens.txt' # create contribution and propagate data throughout con = cb.Contribution() con.propagate_location_to_measurements() con.propagate_location_to_specimens() con.propagate_location_to_samples() if not con.tables: print('-E- No MagIC tables could be found in this directory') error_log("No MagIC tables found") return # check to see if propagation worked, otherwise you can't plot by location lowest_table = None for table in con.ancestry: if table in con.tables: lowest_table = table break do_full_directory = False # check that locations propagated down to the lowest table in the contribution if 'location' in con.tables[lowest_table].df.columns: # are there any locations in the lowest 
table? if not all(con.tables[lowest_table].df['location'].isnull()): locs = con.tables['locations'].df.index.unique() lowest_locs = con.tables[lowest_table].df['location'].unique() incorrect_locs = set(lowest_locs).difference(set(locs)) # are they actual locations? if not incorrect_locs: info_log( 'location names propagated to {}'.format(lowest_table)) else: do_full_directory = True error_log('location names did not propagate fully to {} table'. format(lowest_table)) else: do_full_directory = True error_log( 'could not propagate location names down to {} table'.format( lowest_table)) else: do_full_directory = True error_log('could not propagate location names down to {} table'.format( lowest_table)) all_data = {} all_data['measurements'] = con.tables.get('measurements', None) all_data['specimens'] = con.tables.get('specimens', None) all_data['samples'] = con.tables.get('samples', None) all_data['sites'] = con.tables.get('sites', None) all_data['locations'] = con.tables.get('locations', None) locations = con.tables['locations'].df.index.unique() dirlist = [loc for loc in locations if cb.not_null(loc) and loc != 'nan'] if not dirlist: dirlist = ["./"] if do_full_directory: dirlist = ["./"] # plot the whole contribution as one location if dirlist == ["./"]: error_log('plotting the entire contribution as one location') for fname in os.listdir("."): if fname.endswith(".txt"): shutil.copy(fname, "tmp_" + fname) # if possible, go through all data by location # use tmp_*.txt files to separate out by location for loc in dirlist: print('\nworking on: ', loc) def get_data(dtype, loc_name): """ Extract data of type dtype for location loc_name. Write tmp_dtype.txt files if possible. 
""" if cb.not_null(all_data[dtype]): data_container = all_data[dtype] data_df = data_container.df[data_container.df['location'] == loc_name] data = data_container.convert_to_pmag_data_list(df=data_df) res = data_container.write_magic_file( 'tmp_{}.txt'.format(dtype), df=data_df) if not res: return [] return data meas_data = get_data('measurements', loc) spec_data = get_data('specimens', loc) samp_data = get_data('samples', loc) site_data = get_data('sites', loc) location_data = get_data('locations', loc) if loc == "./": # if you can't sort by location, do everything together try: meas_data = con.tables[ 'measurements'].convert_to_pmag_data_list() except KeyError: meas_data = None try: spec_data = con.tables['specimens'].convert_to_pmag_data_list() except KeyError: spec_data = None try: samp_data = con.tables['samples'].convert_to_pmag_data_list() except KeyError: samp_data = None try: site_data = con.tables['sites'].convert_to_pmag_data_list() except KeyError: site_data = None crd = 's' if samp_file in filelist: # find coordinate systems samps = samp_data file_type = "samples" # get all non blank sample orientations Srecs = pmag.get_dictitem(samps, azimuth_key, '', 'F') if len(Srecs) > 0: crd = 'g' print('using geographic coordinates') else: print('using specimen coordinates') else: if VERBOSE: print('-I- No sample data found') if meas_file in filelist: # start with measurement data print('working on measurements data') data = meas_data file_type = 'measurements' # looking for zeq_magic possibilities # get all non blank method codes AFZrecs = pmag.get_dictitem(data, method_key, 'LT-AF-Z', 'has') # get all non blank method codes TZrecs = pmag.get_dictitem(data, method_key, 'LT-T-Z', 'has') # get all non blank method codes MZrecs = pmag.get_dictitem(data, method_key, 'LT-M-Z', 'has') # get all dec measurements Drecs = pmag.get_dictitem(data, dec_key, '', 'F') # get all inc measurements Irecs = pmag.get_dictitem(data, inc_key, '', 'F') for key in Mkeys: Mrecs = 
pmag.get_dictitem(data, key, '', 'F') # get intensity data if len(Mrecs) > 0: break # potential for stepwise demag curves if len(AFZrecs) > 0 or len(TZrecs) > 0 or len(MZrecs) > 0 and len( Drecs) > 0 and len(Irecs) > 0 and len(Mrecs) > 0: CMD = 'zeq_magic.py -f tmp_measurements.txt -fsp tmp_specimens.txt -fsa tmp_samples.txt -fsi tmp_sites.txt -sav -fmt ' + fmt + ' -crd ' + crd print(CMD) info_log(CMD, loc) os.system(CMD) # looking for thellier_magic possibilities if len(pmag.get_dictitem(data, method_key, 'LP-PI-TRM', 'has')) > 0: CMD = 'thellier_magic.py -f tmp_measurements.txt -fsp tmp_specimens.txt -sav -fmt ' + fmt print(CMD) info_log(CMD, loc) os.system(CMD) # looking for hysteresis possibilities if len(pmag.get_dictitem(data, method_key, 'LP-HYS', 'has')) > 0: # find hyst experiments # check for reqd columns missing = check_for_reqd_cols(data, ['treat_temp']) if missing: error_log( 'LP-HYS method code present, but required column(s) [{}] missing' .format(", ".join(missing)), loc, "quick_hyst.py") else: CMD = 'quick_hyst.py -f tmp_measurements.txt -sav -fmt ' + fmt print(CMD) info_log(CMD, loc) os.system(CMD) # equal area plots of directional data # at measurment level (by specimen) if data: missing = check_for_reqd_cols(data, ['dir_dec', 'dir_inc']) if not missing: CMD = "eqarea_magic.py -f tmp_measurements.txt -obj spc -sav -no-tilt -fmt " + fmt print(CMD) os.system(CMD) info_log(CMD, loc, "eqarea_magic.py") else: if VERBOSE: print('-I- No measurement data found') # site data if results_file in filelist: print('-I- result file found', results_file) data = site_data file_type = 'sites' print('-I- working on site directions') print('number of datapoints: ', len(data), loc) dec_key = 'dir_dec' inc_key = 'dir_inc' int_key = 'int_abs' SiteDIs = pmag.get_dictitem(data, dec_key, "", 'F') # find decs SiteDIs = pmag.get_dictitem(SiteDIs, inc_key, "", 'F') # find decs and incs dir_data_found = len(SiteDIs) print('{} Dec/inc pairs found'.format(dir_data_found)) # only 
individual results - not poles # get only individual results (if result_type col is available) if SiteDIs: if 'result_type' in SiteDIs[0]: SiteDIs = pmag.get_dictitem(SiteDIs, 'result_type', 'i', 'has') # then convert tilt_corr_key to correct format old_SiteDIs = SiteDIs SiteDIs = [] for rec in old_SiteDIs: if tilt_corr_key not in rec: error_log( "Directional data found, but missing {}, can't plot directions" .format(tilt_corr_key), loc, "eqarea_magic.py") break if cb.is_null( rec[tilt_corr_key]) and rec[tilt_corr_key] != 0: rec[tilt_corr_key] = "" else: try: rec[tilt_corr_key] = str( int(float(rec[tilt_corr_key]))) except ValueError: rec[tilt_corr_key] = "" SiteDIs.append(rec) print('number of individual directions: ', len(SiteDIs)) # tilt corrected coordinates SiteDIs_t = pmag.get_dictitem(SiteDIs, tilt_corr_key, '100', 'T', float_to_int=True) print('number of tilt corrected directions: ', len(SiteDIs_t)) SiteDIs_g = pmag.get_dictitem( SiteDIs, tilt_corr_key, '0', 'T', float_to_int=True) # geographic coordinates print('number of geographic directions: ', len(SiteDIs_g)) SiteDIs_s = pmag.get_dictitem( SiteDIs, tilt_corr_key, '-1', 'T', float_to_int=True) # sample coordinates print('number of sample directions: ', len(SiteDIs_s)) SiteDIs_x = pmag.get_dictitem(SiteDIs, tilt_corr_key, '', 'T') # no coordinates print('number of no coordinates directions: ', len(SiteDIs_x)) if len(SiteDIs_t) > 0 or len(SiteDIs_g) > 0 or len( SiteDIs_s) > 0 or len(SiteDIs_x) > 0: CRD = "" if len(SiteDIs_t) > 0: CRD = ' -crd t' elif len(SiteDIs_g) > 0: CRD = ' -crd g' elif len(SiteDIs_s) > 0: CRD = ' -crd s' CMD = 'eqarea_magic.py -f tmp_sites.txt -fsp tmp_specimens.txt -fsa tmp_samples.txt -flo tmp_locations.txt -sav -fmt ' + fmt + CRD print(CMD) info_log(CMD, loc) os.system(CMD) else: if dir_data_found: error_log( '{} dec/inc pairs found, but no equal area plots were made' .format(dir_data_found), loc, "equarea_magic.py") # print('-I- working on VGP map') VGPs = 
pmag.get_dictitem(SiteDIs, 'vgp_lat', "", 'F') # are there any VGPs? if len(VGPs) > 0: # YES! CMD = 'vgpmap_magic.py -f tmp_sites.txt -prj moll -res c -sym ro 5 -sav -fmt png' print(CMD) info_log(CMD, loc, 'vgpmap_magic.py') os.system(CMD) else: print('-I- No vgps found') print('-I- Look for intensities') # is there any intensity data? if site_data: if int_key in site_data[0].keys(): # old way, wasn't working right: #CMD = 'magic_select.py -key ' + int_key + ' 0. has -F tmp1.txt -f tmp_sites.txt' Selection = pmag.get_dictkey(site_data, int_key, dtype="f") with open('intensities.txt', 'w') as out: for rec in Selection: if rec != 0: out.write(str(rec * 1e6) + "\n") histfile = 'LO:_' + loc + \ '_TY:_intensities_histogram:_.' + fmt # maybe run histplot.main here instead, so you can return an error message CMD = "histplot.py -b 1 -xlab 'Intensity (uT)' -sav -f intensities.txt -F " + histfile os.system(CMD) info_log(CMD, loc) print(CMD) else: print('-I- No intensities found') else: print('-I- No intensities found') ## if hyst_file in filelist: print('working on hysteresis', hyst_file) data = spec_data file_type = 'specimens' hdata = pmag.get_dictitem(data, hyst_bcr_key, '', 'F') hdata = pmag.get_dictitem(hdata, hyst_mr_key, '', 'F') hdata = pmag.get_dictitem(hdata, hyst_ms_key, '', 'F') # there are data for a dayplot hdata = pmag.get_dictitem(hdata, hyst_bc_key, '', 'F') if len(hdata) > 0: CMD = 'dayplot_magic.py -f tmp_specimens.txt -sav -fmt ' + fmt info_log(CMD, loc) print(CMD) else: print('no hysteresis data found') if aniso_file in filelist: # do anisotropy plots if possible print('working on anisotropy', aniso_file) data = spec_data file_type = 'specimens' # make sure there is some anisotropy data if not data: print('No anisotropy data found') elif 'aniso_s' not in data[0]: print('No anisotropy data found') else: # get specimen coordinates if aniso_tilt_corr_key not in data[0]: sdata = data else: sdata = pmag.get_dictitem(data, aniso_tilt_corr_key, '-1', 'T', 
float_to_int=True) # get specimen coordinates gdata = pmag.get_dictitem(data, aniso_tilt_corr_key, '0', 'T', float_to_int=True) # get specimen coordinates tdata = pmag.get_dictitem(data, aniso_tilt_corr_key, '100', 'T', float_to_int=True) CRD = "" CMD = 'aniso_magic.py -x -B -sav -fmt ' + fmt if len(sdata) > 3: CMD = CMD + ' -crd s' print(CMD) info_log(CMD, loc) os.system(CMD) if len(gdata) > 3: CMD = CMD + ' -crd g' print(CMD) info_log(CMD, loc) os.system(CMD) if len(tdata) > 3: CMD = CMD + ' -crd t' print(CMD) info_log(CMD, loc) os.system(CMD) # remove temporary files for fname in glob.glob('tmp*.txt'): os.remove(fname) try: os.remove('intensities.txt') except FileNotFoundError: pass if loc_file in filelist: data, file_type = pmag.magic_read(loc_file) # read in location data print('-I- working on pole map') poles = pmag.get_dictitem(data, 'pole_lat', "", 'F') # are there any poles? poles = pmag.get_dictitem(poles, 'pole_lon', "", 'F') # are there any poles? if len(poles) > 0: # YES! CMD = 'polemap_magic.py -sav -fmt png' print(CMD) info_log(CMD, "all locations", "polemap_magic.py") os.system(CMD) else: print('-I- No poles found')
def main():
    """
    NAME
        magic_geomagia.py

    DESCRIPTION
       Takes a MagIC file and outputs data for easier input into Max Brown's
       GEOMAGIA database

       Requires the habanero python package to be installed.
       Try "pip install habanero" if you get a
       "ModuleNotFoundError: No module named 'habanero'" error.

    SYNTAX
        magic_geomagia.py [command line options]

    OPTIONS
        -h: prints the help message and quits.
        -f FILE: the MagIC data file name that will be converted to GEOMAGIA files

    OUTPUT:
        Print to standard out the GEOMAGIA insert command for the reference
        and the site level data

    EXAMPLE:
        magic_geomagia.py -f magic_contribution_16578.txt

        Nick Jarboe
    """
    if '-h' in sys.argv:  # check if help is needed
        print(main.__doc__)
        sys.exit()  # graceful quit
    if '-f' in sys.argv:
        ind = sys.argv.index('-f')
        file_name = sys.argv[ind+1]
    else:
        print("MagIC file name needed. Please add the file name after the -f option.")
        sys.exit()

    # Create all the table files from the magic.txt file so they can be
    # imported by the Contribution object below.
    command = "download_magic.py -f " + file_name
    os.system(command)

    md = cb.Contribution()  # md stands for magic file data
    md.propagate_location_to_measurements()
    md.propagate_location_to_specimens()
    md.propagate_location_to_samples()
    if not md.tables:
        print('-E- No MagIC tables could be found in this directory')
        error_log("No MagIC tables found")
        return

    doi = md.tables['contribution'].df.iloc[0]['reference']
    # renamed from `id` to avoid shadowing the builtin
    contribution_id = md.tables['contribution'].df.iloc[0]['id']
    timestamp = md.tables['contribution'].df.iloc[0]['timestamp']
    contributor = md.tables['contribution'].df.iloc[0]['contributor']
    print("c=", contributor)
    contributor = contributor.replace('@', '')
    print("c=", contributor)

    # Look up the reference metadata (authors, title, journal, ...) via Crossref.
    cr = Crossref()
    ref = cr.works(doi)
    # authors = "Doe J.X., Alexander,T.G."
    status = ref["status"]
    message = ref["message"]
    authors = message["author"]
    # Build "Family G.I., ..." style author list with abbreviated given names.
    authorList = ""
    for author in authors:
        author_given = ""
        names = author['given'].split(' ')
        for name in names:
            author_given += name[0] + "."
        authorList += author['family'] + " " + author_given + ", "
    authorList = authorList[:-2]  # drop trailing ", "
    title = message['title'][0]
    year = message['created']['date-parts'][0][0]
    journal = message['short-container-title'][0]
    volume = message['volume']
    pages = '0'
    if "page" in message.keys():
        pages = message['page']
    url = "https://earthref.org/MagIC/doi/" + doi

    print("REFS")
    print("Insert into REFS values(NULL,'", authorList, "','", title, "', ", year, ", '", journal, "', ", volume, ", '", pages, "', '", doi, "', '", url, "');", sep='')
    print()
    print("ARCHEODIJ")

    sites = md.tables['sites'].df
    locations = md.tables['locations'].df
    print("UID,NUM_SAMPLES,NUM_ACC_SPEC,NUM_MEAS_SPEC,BA,SIGMA_BA,AGE,AGE_MIN,AGE_MAX,NUM_SIGMAS,AGE_ERROR_TYPE_ID,SITE_LAT,SITE_LON,VADM,SIGMA_VADM,SITE_ID,PI_METHODS_ID,AC_ID,MD_CK_ID,AN_CORR_ID,CR_CORR_ID,DM_METHOD_ID,AF_STEP,T_STEP,DM_ANALYSIS_ID,SPECIMEN_TYPE_ID,MATERIAL_ID,REFERENCE_ID,NUM_C14_SAMPLES,C14_ID,CALIB_C14_AGE,CALIB_C14_AGE_SIGMA_MIN,CALIB_C14_AGE_SIGMA_MAX,NUM_C14_SIGMAS,CALC_CALIB_C14_AGE,CALC_CALIB_C14_AGE_SIGMA_MIN,CALC_CALIB_C14_AGE_SIGMA_MAX,C14_CALIB_SOFTWARE_ID,CALC_C14_CALIB_SOFTWARE_ID,C14_CALIB_DATASET_ID,CALC_C14_CALIB_DATASET_ID,DENDRO_ID,TOT_NUM_DENDRO,NUM_DENDRO_USED,DATING_METHOD_ID,NUM_DIR_SAMPLES,NUM_DIR_SPECIMENS,NUM_DIR_SPEC_COLLECTED,DECL,INCL,ALPHA_95,K,VDM,SIGMA_VDM,SAMPLE_ID,c_csv,SITE_NAME,SITE_HORIZON,1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014,SUPERSEEDED,UPLOAD_YEAR,UPLOAD_MONTH,UPLOADER,EDITOR,EDIT_DATE,NOTES")

    for index, row in sites.iterrows():
        # -1 is GEOMAGIA's "no data" value for these numeric fields.
        int_n_samples, int_n_specimens, int_n_total_specimens, int_abs, int_abs_sigma = -1, -1, -1, -1, -1
        if 'int_n_samples' in sites.columns.values:
            int_n_samples = row['int_n_samples']
        if 'int_n_specimens' in sites.columns.values:
            int_n_specimens = row['int_n_specimens']
        if 'int_n_total_specimens' in sites.columns.values:
            int_n_total_specimens = row['int_n_total_specimens']
        if int_n_specimens == -1 and int_n_samples > 0:
            # FIX: was assigned to misspelled `int_n_spcimens`, so the
            # sample-count fallback never took effect.
            int_n_specimens = int_n_samples
        if 'int_abs' in sites.columns.values:
            int_abs = row['int_abs']
            if int_abs is not None:
                int_abs = round(int_abs*1e6, 1)  # T -> uT
        if 'int_abs_sigma' in sites.columns.values:
            int_abs_sigma = row['int_abs_sigma']
            if int_abs_sigma is not None:
                int_abs_sigma = round(row['int_abs_sigma']*1e6, 1)  # T -> uT

        age, age_high, age_low = -1e9, -1e9, -1e9
        age_error_type = '0'
        if 'age_unit' not in sites.columns.values:
            print("Malformed Magic sites data table. Required column row 'age_unit' is missing")
            sys.exit()
        age_unit = row['age_unit']
        if 'age' in sites.columns.values:
            age = row['age']
            age = pmag.age_to_BP(age, age_unit)
        if 'age_high' in sites.columns.values:
            age_high = row['age_high']
            age_high = pmag.age_to_BP(age_high, age_unit)
        if 'age_low' in sites.columns.values:
            age_low = row['age_low']
            age_low = pmag.age_to_BP(age_low, age_unit)
        if 'age_sigma' in sites.columns.values:
            age_sigma = row['age_sigma']
            age_sigma = pmag.age_to_BP(age_sigma, age_unit)
            age_high = age + age_sigma
            age_low = age - age_sigma
            age_error_type = '5'  # MagIC is one sigma for all sigma values
        if age_low > age_high:
            # MagIC lets age_high and age_low be in any order. Fix that for GEOMAGIA
            age_high, age_low = age_low, age_high
        if age == -1e9:
            # If only age_low and age_high are in the MagIC file then calculate the age.
            age = (age_high + age_low) / 2
            age_error_type = '8'  # If MagIC has only high and low then error type is "range"
        # GEOMAGIA has the max and min as differences from the age, not absolute.
        age_min = age - age_low
        age_max = age_high - age
        age_BP = age
        age = 1950 - age  # GEOMAGIA wants +-AD/BC so convert BP to AD/-BC

        lat = row['lat']
        lon = row['lon']
        vadm, vadm_sigma = -1, -1
        if 'vadm' in sites.columns.values:
            vadm = row['vadm']
            vadm = vadm/1e22
        if 'vadm_sigma' in sites.columns.values:
            # FIX: previously re-read row['vadm'] into `vadm`, leaving
            # vadm_sigma at -1 and clobbering vadm.
            vadm_sigma = row['vadm_sigma']
            vadm_sigma = vadm_sigma/1e22
        site_name = row['site']

        # For paleointensity codes just give the method code list and Max will
        # decide on the right GEOMAGIA code.
        method_codes = "No MagIC method codes available"
        if 'method_codes' in sites.columns.values:
            method_codes = row['method_codes']
        # Just give Max all the method codes for him to decide for now
        paleointensity_procedure = method_codes

        alteration_monitor = method_codes_to_geomagia(method_codes, 'ALTERATION_MONIT_CORR')
        multidomain_check = method_codes_to_geomagia(method_codes, 'MD_CHECKS')
        anisotropy_correction = method_codes_to_geomagia(method_codes, 'ANISOTROPY_CORRECTION')
        cooling_rate = method_codes_to_geomagia(method_codes, 'COOLING_RATE')
        demag_method = method_codes_to_geomagia(method_codes, 'DM_METHODS')
        demag_analysis = method_codes_to_geomagia(method_codes, 'DM_ANALYSIS')
        specimen_shape = method_codes_to_geomagia(method_codes, 'SPECIMEN_TYPE_ID')

        materials = ""
        geologic_types = ""
        if 'geologic_types' in sites.columns.values:
            geologic_types = row['geologic_types']
        if ":" in geologic_types:
            gtypes = geologic_types.split(":")
            for gtype in gtypes:
                materials = materials + pmag.vocab_convert(gtype, "geomagia") + ":"
            materials = materials[:-1]
        else:
            materials = pmag.vocab_convert(geologic_types, "geomagia")

        geochron_codes = ""
        if ":" in method_codes:
            gcodes = method_codes.split(":")
            for gcode in gcodes:
                if "GM-" == gcode[:3]:
                    geochron_codes = geochron_codes + pmag.vocab_convert(gcode, "geomagia") + ":"
            geochron_codes = geochron_codes[:-1]
        else:
            # NOTE(review): this converts the (empty) geochron_codes, not
            # method_codes — looks suspicious but preserved as-is; confirm
            # intended behavior with the original author.
            geochron_codes = pmag.vocab_convert(geochron_codes, "geomagia")
        if geochron_codes == "":
            geochron_codes = "0"

        dir_n_samples = "-1"
        if 'dir_n_samples' in sites.columns.values:
            dir_n_samples = row['dir_n_samples']
        # (duplicate dir_n_samples extraction removed)
        dir_n_specimens = "-1"  # Not in MagIC
        # using total number of samples for total specimen number
        dir_n_total_samples = "-1"
        if 'dir_n_total_samples' in sites.columns.values:
            dir_n_total_samples = row['dir_n_total_samples']
        dir_dec = "999"
        if 'dir_dec' in sites.columns.values:
            dir_dec = row['dir_dec']
        dir_inc = "999"
        if 'dir_inc' in sites.columns.values:
            dir_inc = row['dir_inc']
        dir_alpha95 = "-1"
        if 'dir_alpha95' in sites.columns.values:
            dir_alpha95 = row['dir_alpha95']
        dir_k = "-1"
        if 'dir_k' in sites.columns.values:
            dir_k = row['dir_k']
        vdm = -1
        if 'vdm' in sites.columns.values:
            vdm = float(row['vdm'])
            vdm = vdm/1e22
        vdm_sigma = -1
        if 'vdm_sigma' in sites.columns.values:
            vdm_sigma = float(row['vdm_sigma'])
            vdm_sigma = vdm_sigma/1e22

        # Could try and get sample names from samples table (using Contribution
        # object) but just taking the list if it exists for now.
        sample_list = "-1"
        if 'samples' in sites.columns.values:
            sample_list = row['samples']
        # c_csv is in GEOMAGIA insert. What it is I don't know. Max said set to 0
        c_csv = '0'

        # This place_id is SITE_ID in GEOMAGIA
        place_id = "0"
        location = row['location']
        if 'state_province' in locations.columns.values:
            place = locations.loc[location, 'state_province']
            if place != "":
                place_id = pmag.vocab_convert(place, 'GEOMAGIA')
        if place_id == "0":
            if 'country' in locations.columns.values:
                place = locations.loc[location, 'country']
                if place != "":
                    place_id = pmag.vocab_convert(place, 'GEOMAGIA')
        if place_id == "0":
            if 'continent_ocean' in locations.columns.values:
                # FIX: previously assigned to place_id and then tested the
                # stale `place` value from an earlier branch.
                place = locations.loc[location, 'continent_ocean']
                if place != "":
                    place_id = pmag.vocab_convert(place, 'GEOMAGIA')

        site = row['site']
        dt = dateutil.parser.parse(timestamp)
        description = "-1"
        if 'description' in sites.columns.values:
            description = row['description']

        # GEOMAGIA only takes data younger than 50 ka BP.
        if age_BP <= 50000:
            # FIX: second direction count is NUM_DIR_SPECIMENS; was passing
            # dir_n_samples twice and never using dir_n_specimens.
            print("0", int_n_samples, int_n_specimens, int_n_total_specimens, int_abs, int_abs_sigma, age, age_min, age_max, "1", age_error_type, lat, lon, vadm, vadm_sigma, place_id, paleointensity_procedure, alteration_monitor, multidomain_check, anisotropy_correction, cooling_rate, demag_method, "0", "0", demag_analysis, specimen_shape, materials, doi, "-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1", geochron_codes, dir_n_samples, dir_n_specimens, dir_n_total_samples, dir_dec, dir_inc, dir_alpha95, dir_k, vdm, vdm_sigma, sample_list, c_csv, location, site, "-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1", "-1", dt.year, dt.month, contributor, "-1,-1", description, sep=',')
def main():
    """
    NAME
        quick_hyst.py

    DESCRIPTION
        makes plots of hysteresis data

    SYNTAX
        quick_hyst.py [command line options]

    OPTIONS
        -h prints help message and quits
        -f: specify input file, default is measurements.txt
        -spc SPEC: specify specimen name to plot and quit
        -sav save all plots and quit
        -fmt [png,svg,eps,jpg]
    """
    args = sys.argv
    if "-h" in args:
        print(main.__doc__)
        sys.exit()
    plots = 0
    pltspec = ""
    verbose = pmagplotlib.verbose
    #version_num = pmag.get_version()
    dir_path = pmag.get_named_arg('-WD', '.')
    dir_path = os.path.realpath(dir_path)
    meas_file = pmag.get_named_arg('-f', 'measurements.txt')
    fmt = pmag.get_named_arg('-fmt', 'png')
    if '-sav' in args:
        verbose = 0
        plots = 1
    if '-spc' in args:
        ind = args.index("-spc")
        pltspec = args[ind + 1]
        verbose = 0
        plots = 1
    #
    con = cb.Contribution(dir_path, read_tables=['measurements'],
                          custom_filenames={'measurements': meas_file})
    # get as much name data as possible (used for naming plots)
    if not 'measurements' in con.tables:
        print("-W- No measurement file found")
        return
    con.propagate_location_to_measurements()
    if 'measurements' not in con.tables:
        print(main.__doc__)
        print('bad file')
        sys.exit()
    meas_container = con.tables['measurements']
    #meas_df = meas_container.df
    #
    # initialize some variables
    # define figure numbers for hyst,deltaM,DdeltaM curves
    HystRecs = []
    HDD = {}
    HDD['hyst'] = 1
    pmagplotlib.plot_init(HDD['hyst'], 5, 5)
    #
    # get list of unique experiment names and specimen names
    #
    sids = []
    hyst_data = meas_container.get_records_for_code('LP-HYS')
    #experiment_names = hyst_data['experiment_name'].unique()
    if not len(hyst_data):
        print("-W- No hysteresis data found")
        return
    sids = hyst_data['specimen'].unique()
    # if 'treat_temp' is provided, use that value, otherwise assume 300
    hyst_data['treat_temp'].where(
        hyst_data['treat_temp'].notnull(), '300', inplace=True)
    # start at first specimen, or at provided specimen ('-spc')
    k = 0
    if pltspec != "":
        try:
            print(sids)
            k = list(sids).index(pltspec)
        except ValueError:
            print('-W- No specimen named: {}.'.format(pltspec))
            print('-W- Please provide a valid specimen name')
            return
    intlist = ['magn_moment', 'magn_volume', 'magn_mass']
    while k < len(sids):
        locname, site, sample, synth = '', '', '', ''
        s = sids[k]
        if verbose:
            print(s, k + 1, 'out of ', len(sids))
        # B, M for hysteresis, Bdcd,Mdcd for irm-dcd data
        B, M = [], []
        # get all measurements for this specimen
        spec = hyst_data[hyst_data['specimen'] == s]
        # get names
        if 'location' in spec:
            locname = spec['location'].iloc[0]
        if 'site' in spec:
            # FIX: was reading spec['sample'] here, so the site slot in plot
            # names carried the sample name.
            site = spec['site'].iloc[0]
        if 'sample' in spec:
            sample = spec['sample'].iloc[0]
        # get all records with non-blank values in any intlist column
        # find intensity data
        for int_column in intlist:
            if int_column in spec.columns:
                int_col = int_column
                break
        # FIX: filter on the captured int_col, not the loop variable left
        # over after the break (same value on the happy path, but int_col is
        # the name the rest of the loop uses).
        meas_data = spec[spec[int_col].notnull()]
        if len(meas_data) == 0:
            break
        #
        c = ['k-', 'b-', 'c-', 'g-', 'm-', 'r-', 'y-']
        cnum = 0
        Temps = []
        xlab, ylab, title = '', '', ''
        Temps = meas_data['treat_temp'].unique()
        for t in Temps:
            print('working on t: ', t)
            t_data = meas_data[meas_data['treat_temp'] == t]
            m = int_col
            B = t_data['meas_field_dc'].astype(float).values
            M = t_data[m].astype(float).values
            # now plot the hysteresis curve(s)
            #
            if len(B) > 0:
                B = numpy.array(B)
                M = numpy.array(M)
                if t == Temps[-1]:
                    xlab = 'Field (T)'
                    ylab = m
                    title = 'Hysteresis: ' + s
                if t == Temps[0]:
                    pmagplotlib.clearFIG(HDD['hyst'])
                pmagplotlib.plot_xy(
                    HDD['hyst'], B, M, sym=c[cnum], xlab=xlab, ylab=ylab, title=title)
                pmagplotlib.plot_xy(HDD['hyst'], [1.1 * B.min(), 1.1 * B.max()],
                                    [0, 0], sym='k-', xlab=xlab, ylab=ylab, title=title)
                pmagplotlib.plot_xy(HDD['hyst'], [0, 0],
                                    [1.1 * M.min(), 1.1 * M.max()],
                                    sym='k-', xlab=xlab, ylab=ylab, title=title)
                if verbose and not set_env.IS_WIN:
                    pmagplotlib.draw_figs(HDD)
                cnum += 1
                if cnum == len(c):
                    cnum = 0
        #
        files = {}
        if plots:
            if pltspec != "":
                s = pltspec
            for key in list(HDD.keys()):
                if pmagplotlib.isServer:
                    if synth == '':
                        files[key] = "LO:_" + locname + '_SI:_' + site + \
                            '_SA:_' + sample + '_SP:_' + s + '_TY:_' + key + '_.' + fmt
                    else:
                        files[key] = 'SY:_' + synth + '_TY:_' + key + '_.' + fmt
                else:
                    if synth == '':
                        filename = ''
                        for item in [locname, site, sample, s, key]:
                            if item:
                                item = item.replace(' ', '_')
                                filename += item + '_'
                        if filename.endswith('_'):
                            filename = filename[:-1]
                        filename += ".{}".format(fmt)
                        files[key] = filename
                    else:
                        files[key] = "{}_{}.{}".format(synth, key, fmt)
            pmagplotlib.save_plots(HDD, files)
            if pltspec != "":
                sys.exit()
        if verbose:
            pmagplotlib.draw_figs(HDD)
            ans = input(
                "S[a]ve plots, [s]pecimen name, [q]uit, <return> to continue\n ")
            if ans == "a":
                files = {}
                for key in list(HDD.keys()):
                    if pmagplotlib.isServer:
                        # use server plot naming convention
                        files[key] = "LO:_" + locname + '_SI:_' + site + \
                            '_SA:_' + sample + '_SP:_' + s + '_TY:_' + key + '_.' + fmt
                    else:
                        # use more readable plot naming convention
                        filename = ''
                        for item in [locname, site, sample, s, key]:
                            if item:
                                item = item.replace(' ', '_')
                                filename += item + '_'
                        if filename.endswith('_'):
                            filename = filename[:-1]
                        filename += ".{}".format(fmt)
                        files[key] = filename
                pmagplotlib.save_plots(HDD, files)
            if ans == '':
                k += 1
            if ans == "p":
                del HystRecs[-1]
                k -= 1
            if ans == 'q':
                print("Good bye")
                sys.exit()
            if ans == 's':
                keepon = 1
                specimen = input(
                    'Enter desired specimen name (or first part there of): ')
                while keepon == 1:
                    try:
                        k = sids.index(specimen)
                        keepon = 0
                    except:
                        tmplist = []
                        for qq in range(len(sids)):
                            if specimen in sids[qq]:
                                tmplist.append(sids[qq])
                        print(specimen, " not found, but this was: ")
                        print(tmplist)
                        specimen = input('Select one or try again\n ')
                        k = sids.index(specimen)
        else:
            k += 1
        if len(B) == 0:
            if verbose:
                print('skipping this one - no hysteresis data')
            k += 1
def main(): """ NAME make_magic_plots.py DESCRIPTION inspects magic directory for available data and makes plots SYNTAX make_magic_plots.py [command line options] INPUT magic files OPTIONS -h prints help message and quits -f FILE specifies input file name -fmt [png,eps,svg,jpg,pdf] specify format, default is png """ if '-h' in sys.argv: print(main.__doc__) sys.exit() # reset log files for fname in ['log.txt', 'errors.txt']: f = os.path.join(os.getcwd(), fname) if os.path.exists(f): os.remove(f) image_recs = [] dirlist = ['./'] dir_path = os.getcwd() # if '-fmt' in sys.argv: ind = sys.argv.index("-fmt") fmt = sys.argv[ind + 1] else: fmt = 'png' if '-f' in sys.argv: ind = sys.argv.index("-f") filelist = [sys.argv[ind + 1]] else: filelist = os.listdir(dir_path) ## initialize some variables samp_file = 'samples.txt' meas_file = 'measurements.txt' #loc_key = 'location' loc_file = 'locations.txt' method_key = 'method_codes' dec_key = 'dir_dec' inc_key = 'dir_inc' tilt_corr_key = "dir_tilt_correction" aniso_tilt_corr_key = "aniso_tilt_correction" hyst_bcr_key = "hyst_bcr" hyst_mr_key = "hyst_mr_moment" hyst_ms_key = "hyst_ms_moment" hyst_bc_key = "hyst_bc" Mkeys = ['magnitude', 'magn_moment', 'magn_volume', 'magn_mass'] results_file = 'sites.txt' hyst_file = 'specimens.txt' aniso_file = 'specimens.txt' # create contribution and propagate data throughout full_con = cb.Contribution() full_con.propagate_location_to_measurements() full_con.propagate_location_to_specimens() full_con.propagate_location_to_samples() if not full_con.tables: print('-E- No MagIC tables could be found in this directory') error_log("No MagIC tables found") return # try to get the contribution id for error logging con_id = "" if 'contribution' in full_con.tables: if 'id' in full_con.tables['contribution'].df.columns: con_id = full_con.tables['contribution'].df.iloc[0]['id'] # check to see if propagation worked, otherwise you can't plot by location lowest_table = None for table in full_con.ancestry: if 
table in full_con.tables: lowest_table = table break do_full_directory = False # check that locations propagated down to the lowest table in the contribution if 'location' in full_con.tables[lowest_table].df.columns: if 'locations' not in full_con.tables: info_log( 'location names propagated to {}, but could not be validated'. format(lowest_table)) # are there any locations in the lowest table? elif not all(full_con.tables[lowest_table].df['location'].isnull()): locs = full_con.tables['locations'].df.index.unique() lowest_locs = full_con.tables[lowest_table].df['location'].unique() incorrect_locs = set(lowest_locs).difference(set(locs)) # are they actual locations? if not incorrect_locs: info_log( 'location names propagated to {}'.format(lowest_table)) else: do_full_directory = True error_log( 'location names did not propagate fully to {} table (looks like there are some naming inconsistencies between tables)' .format(lowest_table), con_id=con_id) else: do_full_directory = True error_log( 'could not propagate location names down to {} table'.format( lowest_table), con_id=con_id) else: do_full_directory = True error_log('could not propagate location names down to {} table'.format( lowest_table), con_id=con_id) all_data = {} all_data['measurements'] = full_con.tables.get('measurements', None) all_data['specimens'] = full_con.tables.get('specimens', None) all_data['samples'] = full_con.tables.get('samples', None) all_data['sites'] = full_con.tables.get('sites', None) all_data['locations'] = full_con.tables.get('locations', None) if 'locations' in full_con.tables: locations = full_con.tables['locations'].df.index.unique() else: locations = [''] dirlist = [ loc for loc in locations if cb.not_null(loc, False) and loc != 'nan' ] if not dirlist: dirlist = ["./"] if do_full_directory: dirlist = ["./"] # plot the whole contribution as one location if dirlist == ["./"]: error_log('plotting the entire contribution as one location', con_id=con_id) for fname in os.listdir("."): 
if fname.endswith(".txt"): shutil.copy(fname, "tmp_" + fname) # if possible, go through all data by location # use tmp_*.txt files to separate out by location for loc in dirlist: print('\nworking on: ', loc) def get_data(dtype, loc_name): """ Extract data of type dtype for location loc_name. Write tmp_dtype.txt files if possible. """ if cb.not_null(all_data[dtype], False): data_container = all_data[dtype] if loc_name == "./": data_df = data_container.df else: # awkward workaround for chars like "(" and "?" that break in regex try: data_df = data_container.df[data_container.df[ 'location'].astype(str).str.contains(loc_name, na=False)] except: #sre_constants.error: data_df = data_container.df[ data_container.df['location'] == loc_name] data = data_container.convert_to_pmag_data_list(df=data_df) res = data_container.write_magic_file( 'tmp_{}.txt'.format(dtype), df=data_df) if not res: return [], [] return data, data_df return [], [] meas_data, meas_df = get_data('measurements', loc) spec_data, spec_df = get_data('specimens', loc) samp_data, samp_df = get_data('samples', loc) site_data, site_df = get_data('sites', loc) loc_data, loc_df = get_data('locations', loc) con = cb.Contribution(read_tables=[]) con.tables['measurements'] = cb.MagicDataFrame(df=meas_df, dtype="measurements") con.tables['specimens'] = cb.MagicDataFrame(df=spec_df, dtype="specimens") con.tables['samples'] = cb.MagicDataFrame(df=samp_df, dtype="samples") con.tables['sites'] = cb.MagicDataFrame(df=site_df, dtype="sites") con.tables['locations'] = cb.MagicDataFrame(df=loc_df, dtype="locations") if loc == "./": # if you can't sort by location, do everything together con = full_con try: meas_data = con.tables[ 'measurements'].convert_to_pmag_data_list() except KeyError: meas_data = None try: spec_data = con.tables['specimens'].convert_to_pmag_data_list() except KeyError: spec_data = None try: samp_data = con.tables['samples'].convert_to_pmag_data_list() except KeyError: samp_data = None try: site_data = 
con.tables['sites'].convert_to_pmag_data_list() except KeyError: site_data = None crd = 's' if 'samples' in con.tables: if 'azimuth' in con.tables['samples'].df.columns: if any(con.tables['samples'].df['azimuth'].dropna()): crd = 'g' if crd == 's': print('using specimen coordinates') else: print('using geographic coordinates') if meas_file in filelist and meas_data: # start with measurement data print('working on plotting measurements data') data = meas_data file_type = 'measurements' # looking for zeq_magic possibilities # get all non blank method codes AFZrecs = pmag.get_dictitem(data, method_key, 'LT-AF-Z', 'has') # get all non blank method codes TZrecs = pmag.get_dictitem(data, method_key, 'LT-T-Z', 'has') # get all non blank method codes MZrecs = pmag.get_dictitem(data, method_key, 'LT-M-Z', 'has') # get all dec measurements Drecs = pmag.get_dictitem(data, dec_key, '', 'F') # get all inc measurements Irecs = pmag.get_dictitem(data, inc_key, '', 'F') for key in Mkeys: Mrecs = pmag.get_dictitem(data, key, '', 'F') # get intensity data if len(Mrecs) > 0: break # potential for stepwise demag curves if len(AFZrecs) > 0 or len(TZrecs) > 0 or len(MZrecs) > 0 and len( Drecs) > 0 and len(Irecs) > 0 and len(Mrecs) > 0: #CMD = 'zeq_magic.py -f tmp_measurements.txt -fsp tmp_specimens.txt -fsa tmp_samples.txt -fsi tmp_sites.txt -sav -fmt ' + fmt + ' -crd ' + crd + " -new" CMD = "ipmag.zeq_magic(crd={}, n_plots='all', contribution={}, image_records=True)".format( crd, con) print(CMD) info_log(CMD, loc) res, outfiles, zeq_images = ipmag.zeq_magic(crd=crd, n_plots='all', contribution=con, image_records=True) image_recs.extend(zeq_images) # looking for thellier_magic possibilities if len(pmag.get_dictitem(data, method_key, 'LP-PI-TRM', 'has')) > 0: #CMD = 'thellier_magic.py -f tmp_measurements.txt -fsp tmp_specimens.txt -sav -fmt ' + fmt CMD = "ipmag.thellier_magic(n_specs='all', fmt='png', contribution={}, image_records=True)".format( con) print(CMD) info_log(CMD, loc) res, 
outfiles, thellier_images = ipmag.thellier_magic( n_specs='all', fmt="png", contribution=con, image_records=True) image_recs.extend(thellier_images) # looking for hysteresis possibilities if len(pmag.get_dictitem(data, method_key, 'LP-HYS', 'has')) > 0: # find hyst experiments # check for reqd columns missing = check_for_reqd_cols(data, ['treat_temp']) if missing: error_log( 'LP-HYS method code present, but required column(s) [{}] missing' .format(", ".join(missing)), loc, "quick_hyst.py", con_id=con_id) else: #CMD = 'quick_hyst.py -f tmp_measurements.txt -sav -fmt ' + fmt CMD = "ipmag.quick_hyst(fmt='png', n_plots='all', contribution={}, image_records=True)".format( con) print(CMD) info_log(CMD, loc) res, outfiles, quick_hyst_recs = ipmag.quick_hyst( fmt="png", n_plots='all', contribution=con, image_records=True) image_recs.extend(quick_hyst_recs) # equal area plots of directional data # at measurement level (by specimen) if data: missing = check_for_reqd_cols(data, ['dir_dec', 'dir_inc']) if not missing: #CMD = "eqarea_magic.py -f tmp_measurements.txt -obj spc -sav -no-tilt -fmt " + fmt CMD = "ipmag.eqarea_magic(fmt='png', n_plots='all', ignore_tilt=True, plot_by='spc', contribution={}, source_table='measurements', image_records=True)".format( con) print(CMD) info_log(CMD, loc, "eqarea_magic.py") res, outfiles, eqarea_spc_images = ipmag.eqarea_magic( fmt="png", n_plots='all', ignore_tilt=True, plot_by="spc", contribution=con, source_table="measurements", image_records=True) image_recs.extend(eqarea_spc_images) else: if VERBOSE: print('-I- No measurement data found') # site data if results_file in filelist and site_data: print('-I- result file found', results_file) data = site_data file_type = 'sites' print('-I- working on site directions') print('number of datapoints: ', len(data), loc) dec_key = 'dir_dec' inc_key = 'dir_inc' int_key = 'int_abs' SiteDIs = pmag.get_dictitem(data, dec_key, "", 'F') # find decs SiteDIs = pmag.get_dictitem(SiteDIs, inc_key, "", 'F') 
# find decs and incs dir_data_found = len(SiteDIs) print('{} Dec/inc pairs found'.format(dir_data_found)) if SiteDIs: # then convert tilt_corr_key to correct format old_SiteDIs = SiteDIs SiteDIs = [] for rec in old_SiteDIs: if tilt_corr_key not in rec: rec[tilt_corr_key] = "0" # make sure tilt_corr_key is a correct format try: rec[tilt_corr_key] = str(int(float( rec[tilt_corr_key]))) except ValueError: rec[tilt_corr_key] = "0" SiteDIs.append(rec) print('number of individual directions: ', len(SiteDIs)) # tilt corrected coordinates SiteDIs_t = pmag.get_dictitem(SiteDIs, tilt_corr_key, '100', 'T', float_to_int=True) print('number of tilt corrected directions: ', len(SiteDIs_t)) SiteDIs_g = pmag.get_dictitem( SiteDIs, tilt_corr_key, '0', 'T', float_to_int=True) # geographic coordinates print('number of geographic directions: ', len(SiteDIs_g)) SiteDIs_s = pmag.get_dictitem( SiteDIs, tilt_corr_key, '-1', 'T', float_to_int=True) # sample coordinates print('number of sample directions: ', len(SiteDIs_s)) SiteDIs_x = pmag.get_dictitem(SiteDIs, tilt_corr_key, '', 'T') # no coordinates print('number of no coordinates directions: ', len(SiteDIs_x)) if len(SiteDIs_t) > 0 or len(SiteDIs_g) > 0 or len( SiteDIs_s) > 0 or len(SiteDIs_x) > 0: CRD = "" if len(SiteDIs_t) > 0: CRD = ' -crd t' crd = "t" elif len(SiteDIs_g) > 0: CRD = ' -crd g' crd = "g" elif len(SiteDIs_s) > 0: CRD = ' -crd s' crd = "s" #CMD = 'eqarea_magic.py -f tmp_sites.txt -fsp tmp_specimens.txt -fsa tmp_samples.txt -flo tmp_locations.txt -sav -fmt ' + fmt + CRD CMD = "ipmag.eqarea_magic(crd={}, fmt='png', n_plots='all', contribution={}, source_table='sites')".format( crd, con) print(CMD) info_log(CMD, loc) res, outfiles, eqarea_site_recs = ipmag.eqarea_magic( crd=crd, fmt="png", n_plots='all', contribution=con, source_table="sites", image_records=True) image_recs.extend(eqarea_site_recs) else: if dir_data_found: error_log( '{} dec/inc pairs found, but no equal area plots were made' .format(dir_data_found), loc, 
"equarea_magic.py", con_id=con_id) # print('-I- working on VGP map') VGPs = pmag.get_dictitem(SiteDIs, 'vgp_lat', "", 'F') # are there any VGPs? if len(VGPs) > 0: # YES! #CMD = 'vgpmap_magic.py -f tmp_sites.txt -prj moll -res c -sym ro 5 -sav -fmt png' CMD = "ipmag.vgpmap_magic(proj='moll', sym='ro', size=5, fmt='png', contribution={})".format( con) print(CMD) info_log(CMD, loc, 'vgpmap_magic.py') res, outfiles, vgpmap_recs = ipmag.vgpmap_magic( proj='moll', sym='ro', size=5, fmt="png", contribution=con, image_records=True) image_recs.extend(vgpmap_recs) else: print('-I- No vgps found') print('-I- Look for intensities') # is there any intensity data? if site_data: if int_key in site_data[0].keys(): # old way, wasn't working right: #CMD = 'magic_select.py -key ' + int_key + ' 0. has -F tmp1.txt -f tmp_sites.txt' Selection = pmag.get_dictkey(site_data, int_key, dtype="f") selection = [i * 1e6 for i in Selection if i != 0] loc = loc.replace(" ", "_") if loc == "./": loc_name = "" else: loc_name = loc histfile = 'LO:_' + loc_name + \ '_TY:_intensities_histogram:_.' 
+ fmt CMD = "histplot.py -twin -b 1 -xlab 'Intensity (uT)' -sav -f intensities.txt -F " + histfile CMD = "ipmag.histplot(data=selection, outfile=histfile, xlab='Intensity (uT)', binsize=1, norm=-1, save_plots=True)".format( histfile) info_log(CMD, loc) print(CMD) ipmag.histplot(data=selection, outfile=histfile, xlab="Intensity (uT)", binsize=1, norm=-1, save_plots=True) histplot_rec = { 'file': histfile, 'type': 'Other', 'title': 'Intensity histogram', 'software_packages': version.version, 'keywords': "", 'timestamp': datetime.date.today().isoformat() } image_recs.append(histplot_rec) else: print('-I- No intensities found') else: print('-I- No intensities found') ## if hyst_file in filelist and spec_data: print('working on hysteresis', hyst_file) data = spec_data file_type = 'specimens' hdata = pmag.get_dictitem(data, hyst_bcr_key, '', 'F') hdata = pmag.get_dictitem(hdata, hyst_mr_key, '', 'F') hdata = pmag.get_dictitem(hdata, hyst_ms_key, '', 'F') # there are data for a dayplot hdata = pmag.get_dictitem(hdata, hyst_bc_key, '', 'F') if len(hdata) > 0: CMD = "ipmag.dayplot_magic(save=True, fmt='png', contribution={}, image_records=True)".format( con) info_log(CMD, loc) print(CMD) res, outfiles, dayplot_recs = ipmag.dayplot_magic( save=True, fmt='png', contribution=con, image_records=True) image_recs.extend(dayplot_recs) else: print('no hysteresis data found') if aniso_file in filelist and spec_data: # do anisotropy plots if possible print('working on anisotropy', aniso_file) data = spec_data file_type = 'specimens' # make sure there is some anisotropy data if not data: print('No anisotropy data found') elif 'aniso_s' not in data[0]: print('No anisotropy data found') else: # get specimen coordinates if aniso_tilt_corr_key not in data[0]: sdata = data else: sdata = pmag.get_dictitem(data, aniso_tilt_corr_key, '-1', 'T', float_to_int=True) # get specimen coordinates gdata = pmag.get_dictitem(data, aniso_tilt_corr_key, '0', 'T', float_to_int=True) # get specimen 
coordinates tdata = pmag.get_dictitem(data, aniso_tilt_corr_key, '100', 'T', float_to_int=True) if len(sdata) > 3: CMD = "ipmag.aniso_magic(iboot=0, ihext=1, crd='s', fmt='png', contribution={})".format( con) print(CMD) info_log(CMD, loc) res, files, aniso_recs = ipmag.aniso_magic( iboot=0, ihext=1, crd="s", fmt="png", contribution=con, image_records=True) image_recs.extend(aniso_recs) if len(gdata) > 3: CMD = "ipmag.aniso_magic(iboot=0, ihext=1, crd='g', fmt='png', contribution={})".format( con) print(CMD) info_log(CMD, loc) res, files, aniso_recs = ipmag.aniso_magic( iboot=0, ihext=1, crd="g", fmt="png", contribution=con, image_records=True) image_recs.extend(aniso_recs) if len(tdata) > 3: CMD = "ipmag.aniso_magic(iboot=0, ihext=1, crd='g', fmt='png', contribution={})".format( con) print(CMD) info_log(CMD, loc) res, files, aniso_recs = ipmag.aniso_magic( iboot=0, ihext=1, crd="t", fmt="png", contribution=con, image_records=True) image_recs.extend(aniso_recs) # remove temporary files for fname in glob.glob('tmp*.txt'): os.remove(fname) # now we need full contribution data if loc_file in filelist and loc_data: #data, file_type = pmag.magic_read(loc_file) # read in location data data = loc_data print('-I- working on pole map') poles = pmag.get_dictitem(data, 'pole_lat', "", 'F') # are there any poles? poles = pmag.get_dictitem(poles, 'pole_lon', "", 'F') # are there any poles? if len(poles) > 0: # YES! 
CMD = 'polemap_magic.py -sav -fmt png -rev gv 40' CMD = 'ipmag.polemap_magic(flip=True, rsym="gv", rsymsize=40, fmt="png", contribution={})'.format( full_con) print(CMD) info_log(CMD, "all locations", "polemap_magic.py") res, outfiles, polemap_recs = ipmag.polemap_magic( flip=True, rsym="gv", rsymsize=40, fmt="png", contribution=full_con, image_records=True) image_recs.extend(polemap_recs) else: print('-I- No poles found') if image_recs: new_image_file = os.path.join(dir_path, 'new_images.txt') old_image_file = os.path.join(dir_path, 'images.txt') pmag.magic_write(new_image_file, image_recs, 'images') if os.path.exists(old_image_file): ipmag.combine_magic([old_image_file, new_image_file], outfile=old_image_file, magic_table="images", dir_path=dir_path) else: os.rename(new_image_file, old_image_file) if set_env.isServer: thumbnails.make_thumbnails(dir_path)
def main():
    """
    NAME
        bryson_xpeem_measurements.py

    DESCRIPTION
        converts James Bryson XPEEM files into a MagIC format measurement file

    SYNTAX
        bryson_xpeem_measurements.py [command line options]

    OPTIONS
        -h: prints the help message and quits.
        -d DIRECTORY: specify directory where the XPEEM files are located,
            otherwise current directory is used.
        -s: set the starting measurement sequence number. Default:1
    """
    if '-h' in sys.argv:  # check if help is needed
        print(main.__doc__)
        sys.exit()  # graceful quit
    if '-s' in sys.argv:
        ind = sys.argv.index('-s')
        sequence = int(sys.argv[ind + 1])
        print("-s =", sys.argv[ind + 1])
    else:
        sequence = 1
    if '-d' in sys.argv:
        ind = sys.argv.index('-d')
        dir_name = sys.argv[ind + 1]
    else:
        dir_name = ""
    if dir_name != "":
        # work from inside the data directory so bare filenames resolve;
        # all files are then opened by name only (the original code opened
        # dir_name + file AFTER chdir, which double-applied relative paths)
        os.chdir(dir_name)
        file_list = os.listdir()
        print(file_list)
    else:
        file_list = os.listdir()
    # pixel spacing of the XPEEM maps in meters — TODO confirm against the
    # instrument metadata if these ever change
    x_spacing = 9.488e-9
    y_spacing = 9.709e-9
    md = cb.Contribution()  # md stands for magic file data

    # fixed location metadata for the Portales Valley meteorite
    location_data = [{
        'location': 'Portales Valley Meteorite',
        'location_type': 'Meteorite',
        'geologic_classes': 'Meteorite',
        'lithologies': 'H Ordinary Chondrite',
        'lat_s': '0',
        'lat_n': '0',
        'lon_w': '0',
        'lon_e': '0',
        'age': '4.5',
        'age_unit': 'Ga'
    }]
    md.add_magic_table_from_data('locations', location_data)
    md.write_table_to_file('locations')

    # two fixed sites (Interface A and B) with published paleointensities
    siteA_data = [{
        'site': 'Interface A',
        'location': 'Portales Valley Meteorite',
        'result_type': 'i',
        'result_quality': 'g',
        'method_codes': 'GM-CC',
        'citations': '10.1029/2019JE005951',
        'geologic_classes': 'Meteorite',
        'lithologies': 'H Ordinary Chondrite',
        'geologic_types': 'Meteorite',
        'lat': '0',
        'lon': '0',
        'age': '4.5',
        'age_unit': 'Ga',
        'int_abs': '0.000019',
        'int_abs_sigma': '0.000006'
    }]
    siteB_data = [{
        'site': 'Interface B',
        'location': 'Portales Valley Meteorite',
        'result_type': 'i',
        'result_quality': 'g',
        'method_codes': 'GM-CC',
        'citations': '10.1029/2019JE005951',
        'geologic_classes': 'Meteorite',
        'lithologies': 'H Ordinary Chondrite',
        'geologic_types': 'Meteorite',
        'lat': '0',
        'lon': '0',
        'age': '4.5',
        'age_unit': 'Ga',
        'int_abs': '0.000009',
        'int_abs_sigma': '0.0000035'
    }]
    md.add_magic_table_from_data('sites', siteA_data + siteB_data)
    md.write_table_to_file('sites')

    samp_names = []  # sample names already seen, to avoid duplicate rows
    samps = []       # accumulated sample records
    specs = []       # accumulated specimen records
    for file in file_list:
        # naming convention (from the original script): specimen name is the
        # filename minus its last 9 characters; sample name is the first 5
        # characters; the site letter (A/B) is the third character
        file_dir = file[:-9]
        print("file=", file)
        site = file[2:3]
        print('site=', site)
        spec_name = file_dir
        samp_name = file[:5]
        print('samp_name=', samp_name)
        if samp_name not in samp_names:
            samp_names.append(samp_name)
            # NOTE: the original script wrote this key as ' method_codes'
            # (leading space), creating a bogus MagIC column — fixed here
            samp = {
                'sample': samp_name,
                'site': 'Interface ' + site,
                'result_type': 'i',
                'result_quality': 'g',
                'method_codes': 'GM-CC',
                'citations': '10.1029/2019JE005951',
                'geologic_classes': 'Meteorite',
                'lithologies': 'H Ordinary Chondrite',
                'geologic_types': 'Meteorite'
            }
            samps.append(samp)
        spec = {
            'specimen': spec_name,
            'sample': samp_name,
            'result_quality': 'g',
            'method_codes': 'GM-CC',
            'citations': '10.1029/2019JE005951',
            'geologic_classes': 'Meteorite',
            'lithologies': 'H Ordinary Chondrite',
            'geologic_types': 'Meteorite'
        }
        specs.append(spec)

        # one measurements file per XPEEM image; context managers guarantee
        # the handles are closed (the input file was leaked in the original)
        with open(file_dir + 'measurements.txt', 'w') as mf:
            mf.write("tab\tmeasurements\n")
            mf.write('measurement\texperiment\tspecimen\tsequence\tstandard\t'
                     'quality\tmethod_codes\tcitations\tderived_value\t'
                     'meas_pos_x\tmeas_pos_y\n')
            with open(file, 'r') as m:
                # each input line is a row of tab-separated pixel values;
                # (x, y) are pixel indices converted to meters via the spacings
                for y, line in enumerate(m):
                    values = line.split('\t')
                    for x, value in enumerate(values):
                        # treat empty / whitespace-only fields as zero
                        # (also covers a bare trailing newline field, which
                        # would otherwise crash int())
                        if not value.strip():
                            value = '0'
                        # NOTE(review): spec_name + str(x) + str(y) can
                        # collide (x=1,y=12 vs x=11,y=2); kept for
                        # backward compatibility with existing uploads
                        mf.write(spec_name + str(x) + str(y) + '\t' +
                                 spec_name + '_xpeem\t' + spec_name + '\t' +
                                 str(sequence) +
                                 '\tu\tg\tGM-CC\t10.1029/2019JE005951\tXPEEM,'
                                 + str(int(value)) +
                                 ',10.1088/1742-6596/430/1/012127\t' +
                                 str(x * x_spacing) + '\t' +
                                 str(y * y_spacing) + '\n')
                        sequence += 1
            print("file_dir", file_dir)

    md.add_magic_table_from_data('samples', samps)
    md.write_table_to_file('samples')
    md.add_magic_table_from_data('specimens', specs)
    md.write_table_to_file('specimens')
    # md.add_magic_table_from_data('measurements', measurements)
    # md.write_table_to_file('measurements')
    # sys.command('upload_magic.py')
    print("end")
def main(): """ NAME zeq_magic.py DESCRIPTION reads in magic_measurements formatted file, makes plots of remanence decay during demagnetization experiments. Reads in prior interpretations saved in a pmag_specimens formatted file [and allows re-interpretations of best-fit lines and planes and saves (revised or new) interpretations in a pmag_specimens file. interpretations are saved in the coordinate system used. Also allows judicious editting of measurements to eliminate "bad" measurements. These are marked as such in the magic_measurements input file. they are NOT deleted, just ignored. ] Bracketed part not yet implemented SYNTAX zeq_magic.py [command line options] OPTIONS -h prints help message and quits -f MEASFILE: sets measurements format input file, default: measurements.txt -fsp SPECFILE: sets specimens format file with prior interpreations, default: specimens.txt -fsa SAMPFILE: sets samples format file sample=>site information, default: samples.txt -fsi SITEFILE: sets sites format file with site=>location informationprior interpreations, default: samples.txt -Fp PLTFILE: sets filename for saved plot, default is name_type.fmt (where type is zijd, eqarea or decay curve) -crd [s,g,t]: sets coordinate system, g=geographic, t=tilt adjusted, default: specimen coordinate system -spc SPEC plots single specimen SPEC, saves plot with specified format with optional -dir settings and quits -dir [L,P,F][beg][end]: sets calculation type for principal component analysis, default is none beg: starting step for PCA calculation end: ending step for PCA calculation [L,P,F]: calculation type for line, plane or fisher mean must be used with -spc option -fmt FMT: set format of saved plot [png,svg,jpg] -A: suppresses averaging of replicate measurements, default is to average -sav: saves all plots without review SCREEN OUTPUT: Specimen, N, a95, StepMin, StepMax, Dec, Inc, calculation type """ # initialize some variables doave, e, b = 1, 0, 0 # average replicates, initial end and 
beginning step intlist = ['magn_moment', 'magn_volume', 'magn_mass', 'magnitude'] plots, coord = 0, 's' noorient = 0 version_num = pmag.get_version() verbose = pmagplotlib.verbose calculation_type, fmt = "", "svg" spec_keys = [] geo, tilt, ask = 0, 0, 0 PriorRecs = [] # empty list for prior interpretations backup = 0 specimen = "" # can skip everything and just plot one specimen with bounds e,b if '-h' in sys.argv: print(main.__doc__) sys.exit() dir_path = pmag.get_named_arg("-WD", default_val=os.getcwd()) meas_file = pmag.get_named_arg("-f", default_val="measurements.txt") spec_file = pmag.get_named_arg("-fsp", default_val="specimens.txt") samp_file = pmag.get_named_arg("-fsa", default_val="samples.txt") site_file = pmag.get_named_arg("-fsi", default_val="sites.txt") #meas_file = os.path.join(dir_path, meas_file) #spec_file = os.path.join(dir_path, spec_file) #samp_file = os.path.join(dir_path, samp_file) #site_file = os.path.join(dir_path, site_file) plot_file = pmag.get_named_arg("-Fp", default_val="") crd = pmag.get_named_arg("-crd", default_val="s") if crd == "s": coord = "-1" elif crd == "t": coord = "100" else: coord = "0" saved_coord = coord fmt = pmag.get_named_arg("-fmt", "svg") specimen = pmag.get_named_arg("-spc", default_val="") #if specimen: # just save plot and exit # plots, verbose = 1, 0 beg_pca, end_pca = "", "" if '-dir' in sys.argv: ind = sys.argv.index('-dir') direction_type = sys.argv[ind + 1] beg_pca = int(sys.argv[ind + 2]) end_pca = int(sys.argv[ind + 3]) if direction_type == 'L': calculation_type = 'DE-BFL' if direction_type == 'P': calculation_type = 'DE-BFP' if direction_type == 'F': calculation_type = 'DE-FM' if '-A' in sys.argv: doave = 0 if '-sav' in sys.argv: plots, verbose = 1, 0 # first_save = 1 fnames = { 'measurements': meas_file, 'specimens': spec_file, 'samples': samp_file, 'sites': site_file } contribution = cb.Contribution( dir_path, custom_filenames=fnames, read_tables=['measurements', 'specimens', 'samples', 'sites']) # # 
import specimens if 'measurements' not in contribution.tables: print('-W- No measurements table found in your working directory') return specimen_cols = [ 'analysts', 'aniso_ftest', 'aniso_ftest12', 'aniso_ftest23', 'aniso_s', 'aniso_s_mean', 'aniso_s_n_measurements', 'aniso_s_sigma', 'aniso_s_unit', 'aniso_tilt_correction', 'aniso_type', 'aniso_v1', 'aniso_v2', 'aniso_v3', 'citations', 'description', 'dir_alpha95', 'dir_comp', 'dir_dec', 'dir_inc', 'dir_mad_free', 'dir_n_measurements', 'dir_tilt_correction', 'experiments', 'geologic_classes', 'geologic_types', 'hyst_bc', 'hyst_bcr', 'hyst_mr_moment', 'hyst_ms_moment', 'int_abs', 'int_b', 'int_b_beta', 'int_b_sigma', 'int_corr', 'int_dang', 'int_drats', 'int_f', 'int_fvds', 'int_gamma', 'int_mad_free', 'int_md', 'int_n_measurements', 'int_n_ptrm', 'int_q', 'int_rsc', 'int_treat_dc_field', 'lithologies', 'meas_step_max', 'meas_step_min', 'meas_step_unit', 'method_codes', 'sample', 'software_packages', 'specimen' ] if 'specimens' in contribution.tables: contribution.propagate_name_down('sample', 'measurements') # add location/site info to measurements table for naming plots if pmagplotlib.isServer: contribution.propagate_name_down('site', 'measurements') contribution.propagate_name_down('location', 'measurements') spec_container = contribution.tables['specimens'] if 'method_codes' not in spec_container.df.columns: spec_container.df['method_codes'] = None prior_spec_data = spec_container.get_records_for_code( 'LP-DIR', strict_match=False ) # look up all prior directional interpretations # # tie sample names to measurement data # else: spec_container, prior_spec_data = None, [] # # import samples for orientation info # if 'samples' in contribution.tables: samp_container = contribution.tables['samples'] samps = samp_container.df samp_data = samps.to_dict( 'records' ) # convert to list of dictionaries for use with get_orient else: samp_data = [] #if ('samples' in contribution.tables) and ('specimens' in 
contribution.tables): # # contribution.propagate_name_down('site','measurements') # contribution.propagate_cols(col_names=[ # 'azimuth', 'dip', 'orientation_quality','bed_dip','bed_dip_direction'], target_df_name='measurements', source_df_name='samples') ## # define figure numbers for equal area, zijderveld, # and intensity vs. demagnetiztion step respectively # ZED = {} ZED['eqarea'], ZED['zijd'], ZED['demag'] = 1, 2, 3 pmagplotlib.plot_init(ZED['eqarea'], 6, 6) pmagplotlib.plot_init(ZED['zijd'], 6, 6) pmagplotlib.plot_init(ZED['demag'], 6, 6) # save_pca=0 angle, direction_type, setangle = "", "", 0 # create measurement dataframe # meas_container = contribution.tables['measurements'] meas_data = meas_container.df # meas_data = meas_data[meas_data['method_codes'].str.contains( 'LT-NO|LT-AF-Z|LT-T-Z|LT-M-Z') == True] # fish out steps for plotting meas_data = meas_data[meas_data['method_codes'].str.contains( 'AN|ARM|LP-TRM|LP-PI-ARM') == False] # strip out unwanted experiments intensity_types = [ col_name for col_name in meas_data.columns if col_name in intlist ] intensity_types = [ col_name for col_name in intensity_types if any(meas_data[col_name]) ] if not len(intensity_types): print('-W- No intensity columns found') return # plot first non-empty intensity method found - normalized to initial value anyway - # doesn't matter which used int_key = intensity_types[0] # get all the non-null intensity records of the same type meas_data = meas_data[meas_data[int_key].notnull()] if 'quality' not in meas_data.columns: meas_data['quality'] = 'g' # set the default flag to good # need to treat LP-NO specially for af data, treatment should be zero, # otherwise 273. 
#meas_data['treatment'] = meas_data['treat_ac_field'].where( # cond=meas_data['treat_ac_field'] != 0, other=meas_data['treat_temp']) meas_data['treatment'] = meas_data['treat_ac_field'].where( cond=meas_data['treat_ac_field'].astype(bool), other=meas_data['treat_temp']) meas_data['ZI'] = 1 # initialize these to one meas_data['instrument_codes'] = "" # initialize these to blank # for unusual case of microwave power.... if 'treat_mw_power' in meas_data.columns: meas_data.loc[ (meas_data.treat_mw_power != 0) & (meas_data.treat_mw_power) & (meas_data.treat_mw_time), 'treatment'] = meas_data.treat_mw_power * meas_data.treat_mw_time # # get list of unique specimen names from measurement data # # this is a list of all the specimen names specimen_names = meas_data.specimen.unique() specimen_names = specimen_names.tolist() specimen_names.sort() # # set up new DataFrame for this sessions specimen interpretations # data_container = cb.MagicDataFrame(dtype='specimens', columns=specimen_cols) # this is for interpretations from this session current_spec_data = data_container.df if specimen == "": k = 0 else: k = specimen_names.index(specimen) # let's look at the data now while k < len(specimen_names): mpars = {"specimen_direction_type": "Error"} # set the current specimen for plotting this_specimen = specimen_names[k] # reset beginning/end pca if plotting more than one specimen if not specimen: beg_pca, end_pca = "", "" if verbose and this_specimen != "": print(this_specimen, k + 1, 'out of ', len(specimen_names)) if setangle == 0: angle = "" this_specimen_measurements = meas_data[ meas_data['specimen'].str.contains(this_specimen).astype( bool)] # fish out this specimen this_specimen_measurements = this_specimen_measurements[ -this_specimen_measurements['quality'].str.contains('b').astype( bool)] # remove bad measurements if len(this_specimen_measurements) != 0: # if there are measurements meas_list = this_specimen_measurements.to_dict( 'records') # get a list of dictionaries 
this_sample = "" if coord != '-1' and 'sample' in meas_list[0].keys( ): # look up sample name this_sample = pmag.get_dictitem(meas_list, 'specimen', this_specimen, 'T') if len(this_sample) > 0: this_sample = this_sample[0]['sample'] # # set up datablock [[treatment,dec, inc, int, direction_type],[....]] # # # figure out the method codes # units, methods, title = "", "", this_specimen if pmagplotlib.isServer: try: loc = this_specimen_measurements.loc[:, 'location'].values[0] except: loc = "" try: site = this_specimen_measurements.loc[:, 'site'].values[0] except: site = "" try: samp = this_specimen_measurements.loc[:, 'sample'].values[0] except: samp = "" title = "LO:_{}_SI:_{}_SA:_{}_SP:_{}_".format( loc, site, samp, this_specimen) # this is a list of all the specimen method codes meas_meths = this_specimen_measurements.method_codes.unique() tr = pd.to_numeric(this_specimen_measurements.treatment).tolist() if any(cb.is_null(treat, False) for treat in tr): print( '-W- Missing required values in measurements.treatment for {}, skipping' .format(this_specimen)) if specimen: return k += 1 continue if set(tr) == set([0]): print( '-W- Missing required values in measurements.treatment for {}, skipping' .format(this_specimen)) if specimen: return k += 1 continue for m in meas_meths: if 'LT-AF-Z' in m and 'T' not in units: units = 'T' # units include tesla tr[0] = 0 if 'LT-T-Z' in m and 'K' not in units: units = units + ":K" # units include kelvin if 'LT-M-Z' in m and 'J' not in units: units = units + ':J' # units include joules tr[0] = 0 units = units.strip(':') # strip off extra colons if 'LP-' in m: methods = methods + ":" + m decs = pd.to_numeric(this_specimen_measurements.dir_dec).tolist() incs = pd.to_numeric(this_specimen_measurements.dir_inc).tolist() # # fix the coordinate system # # revert to original coordinate system coord = saved_coord if coord != '-1': # need to transform coordinates to geographic # get the azimuth or_info, az_type = pmag.get_orient(samp_data, 
this_sample, data_model=3) if 'azimuth' in or_info.keys() and cb.not_null( or_info['azimuth']): #azimuths = pd.to_numeric( # this_specimen_measurements.azimuth).tolist() #dips = pd.to_numeric(this_specimen_measurements.dip).tolist() azimuths = len(decs) * [or_info['azimuth']] dips = len(decs) * [or_info['dip']] # if azimuth/dip is missing, plot using specimen coordinates instead else: azimuths, dips = [], [] if any([cb.is_null(az) for az in azimuths if az != 0]): coord = '-1' print("-W- Couldn't find azimuth and dip for {}".format( this_specimen)) print(" Plotting with specimen coordinates instead") elif any([cb.is_null(dip) for dip in dips if dip != 0]): coord = '-1' print("-W- Couldn't find azimuth and dip for {}".format( this_specimen)) print(" Plotting with specimen coordinates instead") else: coord = saved_coord # if azimuth and dip were found, continue with geographic coordinates if coord != "-1" and len(azimuths) > 0: dirs = [decs, incs, azimuths, dips] # this transposes the columns and rows of the list of lists dirs_geo = np.array(list(map(list, list(zip(*dirs))))) decs, incs = pmag.dogeo_V(dirs_geo) if coord == '100' and 'bed_dip_direction' in or_info.keys( ) and or_info[ 'bed_dip_direction'] != "": # need to do tilt correction too bed_dip_dirs = len(decs) * [ or_info['bed_dip_direction'] ] bed_dips = len(decs) * [or_info['bed_dip']] #bed_dip_dirs = pd.to_numeric( # this_specimen_measurements.bed_dip_direction).tolist() # get the azimuths #bed_dips = pd.to_numeric( # this_specimen_measurements.bed_dip).tolist() # get the azimuths dirs = [decs, incs, bed_dip_dirs, bed_dips] ## this transposes the columns and rows of the list of lists dirs_tilt = np.array(list(map(list, list(zip(*dirs))))) decs, incs = pmag.dotilt_V(dirs_tilt) if pmagplotlib.isServer: title = title + "CO:_t_" else: title = title + '_t' else: if pmagplotlib.isServer: title = title + "CO:_g_" else: title = title + '_g' if angle == "": angle = decs[0] ints = 
pd.to_numeric(this_specimen_measurements[int_key]).tolist() ZI = this_specimen_measurements.ZI.tolist() flags = this_specimen_measurements.quality.tolist() codes = this_specimen_measurements.instrument_codes.tolist() datalist = [tr, decs, incs, ints, ZI, flags, codes] # this transposes the columns and rows of the list of lists datablock = list(map(list, list(zip(*datalist)))) pmagplotlib.plot_zed(ZED, datablock, angle, title, units) if verbose and not set_env.IS_WIN: pmagplotlib.draw_figs(ZED) # # collect info for current_specimen_interpretation dictionary # # # find prior interpretation # prior_specimen_interpretations = [] if len(prior_spec_data): prior_specimen_interpretations = prior_spec_data[ prior_spec_data['specimen'].str.contains( this_specimen) == True] if (beg_pca == "") and (len(prior_specimen_interpretations) != 0): if len(prior_specimen_interpretations) > 0: beg_pcas = pd.to_numeric(prior_specimen_interpretations. meas_step_min.values).tolist() end_pcas = pd.to_numeric(prior_specimen_interpretations. 
meas_step_max.values).tolist() spec_methods = prior_specimen_interpretations.method_codes.tolist( ) # step through all prior interpretations and plot them for ind in range(len(beg_pcas)): spec_meths = spec_methods[ind].split(':') for m in spec_meths: if 'DE-BFL' in m: calculation_type = 'DE-BFL' # best fit line if 'DE-BFP' in m: calculation_type = 'DE-BFP' # best fit plane if 'DE-FM' in m: calculation_type = 'DE-FM' # fisher mean if 'DE-BFL-A' in m: calculation_type = 'DE-BFL-A' # anchored best fit line if len(beg_pcas) != 0: try: # getting the starting and ending points start, end = tr.index(beg_pcas[ind]), tr.index( end_pcas[ind]) mpars = pmag.domean(datablock, start, end, calculation_type) except ValueError: print( '-W- Specimen record contains invalid start/stop bounds:' ) mpars['specimen_direction_type'] = "Error" # calculate direction/plane if mpars["specimen_direction_type"] != "Error": # put it on the plot pmagplotlib.plot_dir(ZED, mpars, datablock, angle) if verbose and not set_env.IS_WIN: pmagplotlib.draw_figs(ZED) ### SKIP if no prior interpretation - this section should not be used: # else: # try: # start, end = int(beg_pca), int(end_pca) # except ValueError: # beg_pca = 0 # end_pca = len(datablock) - 1 # start, end = int(beg_pca), int(end_pca) # # # calculate direction/plane # try: # mpars = pmag.domean(datablock, start, end, calculation_type) # except Exception as ex: # print('-I- Problem with {}'.format(this_specimen)) # print(' ', ex) # print(' Skipping') # continue # k += 1 # if mpars["specimen_direction_type"] != "Error": # # put it on the plot # pmagplotlib.plot_dir(ZED, mpars, datablock, angle) # if verbose: # pmagplotlib.draw_figs(ZED) if plots == 1 or specimen != "": if plot_file == "": basename = title else: basename = plot_file files = {} for key in list(ZED.keys()): files[key] = basename + '_' + key + '.' 
+ fmt if pmagplotlib.isServer: files[key] = basename + "TY:_{}_.".format(key) + fmt pmagplotlib.save_plots(ZED, files) if specimen != "": sys.exit() if verbose: recnum = 0 for plotrec in datablock: if units == 'T': print('%s: %i %7.1f %s %8.3e %7.1f %7.1f %s' % (plotrec[5], recnum, plotrec[0] * 1e3, " mT", plotrec[3], plotrec[1], plotrec[2], plotrec[6])) if units == "K": print('%s: %i %7.1f %s %8.3e %7.1f %7.1f %s' % (plotrec[5], recnum, plotrec[0] - 273, ' C', plotrec[3], plotrec[1], plotrec[2], plotrec[6])) if units == "J": print('%s: %i %7.1f %s %8.3e %7.1f %7.1f %s' % (plotrec[5], recnum, plotrec[0], ' J', plotrec[3], plotrec[1], plotrec[2], plotrec[6])) if 'K' in units and 'T' in units: if plotrec[0] >= 1.: print('%s: %i %7.1f %s %8.3e %7.1f %7.1f %s' % (plotrec[5], recnum, plotrec[0] - 273, ' C', plotrec[3], plotrec[1], plotrec[2], plotrec[6])) if plotrec[0] < 1.: print('%s: %i %7.1f %s %8.3e %7.1f %7.1f %s' % (plotrec[5], recnum, plotrec[0] * 1e3, " mT", plotrec[3], plotrec[1], plotrec[2], plotrec[6])) recnum += 1 # we have a current interpretation elif mpars["specimen_direction_type"] != "Error": # # create a new specimen record for the interpreation for this # specimen this_specimen_interpretation = { col: "" for col in specimen_cols } # this_specimen_interpretation["analysts"]=user this_specimen_interpretation['software_packages'] = version_num this_specimen_interpretation['specimen'] = this_specimen this_specimen_interpretation["method_codes"] = calculation_type this_specimen_interpretation["meas_step_unit"] = units this_specimen_interpretation["meas_step_min"] = tr[start] this_specimen_interpretation["meas_step_max"] = tr[end] this_specimen_interpretation["dir_dec"] = '%7.1f' % ( mpars['specimen_dec']) this_specimen_interpretation["dir_inc"] = '%7.1f' % ( mpars['specimen_inc']) this_specimen_interpretation["dir_dang"] = '%7.1f' % ( mpars['specimen_dang']) this_specimen_interpretation["dir_n_measurements"] = '%i' % ( mpars['specimen_n']) 
this_specimen_interpretation["dir_tilt_correction"] = coord methods = methods.replace(" ", "") if "T" in units: methods = methods + ":LP-DIR-AF" if "K" in units: methods = methods + ":LP-DIR-T" if "J" in units: methods = methods + ":LP-DIR-M" this_specimen_interpretation["method_codes"] = methods.strip( ':') this_specimen_interpretation[ "experiments"] = this_specimen_measurements.experiment.unique( )[0] # # print some stuff # if calculation_type != 'DE-FM': this_specimen_interpretation["dir_mad_free"] = '%7.1f' % ( mpars['specimen_mad']) this_specimen_interpretation["dir_alpha95"] = '' if verbose: if units == 'K': print( '%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %s \n' % (this_specimen_interpretation["specimen"], int(this_specimen_interpretation[ "dir_n_measurements"]), float(this_specimen_interpretation[ "dir_mad_free"]), float( this_specimen_interpretation["dir_dang"]), float(this_specimen_interpretation[ "meas_step_min"]) - 273, float(this_specimen_interpretation[ "meas_step_max"]) - 273, float( this_specimen_interpretation["dir_dec"]), float( this_specimen_interpretation["dir_inc"]), calculation_type)) elif units == 'T': print( '%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %s \n' % (this_specimen_interpretation["specimen"], int(this_specimen_interpretation[ "dir_n_measurements"]), float(this_specimen_interpretation[ "dir_mad_free"]), float( this_specimen_interpretation["dir_dang"]), float(this_specimen_interpretation[ "meas_step_min"]) * 1e3, float(this_specimen_interpretation[ "meas_step_max"]) * 1e3, float( this_specimen_interpretation["dir_dec"]), float( this_specimen_interpretation["dir_inc"]), calculation_type)) elif 'T' in units and 'K' in units: if float(this_specimen_interpretation[ 'meas_step_min']) < 1.0: min = float(this_specimen_interpretation[ 'meas_step_min']) * 1e3 else: min = float(this_specimen_interpretation[ 'meas_step_min']) - 273 if float(this_specimen_interpretation[ 'meas_step_max']) < 1.0: max = float(this_specimen_interpretation[ 
'meas_step_max']) * 1e3 else: max = float(this_specimen_interpretation[ 'meas_step_max']) - 273 print( '%s %i %7.1f %i %i %7.1f %7.1f %7.1f %s \n' % (this_specimen_interpretation["specimen"], int(this_specimen_interpretation[ "dir_n_measurements"]), float(this_specimen_interpretation[ "dir_mad_free"]), float( this_specimen_interpretation["dir_dang"]), min, max, float( this_specimen_interpretation["dir_dec"]), float( this_specimen_interpretation["dir_inc"]), calculation_type)) else: print( '%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %s \n' % (this_specimen_interpretation["specimen"], int(this_specimen_interpretation[ "dir_n_measurements"]), float(this_specimen_interpretation[ "dir_mad_free"]), float( this_specimen_interpretation["dir_dang"]), float(this_specimen_interpretation[ "meas_step_min"]), float(this_specimen_interpretation[ "meas_step_max"]), float( this_specimen_interpretation["dir_dec"]), float( this_specimen_interpretation["dir_inc"]), calculation_type)) else: this_specimen_interpretation["dir_alpha95"] = '%7.1f' % ( mpars['specimen_alpha95']) this_specimen_interpretation["dir_mad_free"] = '' if verbose: if 'K' in units: print( '%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %s \n' % (this_specimen_interpretation["specimen"], int(this_specimen_interpretation[ "dir_n_measurments"]), float(this_specimen_interpretation[ "dir_mad_free"]), float( this_specimen_interpretation["dir_dang"]), float(this_specimen_interpretation[ "meas_step_min"]) - 273, float(this_specimen_interpretation[ "meas_step_max"]) - 273, float( this_specimen_interpretation["dir_dec"]), float( this_specimen_interpretation["dir_inc"]), calculation_type)) elif 'T' in units: print( '%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %s \n' % (this_specimen_interpretation["specimen"], int(this_specimen_interpretation[ "dir_n_measurements"]), float(this_specimen_interpretation[ "dir_alpha95"]), float( this_specimen_interpretation["dir_dang"]), float(this_specimen_interpretation[ "meas_step_min"]) * 1e3, 
float(this_specimen_interpretation[ "meas_step_max"]) * 1e3, float( this_specimen_interpretation["dir_dec"]), float( this_specimen_interpretation["dir_inc"]), calculation_type)) elif 'T' in units and 'K' in units: if float(this_specimen_interpretation[ 'meas_step_min']) < 1.0: min = float(this_specimen_interpretation[ 'meas_step_min']) * 1e3 else: min = float(this_specimen_interpretation[ 'meas_step_min']) - 273 if float(this_specimen_interpretation[ 'meas_step_max']) < 1.0: max = float(this_specimen_interpretation[ 'meas_step_max']) * 1e3 else: max = float(this_specimen_interpretation[ 'meas_step_max']) - 273 print('%s %i %7.1f %i %i %7.1f %7.1f %s \n' % ( this_specimen_interpretation["specimen"], int(this_specimen_interpretation[ "dir_n_measurements"]), float( this_specimen_interpretation["dir_alpha95"] ), min, max, float(this_specimen_interpretation["dir_dec"]), float(this_specimen_interpretation["dir_inc"]), calculation_type)) else: print( '%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %s \n' % (this_specimen_interpretation["specimen"], int(this_specimen_interpretation[ "dir_n_measurements"]), float(this_specimen_interpretation[ "dir_alpha95"]), float(this_specimen_interpretation[ "meas_step_min"]), float(this_specimen_interpretation[ "meas_step_max"]), float( this_specimen_interpretation["dir_dec"]), float( this_specimen_interpretation["dir_inc"]), calculation_type)) if verbose: saveit = input("Save this interpretation? [y]/n \n") else: print("no data", this_specimen) if verbose: pmagplotlib.draw_figs(ZED) #res = input(' <return> for next specimen, [q]uit ') res = input("S[a]ve plots, [q]uit, or <return> to continue ") if res == 'a': files = { plot_type: this_specimen + "_" + plot_type + "." + fmt for plot_type in ZED } pmagplotlib.save_plots(ZED, files) print("") if res == 'q': return k += 1