Example 1
def remove_failed_data(new_useful_tracks_file,
                       data_directory,
                       failed_data_dir="failed_data"):
    #reads in new cuts or tracks that should be kept
    nfin = open(new_useful_tracks_file, 'r')
    nfin_lines = nfin.readlines()
    nfin.close()
    ncts = [nfin_line.split('\t')[0] for nfin_line in nfin_lines]

    utl.check_dir(failed_data_dir)

    for d in [x[0] for x in os.walk(data_directory)]:
        if d == data_directory: continue
        elif not os.path.isdir(d): continue
        elif any(seg in d for seg in ('/E', '/V', '/E_backup', '/V_backup', '/example_E', '/example_V')):
            continue
        elif all([d not in nct for nct in ncts]):
            print(
                "moving directory %s because it is no longer being analysed" %
                d)
            shutil.move(d, os.path.join(failed_data_dir, d))

    for nct in ncts:
        if not os.path.isfile(nct):
            print(
                "ERROR: there was a problem cleaning out failed data and %s was moved despite still being used in analysis, please manually put it back"
                % nct)
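
# A minimal usage sketch; the file and directory names below are hypothetical and
# follow the "usable_tracks_and_intersects_for_<chron>.txt" pattern used elsewhere
# in these examples.
remove_failed_data("usable_tracks_and_intersects_for_chron20.txt", "data")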
Example 2
def make_cande_tab(cande_path, out_dir, coord_0_360=True):
    check_dir(out_dir)
    chron_path, n = cande_path, 1
    if not os.path.isfile(chron_path):
        print("no file %s" % (chron_path))
        return
    fchron = open(chron_path, 'r')
    lines = fchron.readlines()
    fchron.close()
    entries = [[], []]
    for line in lines[1:]:
        entry = line.split()
        if entry[0] == '>':
            if len(entries[0]) < 2 or len(entries[1]) < 2:
                entries = [[], []]
                continue
            df = pd.DataFrame(np.array(entries).T, columns=['lon', 'lat'])
            df['RGB'] = ['0,255,255' for i in range(len(entries[0]))]
            df.to_csv(os.path.join(out_dir, "sz_%d.tsv" % n),
                      sep='\t',
                      index=False)
            n += 1
            entries = [[], []]
        else:
            if coord_0_360:
                entries[0].append(float(entry[0]))
                entries[1].append(float(entry[1]))
            else:
                entries[0].append(convert_to_180_180(entry[0]))
                entries[1].append(float(entry[1]))
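
# A minimal usage sketch; the cande file path is hypothetical but follows the
# "../raw_data/chrons/cande/cande.<chron>" layout used in the next example.
make_cande_tab("../raw_data/chrons/cande/cande.20", "cande_tabs", coord_0_360=True)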
Example 3
def seperate_chron_into_spreading_zones(chron_to_analyse):
    #separate chrons into the different spreading zones
    spreading_zone_files = []
    chron, chron_color = chron_to_analyse
    fchron = open("../raw_data/chrons/cande/cande.%s" % str(chron))
    string = fchron.read()
    fchron.close()
    spreading_zones = string.split('>')
    utl.check_dir('spreading_zones')
    for i, spreading_zone in enumerate(spreading_zones):
        if spreading_zone == '': continue
        headerless_spreading_zone = spreading_zone.split('\n')[1:]
        headerless_spreading_zone_string = '\n'.join(headerless_spreading_zone)
        fchron_out_path = os.path.join('spreading_zones',
                                       'chron%s_sz%d.txt' % (chron, i))
        fchron_out = open(fchron_out_path, 'w+')
        fchron_out.write(headerless_spreading_zone_string)
        fchron_out.close()
        spreading_zone_files.append(fchron_out_path)
    ccz, gcz = utl.get_barckhausen_2013_chrons()
    if str(chron) in ccz.keys():
        i += 1
        ccz_data = np.array([(utl.convert_to_0_360(lonlat[0]),
                              float(lonlat[1])) for lonlat in ccz[str(chron)]])
        if len(ccz_data) > 1:
            nlons = np.arange(min(ccz_data[:, 0]), max(ccz_data[:, 0]), .025)
            nlats = np.interp(nlons, ccz_data[:, 0], ccz_data[:, 1])
            out_str = '\n'.join(str(nlon) + ' ' + str(nlat)
                                for nlon, nlat in zip(nlons, nlats))
            fchron_out_path = os.path.join('spreading_zones',
                                           'chron%s_sz%d.txt' % (chron, i))
            print("Barckhausen data for CCZ included in %s" % fchron_out_path)
            fchron_out = open(fchron_out_path, 'w+')
            fchron_out.write(out_str)
            fchron_out.close()
            spreading_zone_files.append(fchron_out_path)
    if str(chron) in gcz.keys():
        i += 1
        gcz_data = np.array([(utl.convert_to_0_360(lonlat[0]),
                              float(lonlat[1])) for lonlat in gcz[str(chron)]])
        if len(gcz_data) > 1:
            nlats = np.arange(min(gcz_data[:, 1]), max(gcz_data[:, 1]), .05)
            nlons = np.interp(nlats, gcz_data[:, 1], gcz_data[:, 0])
            out_str = '\n'.join(str(nlon) + ' ' + str(nlat)
                                for nlon, nlat in zip(nlons, nlats))
            fchron_out_path = os.path.join('spreading_zones',
                                           'chron%s_sz%d.txt' % (chron, i))
            print("Barckhausen data for GCZ included in %s" % fchron_out_path)
            fchron_out = open(fchron_out_path, 'w+')
            fchron_out.write(out_str)
            fchron_out.close()
            spreading_zone_files.append(fchron_out_path)
    return spreading_zone_files
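
# A minimal usage sketch; chron_to_analyse pairs a chron identifier with a plot
# color, matching the unpacking at the top of the function (values hypothetical).
sz_files = seperate_chron_into_spreading_zones(("20", "cyan"))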
Example 4
def plot_tracks(chrons_info,
                results_directory,
                tracks=[],
                track_dir="all_tracks",
                lon_0=180,
                lat_0=0,
                cuts=False):
    """
    plots track files in tracks with chron info on a default ortho map centered on the pacific for rough estimation of intersept and places these plots in the results directory. If no tracks provided it plots them all
    """

    if tracks == []:
        tracks = glob.glob('../raw_data/hi_alt/**/*.DAT') + glob.glob(
            '../raw_data/ship/**/*.lp')

    all_tracks_dir = os.path.join(results_directory, track_dir)
    utl.check_dir(all_tracks_dir)

    #Start loop making plots
    for track in tracks:

        #Get Track Name
        track_name = os.path.basename(track)
        if not cuts: track_name = track_name.split('.')[0]
        else: track_name = track_name.replace('.', '-')

        #Create Figure
        fig = plt.figure(figsize=(9, 9), dpi=80)

        #Create Map
        aero_track_map = create_basic_map(projection='ortho',
                                          center_lon=lon_0,
                                          center_lat=lat_0,
                                          fig=fig)

        #Create Chron markers
        plot_chron_info(chrons_info, aero_track_map)

        dfin = utl.open_mag_file(track)

        lats = list(map(float, dfin['lat'].tolist()))
        lons = list(map(float, dfin['lon'].tolist()))

        aero_track_handle, = aero_track_map.plot(lons,
                                                 lats,
                                                 color='k',
                                                 zorder=3,
                                                 label=track_name,
                                                 transform=ccrs.PlateCarree())

        #plot title and labels
        plt.title(track_name)
        handles = [aero_track_handle]
        add_chron_info_to_legend(chrons_info, handles)
        plt.legend(handles=handles, loc='best')

        fig.savefig(os.path.join(all_tracks_dir, track_name + ".png"))
        plt.close(fig)
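
# A minimal usage sketch; chrons_info is assumed to be a list of (chron, color)
# pairs consumed by plot_chron_info, and the track path is hypothetical.
plot_tracks([("20", "cyan")], "results",
            tracks=["../raw_data/ship/some_cruise/some_cruise.lp"])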
Example 5
def get_track_intersects(chron_to_analyse,
                         tracks_or_cuts,
                         spreading_zone_files,
                         data_directory='.',
                         bounding_lats=(-90, 90),
                         bounding_lons=(0, 360),
                         e=1):
    """ This function works in 0-360 longitude because otherwise there would be a discontinuty in the Pacific the region of interest """
    chron, chron_color = chron_to_analyse
    chron_name = "chron%s" % (str(chron))
    def bound_check_func(x):
        return (bounding_lats[0] < float(x[1]) < bounding_lats[1] and
                bounding_lons[0] < float(x[0]) < bounding_lons[1])
    intersecting_tracks, out_string = [], ""
    for track in tqdm(tracks_or_cuts):
        print(track)
        dft = utl.open_mag_file(track)
        if dft.empty: continue
        lt = [[utl.convert_to_0_360(lon),
               float(lat)] for lon, lat in zip(dft['lon'], dft['lat'])]
        if not list(filter(bound_check_func, lt)):
            print("track out of bounds, skipping track")
            continue

        for spreading_zone_file in spreading_zone_files:
            lsz = [[line.split()[0], line.split()[1]]
                   for line in open(spreading_zone_file).readlines()
                   if len(line.split()) > 1]
            if not list(filter(bound_check_func, lsz)): continue
            idx = intersect_bf(lt, lsz, e=e)

            if not any(idx): continue
            else:
                print("-----------intersected in bounds-------------")
                intersecting_tracks.append(track)
                out_string += "%s\t%s\t%s\n" % (track, spreading_zone_file,
                                                str(idx))
                break

    print("found %d intersecting tracks" % len(intersecting_tracks))
    utl.check_dir(data_directory)
    fout_name = os.path.join(
        data_directory,
        "usable_tracks_and_intersects_for_%s.txt" % str(chron_name))
    if os.path.isfile(fout_name):
        print("backing up %s to %s" % (fout_name, fout_name + '.bak'))
        shutil.copyfile(fout_name, fout_name + '.bak')
    fout = open(fout_name, 'w+')
    print("writing to %s" % fout_name)
    fout.write(out_string)
    fout.close()

    return intersecting_tracks, fout_name
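
# A minimal usage sketch chaining this with the spreading zone files produced by
# seperate_chron_into_spreading_zones above (track path hypothetical):
sz_files = seperate_chron_into_spreading_zones(("20", "cyan"))
tracks, tracks_file = get_track_intersects(("20", "cyan"),
                                           ["../raw_data/ship/some_cruise/some_cruise.lp"],
                                           sz_files, data_directory="data")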
Example 6
def split_m77t(h77tf, m77tf, data_directory="shipmag_data"):

    check_dir(data_directory)
    h77t_df = pd.read_csv(h77tf, sep='\t', dtype=str)
    m77t_df = pd.read_csv(m77tf, sep='\t', dtype=str)

    if "SURVEY_ID" not in m77t_df.columns:
        m77t_df = pd.read_csv(
            m77tf,
            sep='\t',
            dtype=str,
            names=[
                "SURVEY_ID", "TIMEZONE", "DATE", "TIME", "LAT", "LON",
                "POS_TYPE", "NAV_QUALCO", "BAT_TTIME", "CORR_DEPTH",
                "BAT_CPCO", "BAT_TYPCO", "BAT_QUALCO", "MAG_TOT", "MAG_TOT2",
                "MAG_RES", "MAG_RESSEN", "MAG_DICORR", "MAG_SDEPTH",
                "MAG_QUALCO", "GRA_OBS", "EOTVOS", "FREEAIR", "GRA_QUALCO",
                "LINEID", "POINTID"
            ])

    new_h77t_files, new_m77t_files = [], []
    for survey_id in h77t_df['SURVEY_ID']:
        h77t_survey_df = h77t_df[h77t_df['SURVEY_ID'] == survey_id]
        m77t_survey_df = m77t_df[m77t_df['SURVEY_ID'] == survey_id]

        if "MAG_TOT" not in m77t_survey_df.columns:
            print("magnetic data not found for %s, skipping" % survey_id)
            continue
        # keep only data with uncorrected magnetic readings
        m77t_survey_df = m77t_survey_df[m77t_survey_df["MAG_TOT"].notnull()]
        # keep only data with dates so they can be corrected
        m77t_survey_df = m77t_survey_df[m77t_survey_df["DATE"].notnull()]

        if m77t_survey_df.empty:
            print("no magnetic data found in %s, skipping" % survey_id)
            continue

        survey_dir = os.path.join(data_directory, survey_id)
        check_dir(survey_dir)
        h77t_survey_df.to_csv(os.path.join(survey_dir, survey_id + '.h77t'),
                              sep='\t',
                              index=False)
        m77t_survey_df.to_csv(os.path.join(survey_dir, survey_id + '.m77t'),
                              sep='\t',
                              index=False)
        new_h77t_files.append(os.path.join(survey_dir, survey_id + '.h77t'))
        new_m77t_files.append(os.path.join(survey_dir, survey_id + '.m77t'))

    return new_h77t_files, new_m77t_files
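
# A minimal usage sketch; the .h77t/.m77t names are hypothetical MGD77T header and
# data files covering the same surveys.
h77t_files, m77t_files = split_m77t("surveys.h77t", "surveys.m77t",
                                    data_directory="shipmag_data")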
Example 7
def save_iso_picks(deskew_path,srp_paths):
    anom_out_str = {}
    for srp_path in srp_paths:
        iso_df,avg_lon,avg_lat = get_lon_lat_from_plot_picks_and_deskew_file(deskew_path,srp_path)
        for anom,row in iso_df.iterrows():
            if anom in anom_out_str.keys(): anom_out_str[anom] += "> %s\n"%srp_path
            else: anom_out_str[anom] = "> %s\n"%srp_path
            row = row[row.notnull()]
            for lon_lat_dict in sorted(row.tolist(),key=lambda x: float(x['lat'])):
                anom_out_str[anom] += "%.3f\t%.3f\n"%(float(lon_lat_dict['lon']),float(lon_lat_dict['lat']))
    out_dir = "new_isochron_picks"
    utl.check_dir(out_dir)
    for key in anom_out_str.keys():
        out_file = "%s_chron.txt"%key
        outf = os.path.join(out_dir,out_file)
        f_out = open(outf,"w+")
        f_out.write(anom_out_str[key])
        f_out.close()
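
# A minimal usage sketch (hypothetical paths); srp_paths are the plot pick files
# that get_lon_lat_from_plot_picks_and_deskew_file reads alongside the .deskew file.
save_iso_picks("chron20.deskew",["picks/srp_profile1.txt","picks/srp_profile2.txt"])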
Example 8
def reduce_to_pole(deskew_path, pole_lon, pole_lat, spreading_rate_path=None, anomalous_skewness_model_path=None):

    asf,srf,sz_list = get_asf_srf(spreading_rate_path,anomalous_skewness_model_path)

    deskew_df = filter_deskew_and_calc_aei(deskew_path)

    print("reducing to pole - lat: %.3f, lon: %.3f"%(pole_lat,pole_lon))

    deskew_df = reduce_dsk_df_to_pole(deskew_df, pole_lon, pole_lat, asf, srf)

    old_results_dir = deskew_df['results_dir'].iloc[0]
    new_results_dir = os.path.join(old_results_dir,"pole_%.0f_%.0f_results"%(pole_lon,pole_lat))
    utl.check_dir(new_results_dir)
    deskew_df['results_dir'] = new_results_dir


    out_path = os.path.join(os.path.dirname(deskew_path),"pole_%.0f_%.0f.deskew"%(pole_lon,pole_lat))
    print("writing to %s"%out_path)
    utl.write_deskew_file(out_path,deskew_df)
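
# A minimal usage sketch (hypothetical path and pole); writes a
# pole_<lon>_<lat>.deskew file next to the input as in the function above.
reduce_to_pole("chron20.deskew", 320., 85.)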
Example 9
def seperate_E_V(cut_tracks):
    """
    Depriciated: just moves E and V into seperate files for analysis
    """
    for cut_track in cut_tracks:
        cut_track_dir, cut_track_path = os.path.split(cut_track)
        e_dir = os.path.join(cut_track_dir, 'E')
        v_dir = os.path.join(cut_track_dir, 'V')
        utl.check_dir(e_dir)
        utl.check_dir(v_dir)
        e_mv_files = (cut_track_path + ".E", cut_track_path + ".Ed",
                      cut_track_path + ".Ed.lp", cut_track_path + ".Ed.xyz")
        for e_mv_file in e_mv_files:
            shutil.copyfile(os.path.join(cut_track_dir, e_mv_file),
                            os.path.join(e_dir, e_mv_file))
        v_mv_files = (cut_track_path + ".V", cut_track_path + ".Vd",
                      cut_track_path + ".Vd.lp", cut_track_path + ".Vd.xyz")
        for v_mv_file in v_mv_files:
            shutil.copyfile(os.path.join(cut_track_dir, v_mv_file),
                            os.path.join(v_dir, v_mv_file))
Example 10
def find_track_cuts(tracks,
                    chrons_info,
                    results_directory,
                    tolerance=1,
                    min_angle=10,
                    plot=False):

    track_cuts = {}
    utl.check_dir(os.path.join(results_directory, "turning_points"))

    for track in tracks:

        #Get Track name
        track_name = os.path.basename(track).split('.')[0]
        print(track_name)

        #Run find_corners to simplify the path and find the "significant" turns in the track
        try:
            points, simplified, idx = rdp.find_corners(
                track + ".latlon",
                tolerance=tolerance,
                min_angle=np.deg2rad(min_angle))
        except IOError:
            print("file not found: %s" % (track + ".latlon"))
            continue

        x, y = points.T  #get points of track
        sx, sy = simplified.T  #get points of simplified track
        #save lon and lat of "significant" turns
        track_cuts[track] = [[syx, sxx] for sxx, syx in zip(sx[idx], sy[idx])]

        if plot:
            pg.plot_track_cuts(x, y, sx, sy, idx, chrons_info, track_name,
                               results_directory)

    return track_cuts
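
# A minimal usage sketch (hypothetical values); each track is expected to have a
# matching "<track>.latlon" file for rdp.find_corners to read.
track_cuts = find_track_cuts(["../raw_data/ship/some_cruise/some_cruise.lp"],
                             [("20", "cyan")], "results", tolerance=1, min_angle=10)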
Example 11
def seperate_data(data_directory, usable_tracks_path):
    usable_tracks_file = open(usable_tracks_path, 'r')
    usable_tracks = usable_tracks_file.readlines()
    usable_tracks_file.close()
    tracks_sz_and_inter = [track.split('\t') for track in usable_tracks]

    aeromag_directory = os.path.join(data_directory, 'aero')
    shipmag_directory = os.path.join(data_directory, 'ship')
    utl.check_dir(data_directory)
    utl.check_dir(aeromag_directory)
    utl.check_dir(shipmag_directory)
    new_tracks, out_string = [], ""
    for track, sz, inter in tracks_sz_and_inter:
        track_dir, track_file = os.path.split(track)
        if 'hi_alt' in track_dir:
            new_track_dir = os.path.join(aeromag_directory,
                                         track_file.split('-')[0],
                                         track_file.split('-')[1][:-4])
        elif 'ship' in track_dir:
            new_track_dir = os.path.join(shipmag_directory,
                                         track_file.split('.')[0])
        else:
            print(
                "couldn't identify if the data for track %s was ship or aeromag, just sticking it in the data directory; note this check relies on a specific directory structure being used, so feel free to change how it is done"
                % track)
            new_track_dir = data_directory
        utl.check_dir(new_track_dir)
        new_track_file = os.path.join(new_track_dir, track_file)
        shutil.copyfile(track, new_track_file)
        out_string += "%s\t%s\t%s" % (new_track_file, sz, str(inter))
        new_tracks.append(new_track_file)

    #write new file paths to usable_tracks_file
    seperated_tracks_path = usable_tracks_path
    seperated_tracks_file = open(seperated_tracks_path, 'w+')
    seperated_tracks_file.write(out_string)
    seperated_tracks_file.close()

    return new_tracks, seperated_tracks_path
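
# A minimal usage sketch; the usable-tracks file is the one written by
# get_track_intersects (paths hypothetical).
new_tracks, sep_path = seperate_data(
    "data", "data/usable_tracks_and_intersects_for_chron20.txt")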
Example 12
def create_deskew_file(chron_name,results_directory,age_min,age_max,data_directory='.',phase_shift=180,step=60):
    cut_tracks_path=os.path.join(data_directory,"usable_tracks_and_intersects_for_%s.txt"%str(chron_name))
    cut_tracks_file = open(cut_tracks_path,'r')
    cut_tracks = cut_tracks_file.readlines()
    cut_tracks_file.close()
    track_sz_and_inters = [track.split('\t') for track in cut_tracks]

    track_sz_and_inters.sort()

    out_str="comp_name\tphase_shift\tstep\tage_min\tage_max\tinter_lat\tinter_lon\tstrike\tdata_dir\tresults_dir\ttrack_type\tsz_name\tr\tg\tb\n"
    colors = [(0, 107, 164), (255, 128, 14), (171, 171, 171), (89, 89, 89), (95, 158, 209), (200, 82, 0), (137, 137, 137), (163, 200, 236), (255, 188, 121), (207, 207, 207)]
    colors = [(r/255., g/255., b/255.) for r, g, b in colors]  #normalize RGB to 0-1 without clobbering the i counter used below
    i,j = 0,1
    sz_to_color,sz_to_name = {},{}
    for track,sz,inter in track_sz_and_inters:

        idx = utl.read_idx_from_string(inter)
        if 'aero' in track:
            Ef = os.path.basename(track) + '.Ed.lp'
            Vf = os.path.basename(track) + '.Vd.lp'
            comps = [Ef,Vf]
            track_type = 'aero'
        elif 'ship' in track:
            comps = [os.path.basename(track) + '.lp']
            track_type = 'ship'
        else:
            print("could not tell if track %s is aero or ship data, skipping" % track)
            continue

        data_dir = os.path.split(track)[0]

        azsz_path = track[:-3]+'_'+track[-2:]+'_'+os.path.basename(sz)[:-4]+'.azszs'
        try:
            azsz_df = pd.read_csv(azsz_path,sep=' ')
            strike = azsz_df['strike'][0]
        except Exception:
            strike = 180  #default strike if the .azszs file is missing or unreadable

        if sz not in sz_to_color:
            if i >= len(colors): print("more spreading zones than colors, looping"); i = 0
            sz_to_color[sz] = colors[i]
            sz_to_name[sz] = "Spreading Zone %d"%j
            i += 1; j += 1

        for f in comps:
            out_str += f + "\t"
            out_str += str(phase_shift) + "\t"
            out_str += str(step) + "\t"
            out_str += str(age_min) + "\t"
            out_str += str(age_max) + "\t"
            out_str += str(idx[0][0][1]) + "\t"
            out_str += str(idx[0][0][0]) + "\t"
            out_str += str(strike) + "\t"
            out_str += str(data_dir) + "\t"
            out_str += str(results_directory) + "\t"
            out_str += track_type + "\t"
            out_str += str(sz_to_name[sz]) + "\t"
            out_str += str(sz_to_color[sz][0]) + "\t"
            out_str += str(sz_to_color[sz][1]) + "\t"
            out_str += str(sz_to_color[sz][2]) + "\n"

    utl.check_dir(data_directory)
    fout = open(os.path.join(data_directory,chron_name+'.deskew'), "w+")
    fout.write(out_str)
    fout.close()
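
# A minimal usage sketch (hypothetical ages and paths); this reads the usable-tracks
# file written by get_track_intersects and writes <chron_name>.deskew in data_directory.
create_deskew_file("chron20","results",42.0,43.0,data_directory="data")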
Example 13
def cut_tracks_and_flip(track_cuts, data_directory, heading="east"):

    cut_tracks, flipped_data = [], []
    for track, cuts in track_cuts.items():
        print("Starting Track: %s" % track)
        directory, path = os.path.split(track)
        dfin = utl.open_mag_file(track)
        lats = list(map(float, dfin['lat'].tolist()))
        lons = list(map(utl.convert_to_0_360, dfin['lon'].tolist()))
        df_segments = []
        for cut in cuts:
            try:
                cut_index = [[lon, lat] for lon, lat in zip(lons, lats)
                             ].index([utl.convert_to_0_360(cut[0]), cut[1]])
            except ValueError:
                print("cut point %s not found in track %s, skipping cut" %
                      (str(cut), track))
                continue
            print("cutting track: %s along index: %d" % (track, cut_index))
            df_segments.append(dfin.loc[:cut_index])
            dfin = dfin.loc[cut_index:]
        df_segments.append(dfin)
        i = 1
        for df_segment in df_segments:
            if len(df_segment) == 0: continue
            if heading == 'east':
                flip_bool = (utl.convert_to_0_360(df_segment['lon'].iloc[0]) >
                             utl.convert_to_0_360(df_segment['lon'].iloc[-1])
                             )  #flip if currently heading westerly
            elif heading == 'west':
                flip_bool = (utl.convert_to_0_360(df_segment['lon'].iloc[0]) <
                             utl.convert_to_0_360(df_segment['lon'].iloc[-1])
                             )  #flip if currently heading easterly
            elif heading == 'north':
                flip_bool = (df_segment['lat'].iloc[0] >
                             df_segment['lat'].iloc[-1])  #flip if currently heading southerly
            elif heading == 'south':
                flip_bool = (df_segment['lat'].iloc[0] <
                             df_segment['lat'].iloc[-1])  #flip if currently heading northerly
            else:
                print("the heading provided is not a cardinal direction, "
                      "please rerun with this corrected")
                return
            if flip_bool:
                print(
                    "flipping data for cut: %d track: %s such that path is %serly and thus (hopefully) oldest data first"
                    % (i, path, heading))
                df_segment = df_segment.iloc[::-1]
                flipped_data.append(track.split('.')[0] + '.c%d' % i)
            if not os.path.isdir(os.path.join(directory, 'c%d' % i)):
                print("making directory %s" %
                      os.path.join(directory, 'c%d' % i))
                utl.check_dir(os.path.join(directory, 'c%d' % i))
            segment_path = os.path.join(directory, 'c%d' % i,
                                        path.split('.')[0] + '.c%d' % i)
            i += 1
            print("writing: %s" % segment_path)
            df_segment.to_csv(segment_path,
                              sep='\t',
                              header=False,
                              index=False)
            cut_tracks.append(segment_path)
    f_flipped = open(os.path.join(data_directory, "flipped_data.txt"), 'w+')
    f_flipped.write('\n'.join(flipped_data))  #join also handles the case of no flipped tracks
    f_flipped.close()
    return cut_tracks, flipped_data
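
# A minimal usage sketch; track_cuts maps each track path to its [lon, lat] cut
# points, the structure returned by find_track_cuts (values hypothetical).
track_cuts = {"../raw_data/ship/some_cruise/some_cruise.lp": [[210.5, -15.2]]}
cut_tracks, flipped = cut_tracks_and_flip(track_cuts, "data", heading="east")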
Example 14
def plot_az_strike(track, spreading_zone_file, idx, az, strike, chron_color,
                   chron_name, results_directory, fout_name):

    #Create Figure
    fig = plt.figure(figsize=(9, 9), dpi=80)

    #Create Chron markers
    dft = utl.open_mag_file(track)
    lt = [[utl.convert_to_0_360(lon), float(lat)]
          for lon, lat in zip(dft['lon'], dft['lat'])]
    at = np.array(lt)
    lsz = [
        list(map(float, line.split()))
        for line in open(spreading_zone_file).readlines()
    ]
    asz = np.array(lsz)

    #Create Map

    llcrnrlon = max(min(at[:, 0]) - 20, 0)
    llcrnrlat = max(min(at[:, 1]) - 20, -89)
    urcrnrlon = min(max(at[:, 0]) + 20, 360)
    urcrnrlat = min(max(at[:, 1]) + 20, 89)

    gcm = create_basic_map(projection='merc',
                           llcrnrlat=llcrnrlat,
                           urcrnrlat=urcrnrlat,
                           llcrnrlon=llcrnrlon,
                           urcrnrlon=urcrnrlon,
                           fig=fig)

    sz_handle, = gcm.plot(asz[:, 0],
                          asz[:, 1],
                          color=chron_color,
                          zorder=1,
                          label=chron_name,
                          transform=ccrs.PlateCarree())

    gcm_handle, = gcm.plot(at[:, 0],
                           at[:, 1],
                           color='k',
                           zorder=2,
                           label=os.path.basename(track),
                           transform=ccrs.PlateCarree())

    gcm.scatter(at[idx[1][0]][0],
                at[idx[1][0]][1],
                color='g',
                marker='o',
                s=10,
                zorder=3,
                label='nearest intercept',
                transform=ccrs.PlateCarree())

    geodict = Geodesic(6371000., 0.).Direct(float(at[idx[1][0]][1]),
                                            float(at[idx[1][0]][0]), float(az),
                                            1000000)
    b_lon, b_lat = (360 + geodict["lon2"]) % 360, geodict["lat2"]
    gcm.arrow(b_lon,
              b_lat,
              b_lon - at[idx[1][0]][0],
              b_lat - at[idx[1][0]][1],
              fc="white",
              ec="r",
              linewidth=1,
              head_width=1,
              head_length=1,
              label='azimuth',
              transform=ccrs.PlateCarree())

    geodict = Geodesic(6371000., 0.).Direct(float(at[idx[1][0]][1]),
                                            float(at[idx[1][0]][0]),
                                            float(strike), 1000000)
    b_lon, b_lat = (360 + geodict["lon2"]) % 360, geodict["lat2"]
    gcm.arrow(b_lon,
              b_lat,
              b_lon - at[idx[1][0]][0],
              b_lat - at[idx[1][0]][1],
              fc="white",
              ec="pink",
              linewidth=1,
              head_width=1,
              head_length=1,
              label='strike',
              transform=ccrs.PlateCarree())

    #plot title and labels
    plt.title(os.path.basename(track))
    plt.legend(loc='best')

    az_plots_dir = os.path.join(results_directory, "azimuth_strike_plots")
    utl.check_dir(az_plots_dir)

    fig.savefig(
        os.path.join(az_plots_dir,
                     os.path.basename(fout_name)[:-5] + "png"))
    plt.close(fig)
Example 15
def split_m88t(h88tf,m88tf,data_directory="aeromag_data"):
    check_dir(data_directory)
    h88t_df = pd.read_csv(h88tf,sep='\t',dtype=str)
    m88t_df = pd.read_csv(m88tf,sep='\t',dtype=str)

    data_type = ""
    if "X" in h88t_df["PARAMS_CO"].iloc[0] and "Y" in h88t_df["PARAMS_CO"].iloc[0] and "Z" in h88t_df["PARAMS_CO"].iloc[0]:
        data_type = "vector"
    elif "T" in h88t_df["PARAMS_CO"].iloc[0]:
        data_type = "total"
    else:
        raise TypeError("Could not identify the type of aeromagnetic data being prepared for analysis from the h88t PARAMS_CO header, please specify if XYZ vector data exist or if only T (total) component data exists here and try again")

    new_h88t_files,new_m88t_files,dates_list = [],[],[]
    for survey_id in h88t_df['SURVEY_ID']:
        h_survey_df = h88t_df[h88t_df['SURVEY_ID']==survey_id]
        survey_df = m88t_df[m88t_df['SURVEY_ID']==survey_id]

        survey_name = survey_id.split("WORLD")[-1].strip("_ -")
        survey_dir = os.path.join(data_directory,survey_name)
        check_dir(survey_dir)

        if len(survey_df['LINEID'].drop_duplicates()) == 1: raise RuntimeError("This method has yet to be implemented for surveys which do not have any line ids and need to be split based on changes in flight base; this should be possible by calculating the distance between all points and looking for jumps in the delta distance value")

        for e in survey_df['LINEID'].drop_duplicates():
            line_df = survey_df[survey_df['LINEID']==e]
            if pd.isnull(e): e = "nan"  #guard missing line ids before using them in names (np.isnan fails on strings)
            track_name = survey_name+'_'+e

            #get avg decimal year for this flight
            dates_dict = {}
            dates = line_df[line_df['DATE'].notnull()]['DATE'].tolist()
            if not dates:
                print("no dates found for line %s of survey %s, skipping"%(e,survey_name))
                continue
            avg_date = sum(map(lambda x: dt_to_dec(datetime(int(x[0:4]),int(x[4:6]),int(x[6:8]))),dates))/len(dates)
            year = dates[0][0:4]
            month = dates[0][4:6]
            day = dates[0][6:8]
            dates_dict['profile'] = track_name
            dates_dict['decimal_year'] = avg_date
            dates_dict['year'] = year
            dates_dict['month'] = month
            dates_dict['day'] = day
            dates_dict['mean'] = 'mean'
            dates_list.append(dates_dict)

            if data_type=="vector":
                line_df = line_df[(line_df["MAG_X_NRTH"].notnull()) & (line_df["MAG_Y_EAST"].notnull()) & (line_df["MAG_Z_VERT"].notnull())]
            else:
                line_df = line_df[(line_df["MAG_TOTOBS"].notnull())]

            line_f_name = os.path.join(survey_dir,track_name+".m88t")
            line_df.to_csv(line_f_name,sep='\t',index=False)
            new_m88t_files.append(line_f_name)

        h_survey_name = os.path.join(survey_dir,survey_id+".h88t")
        h_survey_df.to_csv(h_survey_name,sep='\t',index=False)
        new_h88t_files.append(h_survey_name)

    dates_df = pd.DataFrame(dates_list)
    dates_df = dates_df.set_index('profile')  #set_index returns a copy, so assign it back
    dates_path = os.path.join(survey_dir,survey_name+".dates")  #written to the last survey's directory, as in the original
    dates_df.to_csv(dates_path,sep='\t')

    return new_h88t_files,new_m88t_files,dates_path
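
# A minimal usage sketch; the .h88t/.m88t names are hypothetical aeromagnetic header
# and data exchange files for the same surveys.
h88t_files,m88t_files,dates_path = split_m88t("surveys.h88t","surveys.m88t",
                                              data_directory="aeromag_data")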