Example #1
def process_directory(params):

    #parameters for processing
    input_dir = params["input_directory"]
    ws = params["subset_wavelength_range"][0]
    we = params["subset_wavelength_range"][1]
    pattern = params["grouping_pattern"]
    patt_name = params['grouping_output_name']

    #read the spectrums
    extension = {'asd', 'ASD'}
    spec_files = [
        os.path.join(input_dir, f) for f in os.listdir(input_dir)
        if get_extension(f) in extension
    ]
    raw_specs = [SdalReader().read_spectrum(f) for f in spec_files]
    if len(raw_specs) == 0: return
    print("Data directory: {}".format(input_dir))
    print("  Read {} {} files".format(len(raw_specs), extension))

    #perform jump correction
    jumpcorrector = JumpCorrector(params["jumpcorrection_wavelengths"],
                                  params["jumpcorrection_stablezone"])
    jc_specs = [jumpcorrector.correct(s) for s in raw_specs]
    print("   Jump corrected {} spectrums(s)".format(len(jc_specs)))

    #separate into green vegetation and non-green vegetation spectra
    gvd = GreenVegDetector()
    raw_white_specs, raw_target_specs = [], []
    jc_white_specs, jc_target_specs = [], []
    ndvis, reflectance_ranges = [], []
    for (raw, jc) in zip(raw_specs, jc_specs):
        (status, ndvi, reflectance_range) = gvd.is_green_vegetation(jc)
        ndvis.append(ndvi)
        reflectance_ranges.append(reflectance_range)
        if status:
            raw_target_specs.append(raw)
            jc_target_specs.append(jc)
        else:
            raw_white_specs.append(raw)
            jc_white_specs.append(jc)
    print("   # white spectra: {}".format(len(jc_white_specs)))
    print("   # target spectra: {}".format(len(jc_target_specs)))

    #subset to desired wavelength range
    #do this for the jc_target_specs only
    proc_target_specs = [s.wavelength_subset(ws, we) for s in jc_target_specs]
    print("   Subsetted spectra to range: {}, {}".format(ws, we))

    #identify groups using SpectrumRegex and create SpectrumGroup objects
    grps_dict = SpectrumRegex().make_groups(proc_target_specs, pattern)
    patt_groups = [SpectrumGroup().group(grps_dict[k], k) for k in grps_dict]
    print("   Grouped spectrums: {} groups".format(len(patt_groups)))

    #create output directories and save data
    save_data(input_dir, raw_white_specs, raw_target_specs, proc_target_specs,
              patt_name, patt_groups)
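
process_directory documents its configuration only through the keys it reads from params. A minimal sketch of such a dict, assuming illustrative values (the key names come from the code above; the path, wavelength ranges, regex pattern, and jump-correction settings are hypothetical):

# hypothetical settings for Example #1; key names are taken from the code above,
# all values are illustrative placeholders
params = {
    "input_directory": "/data/spectra/asd",
    "subset_wavelength_range": [400.0, 2400.0],
    "grouping_pattern": r"plot\d+",
    "grouping_output_name": "plot",
    "jumpcorrection_wavelengths": [1000.0, 1800.0],
    "jumpcorrection_stablezone": 5,
}
process_directory(params)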
Example #2
def save_data(out_dir, raw_white_specs, raw_target_specs, proc_target_specs,
              patt_name, patt_groups):

    from SpectrumGroup import SpectrumGroup

    #create output directories
    sodir = os.path.join(out_dir, 'sdal_outputs')
    if os.path.exists(sodir):
        shutil.rmtree(sodir)
        time.sleep(0.1)
        print("  Deleted: {}".format(sodir))
    os.mkdir(sodir)
    os.mkdir(os.path.join(sodir, 'raw_whites'))
    os.mkdir(os.path.join(sodir, 'raw_targets'))
    os.mkdir(os.path.join(sodir, 'proc_targets'))
    os.mkdir(os.path.join(sodir, 'plots'))
    os.mkdir(os.path.join(sodir, 'dataframes'))
    os.mkdir(os.path.join(sodir, patt_name + "_means"))
    os.mkdir(os.path.join(sodir, patt_name + "_medians"))
    os.mkdir(os.path.join(sodir, patt_name + "_stds"))
    print("   Created output directory sdal_outputs and its sub-directories")

    #create the list of namedtuples in order to save data and plots
    save_specs = []
    stat_groups = defaultdict(list)
    #construct named tuples for the patt_groups
    for g in patt_groups:
        ss = SaveSpec(group=g,
                      df=os.path.join(sodir, 'dataframes'),
                      plots=os.path.join(sodir, 'plots'),
                      specs="",
                      means=os.path.join(sodir, patt_name + "_means"),
                      medians=os.path.join(sodir, patt_name + "_medians"),
                      stds=os.path.join(sodir, patt_name + "_stds"))
        save_specs.append(ss)
        stat_groups['means'].append(g.mean)
        stat_groups['medians'].append(g.median)
        stat_groups['stds'].append(g.std)
    print("    Constructed SaveSpec for patt_groups")

    #construct a spectrum group and named tuple for the stat_groups spectrums
    for key in stat_groups:
        sg = SpectrumGroup().group(spectrums=stat_groups[key],
                                   group_name=patt_name + "_" + key)
        ss = SaveSpec(group=sg,
                      df=os.path.join(sodir, 'dataframes'),
                      plots=os.path.join(sodir, 'plots'),
                      specs="",
                      means="",
                      medians="",
                      stds="")
        save_specs.append(ss)
    print("    Constructed SaveSpec for stat_groups")
    #construct a spectrum group and named tuple for raw and proc spectrums
    raw_proc_groups = {
        'raw_whites': raw_white_specs,
        'raw_targets': raw_target_specs,
        'proc_targets': proc_target_specs
    }
    for key in raw_proc_groups:
        sg = SpectrumGroup().group(spectrums=raw_proc_groups[key],
                                   group_name=key)
        ss = SaveSpec(group=sg,
                      df=os.path.join(sodir, 'dataframes'),
                      plots=os.path.join(sodir, 'plots'),
                      specs=os.path.join(sodir, key),
                      means="",
                      medians="",
                      stds="")
        save_specs.append(ss)
    print("    Constructed SaveSpec for raw_proc_groups")
    #do the data output
    for ss in save_specs:
        if ss.group.is_empty():
            print("      EMPTY: {}".format(ss.group.name))
        else:
            ss.group.save_dataframe(ss.df)
            ss.group.save_plots(ss.plots)
            ss.group.save_spectrums(ss.specs)
            ss.group.save_stats(ss.means, ss.medians, ss.stds)
            print("      Saved: {}".format(ss.group.name))
Example #3
def test(testsdir,
         verbose = False):
    idir = os.path.join(testsdir, "input")
    statuses = []
    testcount = 0        
 
    #TEST 1:
    atol = 0.000001   
    testcount = testcount + 1
    #create SpectrumGroup from Spectrum objects
    #--create the Spectrum objects    
    ins = []
    idtmplt = "spec{}"
    company = "csv"
    instrument = "sdal"
    waves = [500.0, 501.0, 502.0, 503.0, 504.0, 505.0, 506.0]
    gstart = 0.1134
    gstep = 0.1
    step = 0.01
    for i in range(4):
        start = gstart + i*gstep
        refls = [(start + j*step) for j in range(len(waves))]
        s = Spectrum(np.array([waves, refls], dtype = np.double).transpose(),
                     idtmplt.format(i+1),
                     company,
                     instrument)
        ins.append(s)
    #--create the SpectrumGroup object
    specgrp = SpectrumGroup(spectrums = ins)
    #--create expected data
    expdata = np.empty((len(ins), len(waves)), dtype = np.double)
    for i in range(4):
        start = gstart + i*gstep
        expdata[i, :] = [(start + j*step) for j in range(len(waves))]
    expwaves = np.array(waves, dtype = np.double)
    expidstrs = [idtmplt.format(i + 1) for i in range(len(ins))]
    expcompanys = [company]*len(ins)
    expinstruments = [instrument]*len(ins)
    #--do the comparisons
    statuses.append(np.allclose(specgrp.data, expdata, atol = atol))   
    statuses.append(np.allclose(specgrp.wavelengths, expwaves, atol = atol))
    statuses.append(specgrp.idstrs == expidstrs)
    statuses.append(specgrp.companys == expcompanys)
    statuses.append(specgrp.instruments == expinstruments)
    if not all(statuses):
        print("Failed TEST {}".format(testcount))
    
    
    #TEST 2:
    testcount = testcount + 1
    #break SpectrumGroup to form Spectrum objects
    outs = specgrp.ungroup()
    for (i, o) in zip(ins, outs):
        statuses.append(i.idstr == o.idstr)
        statuses.append(i.company == o.company)
        statuses.append(i.instrument == o.instrument)
        statuses.append(np.allclose(i.data, o.data, atol = atol))
    if not all(statuses):
        print("Failed TEST {}".format(testcount))
    
    
    #TEST 3:
    testcount = testcount + 1
    #test subsetting
    #--subset
    subgrp = specgrp.wavelength_subset(502.0, 505.0)
    #--create expected data
    expdata = np.array([[ 0.1334, 0.1434, 0.1534, 0.1634],
                        [ 0.2334, 0.2434, 0.2534, 0.2634],
                        [ 0.3334, 0.3434, 0.3534, 0.3634],
                        [ 0.4334, 0.4434, 0.4534, 0.4634]], dtype = np.double)
    expwaves = np.array([502, 503, 504, 505], dtype = np.double)
    #--do comparisons
    statuses.append(np.allclose(subgrp.data, expdata, atol = atol))   
    statuses.append(np.allclose(subgrp.wavelengths, expwaves, atol = atol))
    statuses.append(subgrp.idstrs == expidstrs)
    statuses.append(subgrp.companys == expcompanys)
    statuses.append(subgrp.instruments == expinstruments)
    if not all(statuses):
        print("Failed TEST {}".format(testcount))
        
    
    #TEST 4:
    testcount = testcount + 1
    #test reading from csv and writing to csv
    #--read input.csv    
    ringrp = SpectrumGroup(filename = os.path.join(idir, "input_test4.csv"))
    #--create expected data
    gstart = 0.1134
    gstep = 0.1
    step = 0.01
    expdata = np.empty((4, 7), dtype = np.double)
    for i in range(4):
        start = gstart + i*gstep
        expdata[i, :] = [(start + j*step) for j in range(len(waves))]
    waves = [500.0, 501.0, 502.0, 503.0, 504.0, 505.0, 506.0]
    expwaves = np.array(waves, dtype = np.double)
    expidstrs = ["spec1", "spec2", "spec3", "spec4"]
    expcompanys = ["csv"]*4
    expinstruments = ["sdal"]*4
    #--do the comparisons
    statuses.append(np.allclose(ringrp.data, expdata, atol = atol))   
    statuses.append(np.allclose(ringrp.wavelengths, expwaves, atol = atol))
    statuses.append(ringrp.idstrs == expidstrs)
    statuses.append(ringrp.companys == expcompanys)
    statuses.append(ringrp.instruments == expinstruments)
    if not all(statuses):
        print("Failed TEST {}".format(testcount))
        
    #TEST 5:
    testcount = testcount + 1
    #read input/csv file
    ringrp = SpectrumGroup(filename = os.path.join(idir, "input_test5.csv"))
    #compute stats
    statsgrp = ringrp.compute_stats()
    #ungroup into spectrums
    statspec = statsgrp.ungroup()
    #expected data
    mean = np.array([0.2634, 0.2734, 0.2834, 0.2934, 0.3034, 0.3134, 0.3234], 
                    dtype = np.double)
    std = np.array([0.12909944, 0.12909944, 0.12909944, 0.12909944, 
                    0.12909944, 0.12909944, 0.12909944], dtype = np.double)
    median = np.array([0.2634, 0.2734, 0.2834, 0.2934, 0.3034, 0.3134, 0.3234],
                      dtype = np.double)
    expdata = np.vstack((mean, std, median))
    waves = [500.0, 501.0, 502.0, 503.0, 504.0, 505.0, 506.0]
    expwaves = np.array(waves, dtype = np.double)
    expidstrs = ["sdal_mean", "sdal_std", "sdal_median"]
    expcompany = "asd+csv+tmp"
    expinstrument = "sdal+sdal1+sdal2"
    #do the comparisons
    for (i, s) in enumerate(statspec):
        statuses.append(np.allclose(s.wavelengths, expwaves))
        statuses.append(np.allclose(s.reflectances, expdata[i, :]))
        statuses.append(s.idstr == expidstrs[i])
        statuses.append(s.company == expcompany)
        statuses.append(s.instrument == expinstrument)
    if not all(statuses):
        print("Failed TEST {}".format(testcount))
   
    
    #status message
    if all(statuses):
        print("{}: PASSED".format(__file__))
    else:
        print("{}: FAILED".format(__file__))
Example #4
def process_directory(params):

    #parameters for processing
    input_dir = params["input_directory"]
    ws = params["subset_wavelength_range"][0]
    we = params["subset_wavelength_range"][1]
    pattern = params["grouping_pattern"]
    patt_name = params['grouping_output_name']

    #read the spectrums
    extension = 'sed'
    spec_files = [
        os.path.join(input_dir, f) for f in os.listdir(input_dir)
        if get_extension(f) == extension
    ]
    raw_specs = [SdalReader().read_spectrum(f) for f in spec_files]
    if len(raw_specs) == 0: return
    print("Data directory: {}".format(input_dir))
    print("  Read {} {} files".format(len(raw_specs), extension))

    #uniquify wavelengths, make them monotonic and interpolate to 1nm
    ohr = OverlapHandler(rstype='cubic')
    rs_specs = []
    for s in raw_specs:
        if ohr.is_1nm(s):
            rs_specs.append(s)
        else:
            rs_specs.append(ohr.process_overlap(s))
    print("  Uniquified and monotoned spectrums")

    #separate into green vegetation and non-green vegetation spectra
    gvd = GreenVegDetector()
    raw_white_specs, raw_target_specs = [], []
    rs_white_specs, rs_target_specs = [], []
    ndvis, reflectance_ranges = [], []
    for (raw, rs) in zip(raw_specs, rs_specs):
        (status, ndvi, reflectance_range) = gvd.is_green_vegetation(rs)
        ndvis.append(ndvi)
        reflectance_ranges.append(reflectance_range)
        if status:
            raw_target_specs.append(raw)
            rs_target_specs.append(rs)
        else:
            raw_white_specs.append(raw)
            rs_white_specs.append(rs)
    print("   # white spectra: {}".format(len(rs_white_specs)))
    print("   # target spectra: {}".format(len(rs_target_specs)))

    #subset to desired wavelength range
    #do this for the rs_target_specs only
    proc_target_specs = [s.wavelength_subset(ws, we) for s in rs_target_specs]
    print("   Subsetted spectra to range: {}, {}".format(ws, we))

    #identify groups using SpectrumRegex and create SpectrumGroup objects
    grps_dict = SpectrumRegex().make_groups(proc_target_specs, pattern)
    patt_groups = [SpectrumGroup().group(grps_dict[k], k) for k in grps_dict]
    print("   Grouped spectrums: {} groups".format(len(patt_groups)))

    #create output directories and save data
    save_data(input_dir, raw_white_specs, raw_target_specs, proc_target_specs,
              patt_name, patt_groups)
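
Both variants of process_directory filter files with a get_extension helper that is not shown. From the way its result is used ('asd'/'ASD' membership in Example #1, == 'sed' here), it presumably returns the extension without the leading dot and without changing case; a plausible stand-in under that assumption:

import os

# assumed behaviour of get_extension: return the file extension without the leading
# dot and with the original case, so that both the {'asd', 'ASD'} membership test
# and the == 'sed' comparison above work as written
def get_extension(filename):
    return os.path.splitext(filename)[1].lstrip(".")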
Example #5
def process(params):
#    params.print_params()
    #get the project params and verify
    project = params.get_params("project")
    if project:
        verify_project(project)
    else:
        print("--project is required")
        sys.exit(1)
    #get the resampling params
    resampling = params.get_params("resampling")
    #get the jumpcorrection params and verify
    jumpcorrection = params.get_params("jumpcorrection")
    if jumpcorrection:
        verify_jumpcorrection(jumpcorrection)
    
    #get the groupings and verify them
    groupings = {grp:params.get_params(grp) for grp in params.get_groups()}
    verify_groupings(params.default_group, params.get_groups(), groupings)

    tags = ["raw", params.default_group]
    specs = defaultdict(list)
    #specs["raw"] created
    #get the filenames
    allfiles = os.listdir(project["indir"])
    extfiles = []
    for f in allfiles:
        ext = get_directory_filename_extension(f)[2]
        if ext == project["fileext"]:
            extfiles.append(os.path.join(project["indir"], f))
    #read the raw spectrums
    uniquifier = WaveUniquifier()
    rawspecs = [SdalReader().read_spectrum(f) for f in extfiles]
    uniqspecs = [uniquifier.uniquify(s) for s in rawspecs]
    specs["raw"] = uniqspecs
    
    #specs["preproc"] created
    #do the pre-processing
    prepspecs = specs["raw"]
    if resampling:
        resampler = WaveResampler(rstype = resampling["type"],
                                  wavestart = resampling["range"][0],
                                  wavestop = resampling["range"][1],
                                  spacing = resampling["spacing"])
        rsspecs = [resampler.resample(s) for s in prepspecs]
        prepspecs = rsspecs
    if jumpcorrection:
        corrector = JumpCorrector(jumpcorrection["wavelengths"],
                                  jumpcorrection["stablezone"])
        jcspecs = [corrector.correct(s) for s in prepspecs]
        prepspecs = jcspecs
    #detect the references
    refdet = ReferenceDetector(context = "gveg")
    nonrefs = []
    refs = []
    for s in prepspecs:
        if refdet.is_reference(s):
            refs.append(s)
        else:
            nonrefs.append(s)
    specs[params.default_group] = nonrefs
    
    #specs[group_tag] created
    #do the grouping 
    for t in groupings:
        tags.append(t)
        itag = groupings[t]["intag"]
        patt = groupings[t]["pattern"]
        regex = SpectrumRegex()
        tgrps = regex.make_groups(specs[itag], patt)
        for tg in tgrps:
            sg = SpectrumGroup(spectrums = tgrps[tg])
            ms = sg.mean_spectrum()
            ms.idstr = tg
            specs[t].append(ms)

#    subsets = {grp:params.get_params(grp) for grp in params.get_subsets()}
#    print(subsets)
#    for t in subsets:
#        itag = subsets[t]["intag"]
#        otag = subsets[t]["outtag"]
#        wavestart = subsets[t]["range"][0]
#        wavestop = subsets[t]["range"][1]
#        for s in specs[itag]:
#            subspec = s.wavelength_subset(wavestart, wavestop)
#            subspec.idstr = subspec.idstr + otag
#            print("idstr = {}".format(subspec.idstr))
#            specs[otag].append(subspec)
                           
    #create outputs
    prjdir = os.path.join(project["outdir"], project["name"])
    os.mkdir(prjdir)
    for t in specs:
        tdir = os.path.join(prjdir, t)
        os.mkdir(tdir)
        tgrpfn = "___{}___.csv".format(t)                       
        for s in specs[t]:
            s.write_csv(odir = tdir)
        sg = SpectrumGroup(spectrums = specs[t])
        sg.write_csv(tdir, tgrpfn)
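
process indexes element [2] of get_directory_filename_extension(f), so the helper apparently splits a path into directory, bare filename, and extension. A sketch under that assumption (the exact return convention is not shown in the example):

import os

# assumed helper: split a path into (directory, filename-without-extension, extension);
# only index [2], the extension without the leading dot, is used by process() above
def get_directory_filename_extension(path):
    directory, name = os.path.split(path)
    filename, ext = os.path.splitext(name)
    return directory, filename, ext.lstrip(".")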
Example #6
def process_directory(params):
    
    #parameters for processing
    input_dir = params["input_directory"]
    ws = params["subset_wavelength_range"][0]
    we = params["subset_wavelength_range"][1]
    
    #read the spectrums    
    extension = {'asd', 'ASD'}
    spec_files = [os.path.join(input_dir, f) for f in os.listdir(input_dir) 
                                             if get_extension(f) in extension]
    raw_specs = [SdalReader().read_spectrum(f) for f in spec_files]
    if len(raw_specs) == 0: return    
    print("Data directory: {}".format(input_dir))
    print("  Read {} {} files".format(len(raw_specs), extension))
    
    
    #perform jump correction
    jumpcorrector = JumpCorrector(params["jumpcorrection_wavelengths"],
                                  params["jumpcorrection_stablezone"])
    jc_specs = [jumpcorrector.correct(s) for s in raw_specs]
    print("   Jump corrected {} spectrums(s)".format(len(jc_specs)))
    
    
    #separate into green vegetation and non-green vegetation spectra
    gvd = GreenVegDetector()
    raw_white_specs, raw_target_specs = [], []
    jc_white_specs, jc_target_specs = [], []
    ndvis, reflectance_ranges = [], []
    for (raw, jc) in zip(raw_specs, jc_specs):
        (status, ndvi, reflectance_range) = gvd.is_green_vegetation(jc)
        ndvis.append(ndvi)
        reflectance_ranges.append(reflectance_range)
        if status:
            raw_target_specs.append(raw)
            jc_target_specs.append(jc)
        else:
            raw_white_specs.append(raw)
            jc_white_specs.append(jc)
    print("   # white spectra: {}".format(len(jc_white_specs)))
    print("   # target spectra: {}".format(len(jc_target_specs)))

    
    #subset to desired wavelength range
    #do this for the jc_target_specs only
    proc_target_specs = [s.wavelength_subset(ws, we) for s in jc_target_specs]
    print("   Subsetted spectra to range: {}, {}".format(ws, we))

    
    #create SpectrumGroup object for raw_target_specs
    raw_target_sg = SpectrumGroup()
    raw_target_sg.group(raw_target_specs, "raw_target")
    raw_target_sg.save_dataframe(input_dir)
    raw_target_sg.save_plots(input_dir)

    #create SpectrumGroup object for raw_white_specs
    raw_white_sg = SpectrumGroup()
    raw_white_sg.group(raw_white_specs, "raw_white")
    raw_white_sg.save_dataframe(input_dir)
    raw_white_sg.save_plots(input_dir)

        
    #create SpectrumGroup object for proc_target_specs
    proc_target_sg = SpectrumGroup()
    proc_target_sg.group(proc_target_specs, "proc_target")
    proc_target_sg.save_dataframe(input_dir)
    proc_target_sg.save_plots(input_dir)
Example #7
def test(testsdir, verbose=False):
    idir = os.path.join(testsdir, "input")
    statuses = []
    testcount = 0

    #TEST 1:
    atol = 0.000001
    testcount = testcount + 1
    #create SpectrumGroup from Spectrum objects
    #--create the Spectrum objects
    ins = []
    idtmplt = "spec{}"
    company = "csv"
    instrument = "sdal"
    waves = [500.0, 501.0, 502.0, 503.0, 504.0, 505.0, 506.0]
    gstart = 0.1134
    gstep = 0.1
    step = 0.01
    for i in range(4):
        start = gstart + i * gstep
        refls = [(start + j * step) for j in range(len(waves))]
        s = Spectrum(
            np.array([waves, refls], dtype=np.double).transpose(),
            idtmplt.format(i + 1), company, instrument)
        ins.append(s)
    #--create the SpectrumGroup object
    specgrp = SpectrumGroup(spectrums=ins)
    #--create expected data
    expdata = np.empty((len(ins), len(waves)), dtype=np.double)
    for i in range(4):
        start = gstart + i * gstep
        expdata[i, :] = [(start + j * step) for j in range(len(waves))]
    expwaves = np.array(waves, dtype=np.double)
    expidstrs = [idtmplt.format(i + 1) for i in range(len(ins))]
    expcompanys = [company] * len(ins)
    expinstruments = [instrument] * len(ins)
    #--do the comparisons
    statuses.append(np.allclose(specgrp.data, expdata, atol=atol))
    statuses.append(np.allclose(specgrp.wavelengths, expwaves, atol=atol))
    statuses.append(specgrp.idstrs == expidstrs)
    statuses.append(specgrp.companys == expcompanys)
    statuses.append(specgrp.instruments == expinstruments)
    if not all(statuses):
        print("Failed TEST {}".format(testcount))

    #TEST 2:
    testcount = testcount + 1
    #break SpectrumGroup to form Spectrum objects
    outs = specgrp.ungroup()
    for (i, o) in zip(ins, outs):
        statuses.append(i.idstr == o.idstr)
        statuses.append(i.company == o.company)
        statuses.append(i.instrument == o.instrument)
        statuses.append(np.allclose(i.data, o.data, atol=atol))
    if not all(statuses):
        print("Failed TEST {}".format(testcount))

    #TEST 3:
    testcount = testcount + 1
    #test subsetting
    #--subset
    subgrp = specgrp.wavelength_subset(502.0, 505.0)
    #--create expected data
    expdata = np.array(
        [[0.1334, 0.1434, 0.1534, 0.1634], [0.2334, 0.2434, 0.2534, 0.2634],
         [0.3334, 0.3434, 0.3534, 0.3634], [0.4334, 0.4434, 0.4534, 0.4634]],
        dtype=np.double)
    expwaves = np.array([502, 503, 504, 505], dtype=np.double)
    #--do comparisons
    statuses.append(np.allclose(subgrp.data, expdata, atol=atol))
    statuses.append(np.allclose(subgrp.wavelengths, expwaves, atol=atol))
    statuses.append(subgrp.idstrs == expidstrs)
    statuses.append(subgrp.companys == expcompanys)
    statuses.append(subgrp.instruments == expinstruments)
    if not all(statuses):
        print("Failed TEST {}".format(testcount))

    #TEST 4:
    testcount = testcount + 1
    #test reading from csv and writing to csv
    #--read input.csv
    ringrp = SpectrumGroup(filename=os.path.join(idir, "input_test4.csv"))
    #--create expected data
    gstart = 0.1134
    gstep = 0.1
    step = 0.01
    expdata = np.empty((4, 7), dtype=np.double)
    for i in range(4):
        start = gstart + i * gstep
        expdata[i, :] = [(start + j * step) for j in range(len(waves))]
    waves = [500.0, 501.0, 502.0, 503.0, 504.0, 505.0, 506.0]
    expwaves = np.array(waves, dtype=np.double)
    expidstrs = ["spec1", "spec2", "spec3", "spec4"]
    expcompanys = ["csv"] * 4
    expinstruments = ["sdal"] * 4
    #--do the comparisons
    statuses.append(np.allclose(ringrp.data, expdata, atol=atol))
    statuses.append(np.allclose(ringrp.wavelengths, expwaves, atol=atol))
    statuses.append(ringrp.idstrs == expidstrs)
    statuses.append(ringrp.companys == expcompanys)
    statuses.append(ringrp.instruments == expinstruments)
    if not all(statuses):
        print("Failed TEST {}".format(testcount))

    #TEST 5:
    testcount = testcount + 1
    #read input/csv file
    ringrp = SpectrumGroup(filename=os.path.join(idir, "input_test5.csv"))
    #compute stats
    statsgrp = ringrp.compute_stats()
    #ungroup into spectrums
    statspec = statsgrp.ungroup()
    #expected data
    mean = np.array([0.2634, 0.2734, 0.2834, 0.2934, 0.3034, 0.3134, 0.3234],
                    dtype=np.double)
    std = np.array([0.12909944, 0.12909944, 0.12909944, 0.12909944,
                    0.12909944, 0.12909944, 0.12909944], dtype=np.double)
    median = np.array([0.2634, 0.2734, 0.2834, 0.2934, 0.3034, 0.3134, 0.3234],
                      dtype=np.double)
    expdata = np.vstack((mean, std, median))
    waves = [500.0, 501.0, 502.0, 503.0, 504.0, 505.0, 506.0]
    expwaves = np.array(waves, dtype=np.double)
    expidstrs = ["sdal_mean", "sdal_std", "sdal_median"]
    expcompany = "asd+csv+tmp"
    expinstrument = "sdal+sdal1+sdal2"
    #do the comparisons
    for (i, s) in enumerate(statspec):
        statuses.append(np.allclose(s.wavelengths, expwaves))
        statuses.append(np.allclose(s.reflectances, expdata[i, :]))
        statuses.append(s.idstr == expidstrs[i])
        statuses.append(s.company == expcompany)
        statuses.append(s.instrument == expinstrument)
    if not all(statuses):
        print("Failed TEST {}".format(testcount))

    #status message
    if all(statuses):
        print("{}: PASSED".format(__file__))
    else:
        print("{}: FAILED".format(__file__))
Example #8
def process_directory(params):

    #parameters for processing
    input_dir = params["input_directory"]
    ws = params["subset_wavelength_range"][0]
    we = params["subset_wavelength_range"][1]

    #read the spectrums
    extension = {'asd', 'ASD'}
    spec_files = [
        os.path.join(input_dir, f) for f in os.listdir(input_dir)
        if get_extension(f) in extension
    ]
    raw_specs = [SdalReader().read_spectrum(f) for f in spec_files]
    if len(raw_specs) == 0: return
    print("Data directory: {}".format(input_dir))
    print("  Read {} {} files".format(len(raw_specs), extension))

    #perform jump correction
    jumpcorrector = JumpCorrector(params["jumpcorrection_wavelengths"],
                                  params["jumpcorrection_stablezone"])
    jc_specs = [jumpcorrector.correct(s) for s in raw_specs]
    print("   Jump corrected {} spectrums(s)".format(len(jc_specs)))

    #separate into green vegetation and non-green vegetation spectra
    gvd = GreenVegDetector()
    raw_white_specs, raw_target_specs = [], []
    jc_white_specs, jc_target_specs = [], []
    ndvis, reflectance_ranges = [], []
    for (raw, jc) in zip(raw_specs, jc_specs):
        (status, ndvi, reflectance_range) = gvd.is_green_vegetation(jc)
        ndvis.append(ndvi)
        reflectance_ranges.append(reflectance_range)
        if status:
            raw_target_specs.append(raw)
            jc_target_specs.append(jc)
        else:
            raw_white_specs.append(raw)
            jc_white_specs.append(jc)
    print("   # white spectra: {}".format(len(jc_white_specs)))
    print("   # target spectra: {}".format(len(jc_target_specs)))

    #subset to desired wavelength range
    #do this for the jc_target_specs only
    proc_target_specs = [s.wavelength_subset(ws, we) for s in jc_target_specs]
    print("   Subsetted spectra to range: {}, {}".format(ws, we))

    #create SpectrumGroup object for raw_target_specs
    raw_target_sg = SpectrumGroup()
    raw_target_sg.group(raw_target_specs, "raw_target")
    raw_target_sg.save_dataframe(input_dir)
    raw_target_sg.save_plots(input_dir)

    #create SpectrumGroup object for raw_white_specs
    raw_white_sg = SpectrumGroup()
    raw_white_sg.group(raw_white_specs, "raw_white")
    raw_white_sg.save_dataframe(input_dir)
    raw_white_sg.save_plots(input_dir)

    #create SpectrumGroup object for proc_target_specs
    proc_target_sg = SpectrumGroup()
    proc_target_sg.group(proc_target_specs, "proc_target")
    proc_target_sg.save_dataframe(input_dir)
    proc_target_sg.save_plots(input_dir)
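
All of the process_directory variants above take a plain dict of settings, so a thin driver that loads them from a file is enough to batch-process several data directories. A sketch, assuming a hypothetical params.json that contains the keys read by process_directory:

import json

# hypothetical driver: load the settings once, then process several data directories;
# params.json and the directory paths are placeholders
with open("params.json") as fh:
    base_params = json.load(fh)

for directory in ["/data/site_a", "/data/site_b"]:
    params = dict(base_params, input_directory=directory)
    process_directory(params)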