Example #1
import numpy as np

from utils import read_hyper


def get_patch_spectra(fname, oname=None):
    """
    Get the patch spectra and write to a csv file.

    :param fname:
        The name of the data cube (e.g., data.raw).

    :param oname: (optional)
        The output name of the csv file. The default replaces the .raw
        extension of the data file with .csv.
    """

    # -- set the output file name
    oname = oname if oname else fname.replace(".raw", ".csv")

    # -- read the data cube
    cube = read_hyper(fname)

    # -- set the patch coordinates (the first is sky)
    rr = ((0, 160), (1276, 1428), (1009, 1081), (872, 896), (925, 948),
          (901, 921), (1021, 1038), (870, 887), (930, 947))
    cr = ((0, cube.ncol), (474, 521), (826, 846), (747, 760), (733, 736),
          (771, 775), (84, 101), (675, 692), (230, 247))

    # -- loop through patches and get spectra
    print("getting patch spectra...")
    specs = np.array([cube.data[:, rr[i][0]:rr[i][1], cr[i][0]:cr[i][1]]
                      .mean(-1).mean(-1) for i in range(len(rr))])

    # -- save to csv
    print("writing to {0}...".format(oname))
    np.savetxt(oname, np.vstack([cube.waves, specs]).T, delimiter=",")

    return
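
# -- a minimal usage sketch (the file name below is hypothetical); by
# -- default this writes the patch spectra of scan_00001.raw to
# -- scan_00001.csv
if __name__ == "__main__":
    get_patch_spectra("scan_00001.raw")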
Example #2
import numpy as np

from utils import read_hyper


def get_patch_spectra(fname, rr, cr):
    """
    Get the patch spectra.

    :param fname:
        The name of the data cube (e.g., data.raw).

    :param rr:
        List of tuples containing the lower and upper row boundaries for
        the regions.

    :param cr:
        List of tuples containing the lower and upper column boundaries for
        the regions.
    """

    # -- read the data cube
    cube = read_hyper(fname)

    # -- loop through patches and get spectra
    print("getting patch spectra...")
    nreg = len(rr)
    specs = np.array([cube.data[:, rr[i][0]:rr[i][1], cr[i][0]:cr[i][1]]
                      .mean(-1).mean(-1) for i in range(nreg)])

    return specs
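
# -- a minimal usage sketch (hypothetical file name; the patch coordinates
# -- are example values borrowed from the other snippets)
if __name__ == "__main__":
    specs = get_patch_spectra("scan_00001.raw",
                              rr=[(872, 896), (925, 948)],
                              cr=[(747, 760), (733, 736)])
    print(specs.shape)  # (number of regions, number of wavelength channels)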
Example #3
import numpy as np
import scipy.ndimage as mm  # assumed: the original's "mm" provides label() and mean()

import utils


def sptr_mean(path, fname, boolean_mask):
    '''
    sptr_mean takes the hyperspectral image and the boolean source mask from
    the hyper_pixcorr function and returns the mean spectral intensity of
    the sources in each spectral channel.

    Input Parameters:
    ------------
    path = str
        Path to the directory where hyperspectral images are located

    fname = str
        File name of raw hyperspectral image

    boolean_mask = np.array
        Output boolean mask of sources from hyper_pixcorr function

    Output:
    ------------
    src_sptr_mean = np.array
        Mean spectrum of sources across corresponding source pixels
    '''

    # Reading the raw hyperspectral image
    cube = utils.read_hyper(path, fname)

    # Converting the memory-mapped cube to a floating-point array in memory
    img = 1.0 * cube.data

    # Labeling the sources in the boolean mask
    labels, count = mm.label(boolean_mask)

    # Computing the mean over every labeled source in each spectral channel
    # (the mask is one row and one column smaller than the image, hence the
    # trim below)
    index = np.arange(count + 1)
    sptr_stack = []

    for i in range(img.shape[0]):
        channel = img[i, :-1, :-1]
        src_mean = mm.mean(channel, labels, index)
        sptr_stack.append(src_mean)

    return np.array(sptr_stack)
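
# -- a minimal usage sketch (hypothetical path and file name); the boolean
# -- mask is assumed to come from the hyper_pixcorr example further down
if __name__ == "__main__":
    mask = hyper_pixcorr("/path/to/scans", "scan_00001.raw")
    spectra = sptr_mean("/path/to/scans", "scan_00001.raw", mask)
    print(spectra.shape)  # (number of channels, number of sources + 1)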
Example #4
import glob
import os

import numpy as np

import utils


def stack_meanspectra(dpath):
    '''
    stack_meanspectra takes the path to a directory of hyperspectral images,
    stacks all of the scans within that directory, and returns a data cube
    containing the mean of all scans.

    Input Parameters:
    ------------
    dpath = str
        Path to the directory where hyperspectral images are located

    Output:
    ------------
    stack_mean = np.array
        Mean data cube of all scans
    '''

    # -- get the file list
    flist = sorted(glob.glob(os.path.join(dpath, "*.raw")))

    # -- create the memory maps
    cubes = [utils.read_hyper(f) for f in flist]

    # -- get the minimum number columns
    mincol = min([cube.ncol for cube in cubes])

    # -- initialize the stack (use a float accumulator to avoid uint16
    # -- overflow when summing many scans)
    print("initializing stack...")
    stack = np.zeros((cubes[0].nwav, cubes[0].nrow, mincol), dtype=np.float64)

    # -- loop through cubes and sum
    for cube in cubes:
        print("adding {0}".format(cube.filename))
        stack[...] += cube.data[..., :mincol]

    return stack / len(cubes)
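
# -- a minimal usage sketch (hypothetical directory path)
if __name__ == "__main__":
    mean_cube = stack_meanspectra("/path/to/slow_hsi_scans")
    print(mean_cube.shape)  # (nwav, nrow, mincol)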
Example #5
import numpy as np

from utils import read_hyper


def get_patch_spectra(fname, oname=None):
    """
    Get the patch spectra and write to a csv file.

    :param fname:
        The name of the data cube (e.g., data.raw).

    :param oname: (optional)
        The output name of the csv file. The default replaces the .raw
        extension of the data file with .csv.
    """

    # -- set the output file name
    oname = oname if oname else fname.replace(".raw", ".csv")

    # -- read the data cube
    cube = read_hyper(fname)

    # -- set the patch coordinates (the first is sky)
    rr = ((0, 160), (1232, 1428), (1001, 1046), (872, 896), (1042, 1118),
          (868, 1018), (866, 996))
    cr = ((0, cube.ncol), (474, 539), (826, 850), (747, 764), (72, 121),
          (659, 688), (221, 251))

    # -- loop through patches and get spectra
    print("getting patch spectra...")
    specs = np.array([cube.data[:, rr[i][0]:rr[i][1], cr[i][0]:cr[i][1]]
                      .mean(-1).mean(-1) for i in range(len(rr))])

    # -- save to csv
    print("writing to {0}...".format(oname))
    np.savetxt(oname, np.vstack([cube.waves, specs]).T, delimiter=",")

    return
Example #6
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import glob

import numpy as np

from utils import read_hyper

# -- get the file list
dpath = os.path.join(os.environ["REBOUND_DATA"], "slow_hsi_scans")
flist = sorted(glob.glob(os.path.join(dpath, "*.raw")))

# -- create the memory maps
cubes = [read_hyper(f) for f in flist]

# -- get the minimum number columns
mincol = min([cube.ncol for cube in cubes])

# -- initialize the stack (note: a uint16 accumulator can overflow when
# -- summing many scans)
print("initializing stack...")
stack = np.zeros((cubes[0].nwav, cubes[0].nrow, mincol), dtype=np.uint16)

# -- loop through cubes and sum
for cube in cubes:
    print("adding {0}".format(cube.filename))
    stack[...] += cube.data[..., :mincol]

# -- write to original .raw format (oname must be defined before the output
# -- path is built)
oname = "_".join(os.path.split(flist[0])[-1].split("_")[:-1]) + "_stack.raw"
opath = os.environ["REBOUND_WRITE"]
stack.transpose(2, 0, 1)[..., ::-1] \
     .flatten().tofile(os.path.join(opath, oname))
Example #7
import os
import time

import numpy as np


def process_scan(fname):
    """
    Process a VNIR scan for vegetation.
    """

    # -- get the scan number
    snum = fname.split("_")[1].replace(".raw", "")

    # -- set the output files
    vegfile = os.path.join(VEGDIR, "veg_specs_{0}.npy".format(snum))
    skyfile = os.path.join(SKYDIR, "sky_spec_{0}.npy".format(snum))
    satfile = os.path.join(SATDIR, "nsat_{0}.npy".format(snum))
    bldfile = os.path.join(BLDDIR, "bld_specs_{0}.npy".format(snum))
    altfile = os.path.join(ALTDIR, "alt_specs_{0}.npy".format(snum))
    newfile = os.path.join(NEWDIR, "new_specs_{0}.npy".format(snum))

    # -- check if they exist
    vegdone = os.path.isfile(vegfile)
    skydone = os.path.isfile(skyfile)
    satdone = os.path.isfile(satfile)
    blddone = os.path.isfile(bldfile)
    altdone = os.path.isfile(altfile)
    newdone = os.path.isfile(newfile)
    if vegdone and skydone and satdone and blddone and altdone and newdone:
        return

    # -- read data file and initialize time
    print("working on scan {0}...".format(snum))
    t0 = time.time()
    cube = hu.read_hyper(fname)

    # -- pull off vegetation pixels (remove last column if necessary)
    if not vegdone:
        print("pulling off vegetation pixels...")

        # -- write to file
        np.save(vegfile, cube.data[:,
                                   trind.reshape(sh)[:, :cube.data.shape[2]]])

    # -- calculate sky
    if not skydone:
        print("calculating sky...")

        # -- write to file
        np.save(skyfile, cube.data[:, :nsrow].mean(-1).mean(-1))

    # -- calculate number of saturated pixels
    if not satdone:
        print("calculating saturated pixels...")

        # -- write to file
        np.save(satfile, (cube.data == satval).sum(0))

    # -- get the building spectrum
    if not blddone:
        print("calculating building spectrum...")

        # -- write to file
        np.save(bldfile, cube.data[:, 990:1034, 799:956])
        np.save(bldfile.replace("specs_", "specs_avg_"),
                cube.data[:, 990:1034, 799:956].mean(-1).mean(-1))

    # -- get the alternate building spectrum
    if not altdone:
        print("calculating alternate building spectrum...")

        # -- write to file
        #    region 1: rows 933-970,  cols 344-364
        #    region 2: rows 970-1000, cols 352-364
        #    region 3: rows 931-962,  cols 455-477
        r1r = [933, 970]
        r1c = [344, 364]
        r2r = [970, 1000]
        r2c = [352, 364]
        r3r = [931, 962]
        r3c = [455, 477]
        npixr1 = (r1r[1] - r1r[0]) * (r1c[1] - r1c[0])
        npixr2 = (r2r[1] - r2r[0]) * (r2c[1] - r2c[0])
        npixr3 = (r3r[1] - r3r[0]) * (r3c[1] - r3c[0])
        aspecs = np.hstack([cube.data[:, r1r[0]:r1r[1], r1c[0]:r1c[1]]
                                .reshape(cube.data.shape[0], npixr1),
                            cube.data[:, r2r[0]:r2r[1], r2c[0]:r2c[1]]
                                .reshape(cube.data.shape[0], npixr2),
                            cube.data[:, r3r[0]:r3r[1], r3c[0]:r3c[1]]
                                .reshape(cube.data.shape[0], npixr3)])
        np.save(altfile, aspecs.T)
        np.save(altfile.replace("specs_", "specs_avg_"), aspecs.mean(-1))

    # -- get the new building spectrum
    if not newdone:
        print("calculating new building spectrum...")

        # -- write to file (saving newfile itself so the newdone check above
        # -- can be satisfied on subsequent runs)
        r1r = [1125, 1137]
        r1c = [790, 829]
        npixr1 = (r1r[1] - r1r[0]) * (r1c[1] - r1c[0])
        nspecs = (cube.data[:, r1r[0]:r1r[1], r1c[0]:r1c[1]]
                  .reshape(cube.data.shape[0], npixr1))
        np.save(newfile, nspecs.T)
        np.save(newfile.replace("specs_", "specs_avg_"), nspecs.mean(-1))

    # -- alert user
    dt = time.time() - t0
    print("processed cube in {0}m:{1:02}s".format(int(dt // 60), int(dt % 60)))

    return
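
# -- a minimal driver sketch (hypothetical data directory) that processes
# -- every raw scan in the directory
if __name__ == "__main__":
    import glob
    for fname in sorted(glob.glob("/path/to/vnir_scans/*.raw")):
        process_scan(fname)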
Example #8
import os

import numpy as np

from utils import read_hyper, make_rgb8  # assumed: make_rgb8 lives alongside read_hyper


def get_frame(cube, lam):
    """
    Get a normalized frame at some wavelength lambda.

    NOTE: the original snippet is truncated and begins mid-docstring; the
    function name and signature above are reconstructions.
    """
    fr = cube.data[np.abs(cube.waves - lam).argmin()]

    return fr / float(fr.max())


# -- utilities
dpath = os.path.join("..", "data")
rname = "veg_00945.raw"
fname = os.path.join(dpath, rname)
icol = 605
rr = (1175, 1350)

# -- read the cube
cube = read_hyper(fname)

# -- get the spectra
specs = cube.data[:, rr[0]:rr[1], icol].T.copy()

# -- get the mean spectrum
mspec = specs.mean(0)

# -- regress out that spectrum from each: cc holds [slope, intercept] for
# -- each pixel spectrum, and mod is the linear prediction from the mean
cc = np.array([np.polyfit(mspec, 1.0 * i, 1) for i in specs])
mod = (cc[:, 1] + cc[:, 0] * mspec[:, np.newaxis]).T
res = specs - mod

# -- get the RGB image
rgb = make_rgb8(cube.data, cube.waves, scl=5.)
rgb2 = make_rgb8(cube.data, cube.waves, scl=2.5)
Example #9
import numpy as np

import utils


def hyper_pixcorr(path, fname, thresh=0.3):
    '''
    hyper_pixcorr takes the hyperspectral image and a threshold correlation
    value and returns a boolean array of pixels that are correlated with
    their neighbors.

    Input Parameters:
    ------------
    path = str
        Path to the directory where hyperspectral images are located

    fname = str
        File name of raw hyperspectral image

    thresh = float
        Threshold correlation value for masking the correlated sources

    Output:
    ------------
    final_mask = np.array
        Boolean array of pixel locations with correlations along both axes

    Note: this array is one row and one column smaller than the input image.
    '''

    # Reading the raw hyperspectral image
    cube = utils.read_hyper(path, fname)

    # Converting the memory-mapped cube to a floating-point array in memory
    img = 1.0 * cube.data

    # Standardizing each pixel spectrum (zero mean, unit variance along the
    # spectral axis)
    img -= img.mean(0, keepdims=True)
    img /= img.std(0, keepdims=True)

    # Computing the correlations between neighboring pixels along the row
    # axis (axis 1)
    corr_x = (img[:, :-1, :] * img[:, 1:, :]).mean(0)
    corr_x = corr_x[:, :-1]

    # Computing the correlations between neighboring pixels along the column
    # axis (axis 2)
    corr_y = (img[:, :, :-1] * img[:, :, 1:]).mean(0)
    corr_y = corr_y[:-1, :]

    # Splitting the top and bottom parts of the row-axis correlations
    corr_x_top = corr_x[:800, :]
    corr_x_bot = corr_x[800:1599, :]

    # Splitting the top and bottom parts of the column-axis correlations
    corr_y_top = corr_y[:800, :]
    corr_y_bot = corr_y[800:1599, :]

    # Creating a mask for all the pixels/sources with correlation greater
    # than the threshold (the top 800 rows use a fixed, stricter threshold)
    corr_mask_x = np.concatenate(((corr_x_top > 0.6), (corr_x_bot > thresh)),
                                 axis=0)

    corr_mask_y = np.concatenate(((corr_y_top > 0.6), (corr_y_bot > thresh)),
                                 axis=0)

    # Merging the correlation masks in left-right and top-down directions
    final_mask = corr_mask_x | corr_mask_y

    return final_mask
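
# -- a minimal usage sketch (hypothetical path and file name)
if __name__ == "__main__":
    mask = hyper_pixcorr("/path/to/scans", "scan_00001.raw", thresh=0.3)
    print("{0} correlated pixels".format(mask.sum()))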